Commit 0903d5541b: Merge branch 'master' of github.com:grafana/grafana into externalPlugin

Conflicts:
	pkg/api/api.go
	pkg/api/api_plugin.go
	pkg/api/datasources.go
	pkg/api/frontendsettings.go
	pkg/api/index.go
	pkg/plugins/models.go
	pkg/plugins/plugins.go
.gitignore (vendored), 1 change

@@ -1,4 +1,5 @@
 node_modules
 npm-debug.log
 coverage/
 .aws-config.json
+awsconfig
.jshintrc, 1 change

@@ -27,6 +27,7 @@
 "maxlen": 140,

 "globals": {
   "System": true,
   "define": true,
   "require": true,
   "Chromath": false,
CHANGELOG.md, 36 changes

@@ -1,4 +1,33 @@
-# 2.6.0 (unreleased)
+# 3.0.0 (unreleased, master branch)
+
+### Breaking changes
+**InfluxDB 0.8.x** The data source for the old version of influxdb (0.8.x) is no longer included in default builds. Can easily be installed via improved plugin system, closes #3523
+**KairosDB** The data source is no longer included in default builds. Can easily be installed via improved plugin system, closes #3524
+
+### Enhancements
+* **Sessions**: Support for memcached as session storage, closes [#3458](https://github.com/grafana/grafana/pull/3458)
+
+# 2.6.1 (unreleased, 2.6.x branch)
+
+### New Features
+* **Elasticsearch**: Support for derivative unit option, closes [#3512](https://github.com/grafana/grafana/issues/3512)
+
+# 2.6.0 (2015-12-14)
+
+### New Features
+* **Elasticsearch**: Support for pipeline aggregations Moving average and derivative, closes [#2715](https://github.com/grafana/grafana/issues/2715)
+* **Elasticsearch**: Support for inline script and missing options for metrics, closes [#3500](https://github.com/grafana/grafana/issues/3500)
+* **Syslog**: Support for syslog logging, closes [#3161](https://github.com/grafana/grafana/pull/3161)
+* **Timepicker**: Always show refresh button even with refresh rate, closes [#3498](https://github.com/grafana/grafana/pull/3498)
+* **Login**: Make it possible to change the login hint on the login page, closes [#2571](https://github.com/grafana/grafana/pull/2571)
+
+### Bug Fixes
+* **metric editors**: Fix for clicking typeahead auto dropdown option, fixes [#3428](https://github.com/grafana/grafana/issues/3428)
+* **influxdb**: Fixed issue showing Group By label only on first query, fixes [#3453](https://github.com/grafana/grafana/issues/3453)
+* **logging**: Add more verbose info logging for http requests, closes [#3405](https://github.com/grafana/grafana/pull/3405)
 
 # 2.6.0-Beta1 (2015-12-04)
 
 ### New Table Panel
 * **table**: New powerful and flexible table panel, closes [#215](https://github.com/grafana/grafana/issues/215)

@@ -6,9 +35,9 @@
 ### Enhancements
 * **CloudWatch**: Support for multiple AWS Credentials, closes [#3053](https://github.com/grafana/grafana/issues/3053), [#3080](https://github.com/grafana/grafana/issues/3080)
 * **Elasticsearch**: Support for dynamic daily indices for annotations, closes [#3061](https://github.com/grafana/grafana/issues/3061)
 * **Elasticsearch**: Support for setting min_doc_count for date histogram, closes [#3416](https://github.com/grafana/grafana/issues/3416)
 * **Graph Panel**: Option to hide series with all zeroes from legend and tooltip, closes [#1381](https://github.com/grafana/grafana/issues/1381), [#3336](https://github.com/grafana/grafana/issues/3336)
 
 ### Bug Fixes
 * **cloudwatch**: fix for handling of period for long time ranges, fixes [#3086](https://github.com/grafana/grafana/issues/3086)
 * **dashboard**: fix for collapse row by clicking on row title, fixes [#3065](https://github.com/grafana/grafana/issues/3065)

@@ -16,6 +45,9 @@
 * **graph**: layout fix for color picker when right side legend was enabled, fixes [#3093](https://github.com/grafana/grafana/issues/3093)
 * **elasticsearch**: disabling elastic query (via eye) caused error, fixes [#3300](https://github.com/grafana/grafana/issues/3300)
 
+### Breaking changes
+* **elasticsearch**: Manual json edited queries are not supported any more (they very barely worked in 2.5)
+
 # 2.5 (2015-10-28)
 
 **New Feature: Mix data sources**
Godeps/Godeps.json (generated), 47 changes

@@ -20,53 +20,53 @@
Every vendored github.com/aws/aws-sdk-go package is bumped from
    "Comment": "v0.10.4-18-gce51895",
    "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
to
    "Comment": "v1.0.0",
    "Rev": "abb928e07c4108683d6b4d0b6ca08fe6bc0eee5f"
for the following import paths:

    github.com/aws/aws-sdk-go/aws
    github.com/aws/aws-sdk-go/private/endpoints
    github.com/aws/aws-sdk-go/private/protocol/ec2query
    github.com/aws/aws-sdk-go/private/protocol/query
    github.com/aws/aws-sdk-go/private/protocol/rest
    github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
    github.com/aws/aws-sdk-go/private/signer/v4
    github.com/aws/aws-sdk-go/private/waiter
    github.com/aws/aws-sdk-go/service/cloudwatch
    github.com/aws/aws-sdk-go/service/ec2

The github.com/davecgh/go-spew/spew entry is unchanged context.

@@ -166,6 +166,11 @@
A new dependency entry is added after the gopkg.in/redis.v2 entry
("Comment": "v2.3.2", "Rev": "e6179049628164864e6e84e973cfb56335748dea"),
whose closing brace gains a trailing comma:

-		}
+		},
+		{
+			"ImportPath": "github.com/bradfitz/gomemcache/memcache",
+			"Comment": "release.r60-40-g72a6864",
+			"Rev": "72a68649ba712ee7c4b5b4a943a626bcd7d90eb8"
+		}
 	]
 }
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go (generated, vendored), 26 changes

@@ -13,11 +13,11 @@ var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
 
 // rValuesAtPath returns a slice of values found in value v. The values
 // in v are explored recursively so all nested values are collected.
-func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) []reflect.Value {
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
 	pathparts := strings.Split(path, "||")
 	if len(pathparts) > 1 {
 		for _, pathpart := range pathparts {
-			vals := rValuesAtPath(v, pathpart, create, caseSensitive)
+			vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
 			if len(vals) > 0 {
 				return vals
 			}

@@ -76,7 +76,16 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool)
 				return false
 			})
 
-			if create && value.Kind() == reflect.Ptr && value.IsNil() {
+			if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+				if !value.IsNil() {
+					value.Set(reflect.Zero(value.Type()))
+				}
+				return []reflect.Value{value}
+			}
+
+			if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
 				// TODO if the value is the terminus it should not be created
 				// if the value to be set to its position is nil.
 				value.Set(reflect.New(value.Type().Elem()))
 				value = value.Elem()
 			} else {

@@ -84,7 +93,7 @@
 			}
 
 			if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
-				if !create && value.IsNil() {
+				if !createPath && value.IsNil() {
 					value = reflect.ValueOf(nil)
 				}
 			}

@@ -116,7 +125,7 @@
 			// pull out index
 			i := int(*index)
 			if i >= value.Len() { // check out of bounds
-				if create {
+				if createPath {
 					// TODO resize slice
 				} else {
 					continue

@@ -127,7 +136,7 @@
 			value = reflect.Indirect(value.Index(i))
 
 			if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
-				if !create && value.IsNil() {
+				if !createPath && value.IsNil() {
 					value = reflect.ValueOf(nil)
 				}
 			}

@@ -176,8 +185,11 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
 // SetValueAtPath sets a value at the case insensitive lexical path inside
 // of a structure.
 func SetValueAtPath(i interface{}, path string, v interface{}) {
-	if rvals := rValuesAtPath(i, path, true, false); rvals != nil {
+	if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
 		for _, rval := range rvals {
+			if rval.Kind() == reflect.Ptr && rval.IsNil() {
+				continue
+			}
 			setValue(rval, v)
 		}
 	}
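The practical effect of the new nilTerm argument: SetValueAtPath(i, path, nil) now zeroes a pointer at the end of the path instead of allocating a fresh value for it, while non-terminal pointers are still created. A minimal sketch of that behavior (the Outer and Inner types here are hypothetical, introduced only for illustration; the vendored tests below exercise the same cases):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/awsutil"
    )

    // Hypothetical types for illustration only.
    type Inner struct{ C string }
    type Outer struct {
    	B    *Inner
    	Name *string
    }

    func main() {
    	// Non-nil values still create intermediate pointers along the
    	// (case-insensitive) path.
    	o := Outer{}
    	awsutil.SetValueAtPath(&o, "b.c", "foo")
    	fmt.Println(o.B.C) // "foo"

    	// With v == nil, rValuesAtPath runs with nilTerm=true and the
    	// terminal pointer is set to its zero value (nil) rather than
    	// being allocated and then assigned.
    	name := "bar"
    	o2 := Outer{Name: &name}
    	awsutil.SetValueAtPath(&o2, "Name", nil)
    	fmt.Println(o2.Name == nil) // true
    }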
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go (generated, vendored), 34 changes

@@ -105,4 +105,38 @@ func TestSetValueAtPathSuccess(t *testing.T) {
 	assert.Equal(t, "test0", s2.B.B.C)
 	awsutil.SetValueAtPath(&s2, "A", []Struct{{}})
 	assert.Equal(t, []Struct{{}}, s2.A)
+
+	str := "foo"
+
+	s3 := Struct{}
+	awsutil.SetValueAtPath(&s3, "b.b.c", str)
+	assert.Equal(t, "foo", s3.B.B.C)
+
+	s3 = Struct{B: &Struct{B: &Struct{C: str}}}
+	awsutil.SetValueAtPath(&s3, "b.b.c", nil)
+	assert.Equal(t, "", s3.B.B.C)
+
+	s3 = Struct{}
+	awsutil.SetValueAtPath(&s3, "b.b.c", nil)
+	assert.Equal(t, "", s3.B.B.C)
+
+	s3 = Struct{}
+	awsutil.SetValueAtPath(&s3, "b.b.c", &str)
+	assert.Equal(t, "foo", s3.B.B.C)
+
+	var s4 struct{ Name *string }
+	awsutil.SetValueAtPath(&s4, "Name", str)
+	assert.Equal(t, str, *s4.Name)
+
+	s4 = struct{ Name *string }{}
+	awsutil.SetValueAtPath(&s4, "Name", nil)
+	assert.Equal(t, (*string)(nil), s4.Name)
+
+	s4 = struct{ Name *string }{Name: &str}
+	awsutil.SetValueAtPath(&s4, "Name", nil)
+	assert.Equal(t, (*string)(nil), s4.Name)
+
+	s4 = struct{ Name *string }{}
+	awsutil.SetValueAtPath(&s4, "Name", &str)
+	assert.Equal(t, str, *s4.Name)
 }
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go (generated, vendored), 17 changes

@@ -41,11 +41,20 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op
 		Handlers: handlers,
 	}
 
-	maxRetries := aws.IntValue(cfg.MaxRetries)
-	if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
-		maxRetries = 3
+	switch retryer, ok := cfg.Retryer.(request.Retryer); {
+	case ok:
+		svc.Retryer = retryer
+	case cfg.Retryer != nil && cfg.Logger != nil:
+		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+		cfg.Logger.Log(s)
+		fallthrough
+	default:
+		maxRetries := aws.IntValue(cfg.MaxRetries)
+		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+			maxRetries = 3
+		}
+		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
 	}
-	svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
 
 	svc.AddDebugHandlers()
 
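client.New now honors a user-supplied retryer before falling back to DefaultRetryer: a value that satisfies request.Retryer is used as-is, anything else triggers a logged warning and the default. A minimal sketch of plugging one in (patientRetryer is a hypothetical type; it is assumed here that embedding the package's DefaultRetryer, which client.New itself constructs above, satisfies request.Retryer):

    package main

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/client"
    	"github.com/aws/aws-sdk-go/aws/request"
    )

    // patientRetryer keeps DefaultRetryer's backoff and retry decisions but
    // widens the retry budget beyond the default of 3 used by client.New.
    type patientRetryer struct {
    	client.DefaultRetryer
    }

    func (patientRetryer) MaxRetries() int { return 8 }

    func main() {
    	// request.WithRetryer (added in retryer.go below) stores the value
    	// in cfg.Retryer; client.New type-asserts it to request.Retryer.
    	cfg := request.WithRetryer(aws.NewConfig(), patientRetryer{})
    	_ = cfg // pass cfg when constructing a service client
    }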
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go (generated, vendored), 22 changes

@@ -12,6 +12,9 @@ import (
 // is nil also.
 const UseServiceDefaultRetries = -1
 
+// RequestRetryer is an alias for a type that implements the request.Retryer interface.
+type RequestRetryer interface{}
+
 // A Config provides service configuration for service clients. By default,
 // all clients will use the {defaults.DefaultConfig} structure.
 type Config struct {

@@ -59,6 +62,21 @@ type Config struct {
 	// configuration.
 	MaxRetries *int
 
+	// Retryer guides how HTTP requests should be retried in case of recoverable failures.
+	//
+	// When nil or the value does not implement the request.Retryer interface,
+	// the request.DefaultRetryer will be used.
+	//
+	// When both Retryer and MaxRetries are non-nil, the former is used and
+	// the latter ignored.
+	//
+	// To set the Retryer field in a type-safe manner and with chaining, use
+	// the request.WithRetryer helper function:
+	//
+	//   cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+	//
+	Retryer RequestRetryer
+
 	// Disables semantic parameter validation, which validates input for missing
 	// required fields and/or other semantic request input errors.
 	DisableParamValidation *bool

@@ -217,6 +235,10 @@ func mergeInConfig(dst *Config, other *Config) {
 		dst.MaxRetries = other.MaxRetries
 	}
 
+	if other.Retryer != nil {
+		dst.Retryer = other.Retryer
+	}
+
 	if other.DisableParamValidation != nil {
 		dst.DisableParamValidation = other.DisableParamValidation
 	}
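As the new doc comment spells out, Retryer takes precedence over MaxRetries when both are set, and mergeInConfig copies a non-nil Retryer from the overriding config. A short self-contained sketch of the plain MaxRetries path (aws.Int, aws.IntValue, and the WithMaxRetries chain setter are assumed to be part of this SDK version's config helpers):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    )

    func main() {
    	// Retry-count tuning only; client.New (above) turns this into
    	// DefaultRetryer{NumMaxRetries: 5}.
    	cfg := aws.NewConfig().WithMaxRetries(5)
    	fmt.Println(aws.IntValue(cfg.MaxRetries)) // 5

    	// Explicitly defer to the client default (3, per client.New).
    	cfg2 := &aws.Config{MaxRetries: aws.Int(aws.UseServiceDefaultRetries)}
    	_ = cfg2
    }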
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go (generated, vendored), 14 changes

@@ -44,12 +44,19 @@ func (r *Request) nextPageTokens() []interface{} {
 	}
 
 	tokens := []interface{}{}
+	tokenAdded := false
 	for _, outToken := range r.Operation.OutputTokens {
 		v, _ := awsutil.ValuesAtPath(r.Data, outToken)
 		if len(v) > 0 {
 			tokens = append(tokens, v[0])
+			tokenAdded = true
+		} else {
+			tokens = append(tokens, nil)
 		}
 	}
+	if !tokenAdded {
+		return nil
+	}
 
 	return tokens
 }

@@ -85,9 +92,10 @@
 // return true to keep iterating or false to stop.
 func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
 	for page := r; page != nil; page = page.NextPage() {
-		page.Send()
-		shouldContinue := fn(page.Data, !page.HasNextPage())
-		if page.Error != nil || !shouldContinue {
+		if err := page.Send(); err != nil {
+			return err
+		}
+		if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
 			return page.Error
 		}
 	}
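EachPage is what the generated *Pages methods build on; the change makes a Send error abort iteration immediately, and nextPageTokens now keeps token positions aligned (filling nil) when only some output tokens are set. Typical usage goes through a generated wrapper, for example (a sketch; the session package is assumed from this vendor drop, and the zone ID is a placeholder):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/route53"
    )

    func main() {
    	svc := route53.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

    	params := &route53.ListResourceRecordSetsInput{
    		HostedZoneId: aws.String("ZONEID"), // placeholder zone ID
    	}
    	// The callback returns true to fetch the next page; a transport
    	// error now terminates the loop and is returned here.
    	err := svc.ListResourceRecordSetsPages(params,
    		func(p *route53.ListResourceRecordSetsOutput, lastPage bool) bool {
    			for _, rrs := range p.ResourceRecordSets {
    				fmt.Println(aws.StringValue(rrs.Name))
    			}
    			return true
    		})
    	if err != nil {
    		fmt.Println("list failed:", err)
    	}
    }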
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go (generated, vendored), 63 changes

@@ -9,6 +9,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/awstesting/unit"
 	"github.com/aws/aws-sdk-go/service/dynamodb"
+	"github.com/aws/aws-sdk-go/service/route53"
 	"github.com/aws/aws-sdk-go/service/s3"
 )
 

@@ -314,7 +315,69 @@ func TestPaginationTruncation(t *testing.T) {
 
 	assert.Equal(t, []string{"Key1", "Key2"}, results)
 	assert.Nil(t, err)
 }
+
+func TestPaginationNilToken(t *testing.T) {
+	client := route53.New(unit.Session)
+
+	reqNum := 0
+	resps := []*route53.ListResourceRecordSetsOutput{
+		{
+			ResourceRecordSets: []*route53.ResourceRecordSet{
+				{Name: aws.String("first.example.com.")},
+			},
+			IsTruncated:          aws.Bool(true),
+			NextRecordName:       aws.String("second.example.com."),
+			NextRecordType:       aws.String("MX"),
+			NextRecordIdentifier: aws.String("second"),
+			MaxItems:             aws.String("1"),
+		},
+		{
+			ResourceRecordSets: []*route53.ResourceRecordSet{
+				{Name: aws.String("second.example.com.")},
+			},
+			IsTruncated:    aws.Bool(true),
+			NextRecordName: aws.String("third.example.com."),
+			NextRecordType: aws.String("MX"),
+			MaxItems:       aws.String("1"),
+		},
+		{
+			ResourceRecordSets: []*route53.ResourceRecordSet{
+				{Name: aws.String("third.example.com.")},
+			},
+			IsTruncated: aws.Bool(false),
+			MaxItems:    aws.String("1"),
+		},
+	}
+	client.Handlers.Send.Clear() // mock sending
+	client.Handlers.Unmarshal.Clear()
+	client.Handlers.UnmarshalMeta.Clear()
+	client.Handlers.ValidateResponse.Clear()
+
+	idents := []string{}
+	client.Handlers.Build.PushBack(func(r *request.Request) {
+		p := r.Params.(*route53.ListResourceRecordSetsInput)
+		idents = append(idents, aws.StringValue(p.StartRecordIdentifier))
+	})
+	client.Handlers.Unmarshal.PushBack(func(r *request.Request) {
+		r.Data = resps[reqNum]
+		reqNum++
+	})
+
+	params := &route53.ListResourceRecordSetsInput{
+		HostedZoneId: aws.String("id-zone"),
+	}
+
+	results := []string{}
+	err := client.ListResourceRecordSetsPages(params, func(p *route53.ListResourceRecordSetsOutput, last bool) bool {
+		results = append(results, *p.ResourceRecordSets[0].Name)
+		return true
+	})
+
+	assert.NoError(t, err)
+	assert.Equal(t, []string{"", "second", ""}, idents)
+	assert.Equal(t, []string{"first.example.com.", "second.example.com.", "third.example.com."}, results)
+}
 
 // Benchmarks
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go (generated, vendored), 8 changes

@@ -3,6 +3,7 @@ package request
 import (
 	"time"
 
+	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 )
 

@@ -15,6 +16,13 @@ type Retryer interface {
 	MaxRetries() int
 }
 
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+	cfg.Retryer = retryer
+	return cfg
+}
+
 // retryableCodes is a collection of service response codes which are retry-able
 // without any further action.
 var retryableCodes = map[string]struct{}{
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored), 2 changes

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "0.10.4"
+const SDKVersion = "1.0.0"
Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go (generated, vendored), 95 changes

@@ -5,6 +5,7 @@ import (
 	"reflect"
 	"time"
 
+	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/request"

@@ -47,52 +48,74 @@ func (w *Waiter) Wait() error {
 		res := method.Call([]reflect.Value{in})
 		req := res[0].Interface().(*request.Request)
 		req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter"))
-		if err := req.Send(); err != nil {
-			return err
-		}
 
+		err := req.Send()
 		for _, a := range w.Acceptors {
+			if err != nil && a.Matcher != "error" {
+				// Only matcher error is valid if there is a request error
+				continue
+			}
+
 			result := false
+			var vals []interface{}
 			switch a.Matcher {
-			case "pathAll":
-				if vals, _ := awsutil.ValuesAtPath(req.Data, a.Argument); req.Error == nil && vals != nil {
-					result = true
-					for _, val := range vals {
-						if !awsutil.DeepEqual(val, a.Expected) {
-							result = false
-							break
-						}
-					}
-				}
+			case "pathAll", "path":
+				// Require all matches to be equal for result to match
+				vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+				result = true
+				for _, val := range vals {
+					if !awsutil.DeepEqual(val, a.Expected) {
+						result = false
+						break
+					}
+				}
 			case "pathAny":
-				if vals, _ := awsutil.ValuesAtPath(req.Data, a.Argument); req.Error == nil && vals != nil {
-					for _, val := range vals {
-						if awsutil.DeepEqual(val, a.Expected) {
-							result = true
-							break
-						}
-					}
-				}
+				// Only a single match needs to equal for the result to match
+				vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+				for _, val := range vals {
+					if awsutil.DeepEqual(val, a.Expected) {
+						result = true
+						break
+					}
+				}
 			case "status":
 				s := a.Expected.(int)
 				result = s == req.HTTPResponse.StatusCode
+			case "error":
+				if aerr, ok := err.(awserr.Error); ok {
+					result = aerr.Code() == a.Expected.(string)
+				}
+			case "pathList":
+				// ignored matcher
+			default:
+				logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s",
+					w.Config.Operation, a.Matcher)
 			}
 
-			if result {
-				switch a.State {
-				case "success":
-					return nil // waiter completed
-				case "failure":
-					if req.Error == nil {
-						return awserr.New("ResourceNotReady",
-							fmt.Sprintf("failed waiting for successful resource state"), nil)
-					}
-					return req.Error // waiter failed
-				case "retry":
-					// do nothing, just retry
-				}
-				break
+			if !result {
+				// If there was no matching result found there is nothing more to do
+				// for this response, retry the request.
+				continue
+			}
+
+			switch a.State {
+			case "success":
+				// waiter completed
+				return nil
+			case "failure":
+				// Waiter failure state triggered
+				return awserr.New("ResourceNotReady",
+					fmt.Sprintf("failed waiting for successful resource state"), err)
+			case "retry":
+				// clear the error and retry the operation
+				err = nil
+			default:
+				logf(client, "WARNING: Waiter for %s encountered unexpected state: %s",
+					w.Config.Operation, a.State)
 			}
 		}
+		if err != nil {
+			return err
+		}
 
 		time.Sleep(time.Second * time.Duration(w.Delay))

@@ -101,3 +124,13 @@
 	return awserr.New("ResourceNotReady",
 		fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil)
 }
+
+func logf(client reflect.Value, msg string, args ...interface{}) {
+	cfgVal := client.FieldByName("Config")
+	if !cfgVal.IsValid() {
+		return
+	}
+	if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil {
+		cfg.Logger.Log(fmt.Sprintf(msg, args...))
+	}
+}
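This private package is the engine behind the generated WaitUntil* methods on service clients: each poll sends the configured operation, then walks the acceptors, mapping matcher results ("path", "pathAll", "pathAny", "status", "error") onto success, failure, or retry states. A usage sketch through the public surface (this assumes the generated EC2 waiters of this SDK version and the session package from this vendor drop; the region and instance ID are placeholders):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ec2"
    )

    func main() {
    	svc := ec2.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

    	// Polls DescribeInstances until a success acceptor matches every
    	// instance state, a failure acceptor matches, or MaxAttempts is
    	// exhausted (returning the "ResourceNotReady" error built above).
    	err := svc.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{
    		InstanceIds: []*string{aws.String("i-0123456789abcdef0")}, // placeholder
    	})
    	if err != nil {
    		fmt.Println("wait failed:", err)
    	}
    }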
Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go (generated, vendored), 252 changes

@@ -1,6 +1,9 @@
 package waiter_test
 
 import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
 	"testing"
 
 	"github.com/stretchr/testify/assert"

@@ -41,22 +44,7 @@ func (c *mockClient) MockRequest(input *MockInput) (*request.Request, *MockOutpu
 	return req, output
 }
 
-var mockAcceptors = []waiter.WaitAcceptor{
-	{
-		State:    "success",
-		Matcher:  "pathAll",
-		Argument: "States[].State",
-		Expected: "running",
-	},
-	{
-		State:    "failure",
-		Matcher:  "pathAny",
-		Argument: "States[].State",
-		Expected: "stopping",
-	},
-}
-
-func TestWaiter(t *testing.T) {
+func TestWaiterPathAll(t *testing.T) {
 	svc := &mockClient{Client: awstesting.NewClient(&aws.Config{
 		Region: aws.String("mock-region"),
 	})}

@@ -73,13 +61,13 @@
 				{State: aws.String("pending")},
 			},
 		},
-		{ // Request 1
+		{ // Request 2
 			States: []*MockState{
 				{State: aws.String("running")},
 				{State: aws.String("pending")},
 			},
 		},
-		{ // Request 1
+		{ // Request 3
 			States: []*MockState{
 				{State: aws.String("running")},
 				{State: aws.String("running")},

@@ -104,7 +92,83 @@
 		Operation:   "Mock",
 		Delay:       0,
 		MaxAttempts: 10,
-		Acceptors:   mockAcceptors,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "States[].State",
+				Expected: "running",
+			},
+		},
 	}
 	w := waiter.Waiter{
 		Client: svc,
 		Input:  &MockInput{},
 		Config: waiterCfg,
 	}
 
 	err := w.Wait()
 	assert.NoError(t, err)
 	assert.Equal(t, 3, numBuiltReq)
 	assert.Equal(t, 3, reqNum)
 }
 
+func TestWaiterPath(t *testing.T) {
+	svc := &mockClient{Client: awstesting.NewClient(&aws.Config{
+		Region: aws.String("mock-region"),
+	})}
+	svc.Handlers.Send.Clear() // mock sending
+	svc.Handlers.Unmarshal.Clear()
+	svc.Handlers.UnmarshalMeta.Clear()
+	svc.Handlers.ValidateResponse.Clear()
+
+	reqNum := 0
+	resps := []*MockOutput{
+		{ // Request 1
+			States: []*MockState{
+				{State: aws.String("pending")},
+				{State: aws.String("pending")},
+			},
+		},
+		{ // Request 2
+			States: []*MockState{
+				{State: aws.String("running")},
+				{State: aws.String("pending")},
+			},
+		},
+		{ // Request 3
+			States: []*MockState{
+				{State: aws.String("running")},
+				{State: aws.String("running")},
+			},
+		},
+	}
+
+	numBuiltReq := 0
+	svc.Handlers.Build.PushBack(func(r *request.Request) {
+		numBuiltReq++
+	})
+	svc.Handlers.Unmarshal.PushBack(func(r *request.Request) {
+		if reqNum >= len(resps) {
+			assert.Fail(t, "too many polling requests made")
+			return
+		}
+		r.Data = resps[reqNum]
+		reqNum++
+	})
+
+	waiterCfg := waiter.Config{
+		Operation:   "Mock",
+		Delay:       0,
+		MaxAttempts: 10,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "path",
+				Argument: "States[].State",
+				Expected: "running",
+			},
+		},
+	}
 	w := waiter.Waiter{
 		Client: svc,

@@ -135,13 +199,13 @@ func TestWaiterFailure(t *testing.T) {
 				{State: aws.String("pending")},
 			},
 		},
-		{ // Request 1
+		{ // Request 2
 			States: []*MockState{
 				{State: aws.String("running")},
 				{State: aws.String("pending")},
 			},
 		},
-		{ // Request 1
+		{ // Request 3
 			States: []*MockState{
 				{State: aws.String("running")},
 				{State: aws.String("stopping")},

@@ -166,7 +230,20 @@
 		Operation:   "Mock",
 		Delay:       0,
 		MaxAttempts: 10,
-		Acceptors:   mockAcceptors,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "States[].State",
+				Expected: "running",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "States[].State",
+				Expected: "stopping",
+			},
+		},
 	}
 	w := waiter.Waiter{
 		Client: svc,

@@ -181,3 +258,134 @@
 	assert.Equal(t, 3, numBuiltReq)
 	assert.Equal(t, 3, reqNum)
 }
+
+func TestWaiterError(t *testing.T) {
+	svc := &mockClient{Client: awstesting.NewClient(&aws.Config{
+		Region: aws.String("mock-region"),
+	})}
+	svc.Handlers.Send.Clear() // mock sending
+	svc.Handlers.Unmarshal.Clear()
+	svc.Handlers.UnmarshalMeta.Clear()
+	svc.Handlers.ValidateResponse.Clear()
+
+	reqNum := 0
+	resps := []*MockOutput{
+		{ // Request 1
+			States: []*MockState{
+				{State: aws.String("pending")},
+				{State: aws.String("pending")},
+			},
+		},
+		{ // Request 2, error case
+		},
+		{ // Request 3
+			States: []*MockState{
+				{State: aws.String("running")},
+				{State: aws.String("running")},
+			},
+		},
+	}
+
+	numBuiltReq := 0
+	svc.Handlers.Build.PushBack(func(r *request.Request) {
+		numBuiltReq++
+	})
+	svc.Handlers.Send.PushBack(func(r *request.Request) {
+		if reqNum == 1 {
+			r.Error = awserr.New("MockException", "mock exception message", nil)
+			r.HTTPResponse = &http.Response{
+				StatusCode: 400,
+				Status:     http.StatusText(400),
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+			}
+			reqNum++
+		}
+	})
+	svc.Handlers.Unmarshal.PushBack(func(r *request.Request) {
+		if reqNum >= len(resps) {
+			assert.Fail(t, "too many polling requests made")
+			return
+		}
+		r.Data = resps[reqNum]
+		reqNum++
+	})
+
+	waiterCfg := waiter.Config{
+		Operation:   "Mock",
+		Delay:       0,
+		MaxAttempts: 10,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "States[].State",
+				Expected: "running",
+			},
+			{
+				State:    "retry",
+				Matcher:  "error",
+				Argument: "",
+				Expected: "MockException",
+			},
+		},
+	}
+	w := waiter.Waiter{
+		Client: svc,
+		Input:  &MockInput{},
+		Config: waiterCfg,
+	}
+
+	err := w.Wait()
+	assert.NoError(t, err)
+	assert.Equal(t, 3, numBuiltReq)
+	assert.Equal(t, 3, reqNum)
+}
+
+func TestWaiterStatus(t *testing.T) {
+	svc := &mockClient{Client: awstesting.NewClient(&aws.Config{
+		Region: aws.String("mock-region"),
+	})}
+	svc.Handlers.Send.Clear() // mock sending
+	svc.Handlers.Unmarshal.Clear()
+	svc.Handlers.UnmarshalMeta.Clear()
+	svc.Handlers.ValidateResponse.Clear()
+
+	reqNum := 0
+	svc.Handlers.Build.PushBack(func(r *request.Request) {
+		reqNum++
+	})
+	svc.Handlers.Send.PushBack(func(r *request.Request) {
+		code := 200
+		if reqNum == 3 {
+			code = 404
+		}
+		r.HTTPResponse = &http.Response{
+			StatusCode: code,
+			Status:     http.StatusText(code),
+			Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+		}
+	})
+
+	waiterCfg := waiter.Config{
+		Operation:   "Mock",
+		Delay:       0,
+		MaxAttempts: 10,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "status",
+				Argument: "",
+				Expected: 404,
+			},
+		},
+	}
+	w := waiter.Waiter{
+		Client: svc,
+		Input:  &MockInput{},
+		Config: waiterCfg,
+	}
+
+	err := w.Wait()
+	assert.NoError(t, err)
+	assert.Equal(t, 3, reqNum)
+}
Godeps/_workspace/src/github.com/bradfitz/gomemcache/LICENSE (generated, vendored, new file, 202 lines)

@@ -0,0 +1,202 @@
[The standard, unmodified Apache License, Version 2.0 (January 2004, http://www.apache.org/licenses/), including the appendix on how to apply the license with the "Copyright [yyyy] [name of copyright owner]" placeholder.]
669
Godeps/_workspace/src/github.com/bradfitz/gomemcache/memcache/memcache.go
generated
vendored
Normal file
669
Godeps/_workspace/src/github.com/bradfitz/gomemcache/memcache/memcache.go
generated
vendored
Normal file
@ -0,0 +1,669 @@
|
||||
/*
|
||||
Copyright 2011 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package memcache provides a client for the memcached cache server.
|
||||
package memcache
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Similar to:
|
||||
// http://code.google.com/appengine/docs/go/memcache/reference.html
|
||||
|
||||
var (
|
||||
// ErrCacheMiss means that a Get failed because the item wasn't present.
|
||||
ErrCacheMiss = errors.New("memcache: cache miss")
|
||||
|
||||
// ErrCASConflict means that a CompareAndSwap call failed due to the
|
||||
// cached value being modified between the Get and the CompareAndSwap.
|
||||
// If the cached value was simply evicted rather than replaced,
|
||||
// ErrNotStored will be returned instead.
|
||||
ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
|
||||
|
||||
// ErrNotStored means that a conditional write operation (i.e. Add or
|
||||
// CompareAndSwap) failed because the condition was not satisfied.
|
||||
ErrNotStored = errors.New("memcache: item not stored")
|
||||
|
||||
// ErrServer means that a server error occurred.
|
||||
ErrServerError = errors.New("memcache: server error")
|
||||
|
||||
// ErrNoStats means that no statistics were available.
|
||||
ErrNoStats = errors.New("memcache: no statistics available")
|
||||
|
||||
// ErrMalformedKey is returned when an invalid key is used.
|
||||
// Keys must be at maximum 250 bytes long, ASCII, and not
|
||||
// contain whitespace or control characters.
|
||||
ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters")
|
||||
|
||||
// ErrNoServers is returned when no servers are configured or available.
|
||||
ErrNoServers = errors.New("memcache: no servers configured or available")
|
||||
)
|
||||
|
||||
// DefaultTimeout is the default socket read/write timeout.
|
||||
const DefaultTimeout = 100 * time.Millisecond
|
||||
|
||||
const (
|
||||
buffered = 8 // arbitrary buffered channel size, for readability
|
||||
maxIdleConnsPerAddr = 2 // TODO(bradfitz): make this configurable?
|
||||
)
|
||||
|
||||
// resumableError returns true if err is only a protocol-level cache error.
|
||||
// This is used to determine whether or not a server connection should
|
||||
// be re-used or not. If an error occurs, by default we don't reuse the
|
||||
// connection, unless it was just a cache error.
|
||||
func resumableError(err error) bool {
|
||||
switch err {
|
||||
case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func legalKey(key string) bool {
|
||||
if len(key) > 250 {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(key); i++ {
|
||||
if key[i] <= ' ' || key[i] > 0x7e {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var (
|
||||
crlf = []byte("\r\n")
|
||||
space = []byte(" ")
|
||||
resultOK = []byte("OK\r\n")
|
||||
resultStored = []byte("STORED\r\n")
|
||||
resultNotStored = []byte("NOT_STORED\r\n")
|
||||
resultExists = []byte("EXISTS\r\n")
|
||||
resultNotFound = []byte("NOT_FOUND\r\n")
|
||||
resultDeleted = []byte("DELETED\r\n")
|
||||
resultEnd = []byte("END\r\n")
|
||||
resultOk = []byte("OK\r\n")
|
||||
resultTouched = []byte("TOUCHED\r\n")
|
||||
|
||||
resultClientErrorPrefix = []byte("CLIENT_ERROR ")
|
||||
)
|
||||
|
||||
// New returns a memcache client using the provided server(s)
|
||||
// with equal weight. If a server is listed multiple times,
|
||||
// it gets a proportional amount of weight.
|
||||
func New(server ...string) *Client {
|
||||
ss := new(ServerList)
|
||||
ss.SetServers(server...)
|
||||
return NewFromSelector(ss)
|
||||
}
|
||||
|
||||
// NewFromSelector returns a new Client using the provided ServerSelector.
|
||||
func NewFromSelector(ss ServerSelector) *Client {
|
||||
return &Client{selector: ss}
|
||||
}
|
||||
|
||||
// Client is a memcache client.
|
||||
// It is safe for unlocked use by multiple concurrent goroutines.
|
||||
type Client struct {
|
||||
// Timeout specifies the socket read/write timeout.
|
||||
// If zero, DefaultTimeout is used.
|
||||
Timeout time.Duration
|
||||
|
||||
selector ServerSelector
|
||||
|
||||
lk sync.Mutex
|
||||
freeconn map[string][]*conn
|
||||
}
|
||||
|
||||
// Item is an item to be got or stored in a memcached server.
|
||||
type Item struct {
|
||||
// Key is the Item's key (250 bytes maximum).
|
||||
Key string
|
||||
|
||||
// Value is the Item's value.
|
||||
Value []byte
|
||||
|
||||
// Object is the Item's value for use with a Codec.
|
||||
Object interface{}
|
||||
|
||||
// Flags are server-opaque flags whose semantics are entirely
|
||||
// up to the app.
|
||||
Flags uint32
|
||||
|
||||
// Expiration is the cache expiration time, in seconds: either a relative
|
||||
// time from now (up to 1 month), or an absolute Unix epoch time.
|
||||
// Zero means the Item has no expiration time.
|
||||
Expiration int32
|
||||
|
||||
// Compare and swap ID.
|
||||
casid uint64
|
||||
}
|
||||
|
||||
// conn is a connection to a server.
|
||||
type conn struct {
|
||||
nc net.Conn
|
||||
rw *bufio.ReadWriter
|
||||
addr net.Addr
|
||||
c *Client
|
||||
}
|
||||
|
||||
// release returns this connection back to the client's free pool
|
||||
func (cn *conn) release() {
|
||||
cn.c.putFreeConn(cn.addr, cn)
|
||||
}
|
||||
|
||||
func (cn *conn) extendDeadline() {
|
||||
cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout()))
|
||||
}
|
||||
|
||||
// condRelease releases this connection if the error pointed to by err
|
||||
// is nil (not an error) or is only a protocol level error (e.g. a
|
||||
// cache miss). The purpose is to not recycle TCP connections that
|
||||
// are bad.
|
||||
func (cn *conn) condRelease(err *error) {
|
||||
if *err == nil || resumableError(*err) {
|
||||
cn.release()
|
||||
} else {
|
||||
cn.nc.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
|
||||
c.lk.Lock()
|
||||
defer c.lk.Unlock()
|
||||
if c.freeconn == nil {
|
||||
c.freeconn = make(map[string][]*conn)
|
||||
}
|
||||
freelist := c.freeconn[addr.String()]
|
||||
if len(freelist) >= maxIdleConnsPerAddr {
|
||||
cn.nc.Close()
|
||||
return
|
||||
}
|
||||
c.freeconn[addr.String()] = append(freelist, cn)
|
||||
}
|
||||
|
||||
func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) {
|
||||
c.lk.Lock()
|
||||
defer c.lk.Unlock()
|
||||
if c.freeconn == nil {
|
||||
return nil, false
|
||||
}
|
||||
freelist, ok := c.freeconn[addr.String()]
|
||||
if !ok || len(freelist) == 0 {
|
||||
return nil, false
|
||||
}
|
||||
cn = freelist[len(freelist)-1]
|
||||
c.freeconn[addr.String()] = freelist[:len(freelist)-1]
|
||||
return cn, true
|
||||
}
|
||||
|
||||
func (c *Client) netTimeout() time.Duration {
|
||||
if c.Timeout != 0 {
|
||||
return c.Timeout
|
||||
}
|
||||
return DefaultTimeout
|
||||
}
|
||||
|
||||
// ConnectTimeoutError is the error type used when it takes
|
||||
// too long to connect to the desired host. This level of
|
||||
// detail can generally be ignored.
|
||||
type ConnectTimeoutError struct {
|
||||
Addr net.Addr
|
||||
}
|
||||
|
||||
func (cte *ConnectTimeoutError) Error() string {
|
||||
return "memcache: connect timeout to " + cte.Addr.String()
|
||||
}
|
||||
|
||||
func (c *Client) dial(addr net.Addr) (net.Conn, error) {
|
||||
type connError struct {
|
||||
cn net.Conn
|
||||
err error
|
||||
}
|
||||
|
||||
nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout())
|
||||
if err == nil {
|
||||
return nc, nil
|
||||
}
|
||||
|
||||
if ne, ok := err.(net.Error); ok && ne.Timeout() {
|
||||
return nil, &ConnectTimeoutError{addr}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (c *Client) getConn(addr net.Addr) (*conn, error) {
|
||||
cn, ok := c.getFreeConn(addr)
|
||||
if ok {
|
||||
cn.extendDeadline()
|
||||
return cn, nil
|
||||
}
|
||||
nc, err := c.dial(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cn = &conn{
|
||||
nc: nc,
|
||||
addr: addr,
|
||||
rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)),
|
||||
c: c,
|
||||
}
|
||||
cn.extendDeadline()
|
||||
return cn, nil
|
||||
}
|
||||
|
||||
func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error {
|
||||
addr, err := c.selector.PickServer(item.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cn, err := c.getConn(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cn.condRelease(&err)
|
||||
if err = fn(c, cn.rw, item); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) FlushAll() error {
|
||||
return c.selector.Each(c.flushAllFromAddr)
|
||||
}
|
||||
|
||||
// Get gets the item for the given key. ErrCacheMiss is returned for a
|
||||
// memcache cache miss. The key must be at most 250 bytes in length.
|
||||
func (c *Client) Get(key string) (item *Item, err error) {
|
||||
err = c.withKeyAddr(key, func(addr net.Addr) error {
|
||||
return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it })
|
||||
})
|
||||
if err == nil && item == nil {
|
||||
err = ErrCacheMiss
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Touch updates the expiry for the given key. The seconds parameter is either
|
||||
// a Unix timestamp or, if seconds is less than 1 month, the number of seconds
|
||||
// into the future at which time the item will expire. ErrCacheMiss is returned if the
|
||||
// key is not in the cache. The key must be at most 250 bytes in length.
|
||||
func (c *Client) Touch(key string, seconds int32) (err error) {
|
||||
return c.withKeyAddr(key, func(addr net.Addr) error {
|
||||
return c.touchFromAddr(addr, []string{key}, seconds)
|
||||
})
|
||||
}

func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) {
	if !legalKey(key) {
		return ErrMalformedKey
	}
	addr, err := c.selector.PickServer(key)
	if err != nil {
		return err
	}
	return fn(addr)
}

func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
	cn, err := c.getConn(addr)
	if err != nil {
		return err
	}
	defer cn.condRelease(&err)
	return fn(cn.rw)
}

func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
	return c.withKeyAddr(key, func(addr net.Addr) error {
		return c.withAddrRw(addr, fn)
	})
}

func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error {
	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
		if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
			return err
		}
		if err := rw.Flush(); err != nil {
			return err
		}
		if err := parseGetResponse(rw.Reader, cb); err != nil {
			return err
		}
		return nil
	})
}

// flushAllFromAddr sends the flush_all command to the given addr.
func (c *Client) flushAllFromAddr(addr net.Addr) error {
	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
		if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
			return err
		}
		if err := rw.Flush(); err != nil {
			return err
		}
		line, err := rw.ReadSlice('\n')
		if err != nil {
			return err
		}
		switch {
		case bytes.Equal(line, resultOk):
			break
		default:
			return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line))
		}
		return nil
	})
}

func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
		for _, key := range keys {
			if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
				return err
			}
			if err := rw.Flush(); err != nil {
				return err
			}
			line, err := rw.ReadSlice('\n')
			if err != nil {
				return err
			}
			switch {
			case bytes.Equal(line, resultTouched):
				break
			case bytes.Equal(line, resultNotFound):
				return ErrCacheMiss
			default:
				return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
			}
		}
		return nil
	})
}

// GetMulti is a batch version of Get. The returned map from keys to
// items may have fewer elements than the input slice, due to memcache
// cache misses. Each key must be at most 250 bytes in length.
// If no error is returned, the returned map will also be non-nil.
func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
	var lk sync.Mutex
	m := make(map[string]*Item)
	addItemToMap := func(it *Item) {
		lk.Lock()
		defer lk.Unlock()
		m[it.Key] = it
	}

	keyMap := make(map[net.Addr][]string)
	for _, key := range keys {
		if !legalKey(key) {
			return nil, ErrMalformedKey
		}
		addr, err := c.selector.PickServer(key)
		if err != nil {
			return nil, err
		}
		keyMap[addr] = append(keyMap[addr], key)
	}

	ch := make(chan error, buffered)
	for addr, keys := range keyMap {
		go func(addr net.Addr, keys []string) {
			ch <- c.getFromAddr(addr, keys, addItemToMap)
		}(addr, keys)
	}

	var err error
	for _ = range keyMap {
		if ge := <-ch; ge != nil {
			err = ge
		}
	}
	return m, err
}
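
// A sketch of a batched lookup: keys that miss are simply absent from the
// returned map, so callers check membership rather than expecting an error:
//
//	items, err := mc.GetMulti([]string{"a", "b", "c"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, key := range []string{"a", "b", "c"} {
//		if it, ok := items[key]; ok {
//			log.Printf("%s = %s", key, it.Value)
//		} else {
//			log.Printf("%s: cache miss", key)
//		}
//	}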

// parseGetResponse reads a GET response from r and calls cb for each
// read and allocated Item.
func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
	for {
		line, err := r.ReadSlice('\n')
		if err != nil {
			return err
		}
		if bytes.Equal(line, resultEnd) {
			return nil
		}
		it := new(Item)
		size, err := scanGetResponseLine(line, it)
		if err != nil {
			return err
		}
		it.Value, err = ioutil.ReadAll(io.LimitReader(r, int64(size)+2))
		if err != nil {
			return err
		}
		if !bytes.HasSuffix(it.Value, crlf) {
			return fmt.Errorf("memcache: corrupt get result read")
		}
		it.Value = it.Value[:size]
		cb(it)
	}
}

// scanGetResponseLine populates it and returns the declared size of the item.
// It does not read the bytes of the item.
func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
	pattern := "VALUE %s %d %d %d\r\n"
	dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
	if bytes.Count(line, space) == 3 {
		pattern = "VALUE %s %d %d\r\n"
		dest = dest[:3]
	}
	n, err := fmt.Sscanf(string(line), pattern, dest...)
	if err != nil || n != len(dest) {
		return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
	}
	return size, nil
}
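
// For reference, the two response-line shapes scanGetResponseLine accepts,
// per the memcached text protocol ("gets" responses, as sent by getFromAddr,
// include a trailing CAS id; plain "get" responses omit it):
//
//	VALUE <key> <flags> <bytes> <casid>\r\n   e.g. "VALUE greeting 0 5 42\r\n"
//	VALUE <key> <flags> <bytes>\r\n           e.g. "VALUE greeting 0 5\r\n"
//
// Either line is followed by exactly <bytes> bytes of value data plus a CRLF,
// which parseGetResponse consumes via io.LimitReader above.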

// Set writes the given item, unconditionally.
func (c *Client) Set(item *Item) error {
	return c.onItem(item, (*Client).set)
}

func (c *Client) set(rw *bufio.ReadWriter, item *Item) error {
	return c.populateOne(rw, "set", item)
}

// Add writes the given item, if no value already exists for its
// key. ErrNotStored is returned if that condition is not met.
func (c *Client) Add(item *Item) error {
	return c.onItem(item, (*Client).add)
}

func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
	return c.populateOne(rw, "add", item)
}

// Replace writes the given item, but only if the server *does*
// already hold data for this key.
func (c *Client) Replace(item *Item) error {
	return c.onItem(item, (*Client).replace)
}

func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
	return c.populateOne(rw, "replace", item)
}

// CompareAndSwap writes the given item that was previously returned
// by Get, if the value was neither modified nor evicted between the
// Get and the CompareAndSwap calls. The item's Key should not change
// between calls but all other item fields may differ. ErrCASConflict
// is returned if the value was modified in between the
// calls. ErrNotStored is returned if the value was evicted in between
// the calls.
func (c *Client) CompareAndSwap(item *Item) error {
	return c.onItem(item, (*Client).cas)
}
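
// A sketch of the optimistic read-modify-write loop CompareAndSwap enables
// (the retry policy is illustrative, not part of this package):
//
//	for {
//		it, err := mc.Get("note")
//		if err != nil {
//			log.Fatal(err)
//		}
//		it.Value = append(it.Value, '!')
//		err = mc.CompareAndSwap(it)
//		if err == memcache.ErrCASConflict {
//			continue // modified concurrently; re-read and retry
//		}
//		if err != nil {
//			log.Fatal(err)
//		}
//		break
//	}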

func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
	return c.populateOne(rw, "cas", item)
}

func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error {
	if !legalKey(item.Key) {
		return ErrMalformedKey
	}
	var err error
	if verb == "cas" {
		_, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
			verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
	} else {
		_, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
			verb, item.Key, item.Flags, item.Expiration, len(item.Value))
	}
	if err != nil {
		return err
	}
	if _, err = rw.Write(item.Value); err != nil {
		return err
	}
	if _, err := rw.Write(crlf); err != nil {
		return err
	}
	if err := rw.Flush(); err != nil {
		return err
	}
	line, err := rw.ReadSlice('\n')
	if err != nil {
		return err
	}
	switch {
	case bytes.Equal(line, resultStored):
		return nil
	case bytes.Equal(line, resultNotStored):
		return ErrNotStored
	case bytes.Equal(line, resultExists):
		return ErrCASConflict
	case bytes.Equal(line, resultNotFound):
		return ErrCacheMiss
	}
	return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line))
}

func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
	_, err := fmt.Fprintf(rw, format, args...)
	if err != nil {
		return nil, err
	}
	if err := rw.Flush(); err != nil {
		return nil, err
	}
	line, err := rw.ReadSlice('\n')
	return line, err
}

func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
	line, err := writeReadLine(rw, format, args...)
	if err != nil {
		return err
	}
	switch {
	case bytes.Equal(line, resultOK):
		return nil
	case bytes.Equal(line, expect):
		return nil
	case bytes.Equal(line, resultNotStored):
		return ErrNotStored
	case bytes.Equal(line, resultExists):
		return ErrCASConflict
	case bytes.Equal(line, resultNotFound):
		return ErrCacheMiss
	}
	return fmt.Errorf("memcache: unexpected response line: %q", string(line))
}

// Delete deletes the item with the provided key. The error ErrCacheMiss is
// returned if the item didn't already exist in the cache.
func (c *Client) Delete(key string) error {
	return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
		return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
	})
}

// DeleteAll deletes all items in the cache.
func (c *Client) DeleteAll() error {
	return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
		return writeExpectf(rw, resultDeleted, "flush_all\r\n")
	})
}

// Increment atomically increments key by delta. The return value is
// the new value after being incremented or an error. If the value
// didn't exist in memcached the error is ErrCacheMiss. The value in
// memcached must be a decimal number, or an error will be returned.
// On 64-bit overflow, the new value wraps around.
func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
	return c.incrDecr("incr", key, delta)
}

// Decrement atomically decrements key by delta. The return value is
// the new value after being decremented or an error. If the value
// didn't exist in memcached the error is ErrCacheMiss. The value in
// memcached must be a decimal number, or an error will be returned.
// On underflow, the new value is capped at zero and does not wrap
// around.
func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
	return c.incrDecr("decr", key, delta)
}
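
// A sketch of counter usage: the key must already hold an ASCII decimal
// value (set it first), otherwise memcached reports a client error:
//
//	if err := mc.Set(&memcache.Item{Key: "hits", Value: []byte("0")}); err != nil {
//		log.Fatal(err)
//	}
//	n, err := mc.Increment("hits", 1) // n == 1 on success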

func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
	var val uint64
	err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
		line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
		if err != nil {
			return err
		}
		switch {
		case bytes.Equal(line, resultNotFound):
			return ErrCacheMiss
		case bytes.HasPrefix(line, resultClientErrorPrefix):
			errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
			return errors.New("memcache: client error: " + string(errMsg))
		}
		val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
		if err != nil {
			return err
		}
		return nil
	})
	return val, err
}

Godeps/_workspace/src/github.com/bradfitz/gomemcache/memcache/selector.go (generated, vendored, new file, 114 lines)
@@ -0,0 +1,114 @@
/*
Copyright 2011 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package memcache

import (
	"hash/crc32"
	"net"
	"strings"
	"sync"
)

// ServerSelector is the interface that selects a memcache server
// as a function of the item's key.
//
// All ServerSelector implementations must be safe for concurrent use
// by multiple goroutines.
type ServerSelector interface {
	// PickServer returns the server address that a given item
	// should be shared onto.
	PickServer(key string) (net.Addr, error)
	Each(func(net.Addr) error) error
}

// ServerList is a simple ServerSelector. Its zero value is usable.
type ServerList struct {
	mu    sync.RWMutex
	addrs []net.Addr
}

// SetServers changes a ServerList's set of servers at runtime and is
// safe for concurrent use by multiple goroutines.
//
// Each server is given equal weight. A server is given more weight
// if it's listed multiple times.
//
// SetServers returns an error if any of the server names fail to
// resolve. No attempt is made to connect to the server. If any error
// is returned, no changes are made to the ServerList.
func (ss *ServerList) SetServers(servers ...string) error {
	naddr := make([]net.Addr, len(servers))
	for i, server := range servers {
		if strings.Contains(server, "/") {
			addr, err := net.ResolveUnixAddr("unix", server)
			if err != nil {
				return err
			}
			naddr[i] = addr
		} else {
			tcpaddr, err := net.ResolveTCPAddr("tcp", server)
			if err != nil {
				return err
			}
			naddr[i] = tcpaddr
		}
	}

	ss.mu.Lock()
	defer ss.mu.Unlock()
	ss.addrs = naddr
	return nil
}

// Each iterates over each server calling the given function.
func (ss *ServerList) Each(f func(net.Addr) error) error {
	ss.mu.RLock()
	defer ss.mu.RUnlock()
	for _, a := range ss.addrs {
		if err := f(a); nil != err {
			return err
		}
	}
	return nil
}

// keyBufPool returns []byte buffers for use by PickServer's call to
// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the
// copies, which at least are bounded in size and small)
var keyBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 256)
		return &b
	},
}

func (ss *ServerList) PickServer(key string) (net.Addr, error) {
	ss.mu.RLock()
	defer ss.mu.RUnlock()
	if len(ss.addrs) == 0 {
		return nil, ErrNoServers
	}
	if len(ss.addrs) == 1 {
		return ss.addrs[0], nil
	}
	bufp := keyBufPool.Get().(*[]byte)
	n := copy(*bufp, key)
	cs := crc32.ChecksumIEEE((*bufp)[:n])
	keyBufPool.Put(bufp)

	return ss.addrs[cs%uint32(len(ss.addrs))], nil
}
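
// A sketch of standalone ServerList use: the same key always maps to the
// same server, since PickServer takes crc32(key) modulo the address count:
//
//	var ss memcache.ServerList
//	if err := ss.SetServers("10.0.0.1:11211", "10.0.0.2:11211"); err != nil {
//		log.Fatal(err)
//	}
//	addr, err := ss.PickServer("session-abc123")
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Println("session-abc123 lives on", addr)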

@@ -90,7 +90,7 @@ Replace X.Y.Z by actual version number.
cd $GOPATH/src/github.com/grafana/grafana
go run build.go setup (only needed once to install godep)
godep restore (will pull down all golang lib dependencies in your current GOPATH)
godep go run build.go build
go run build.go build
```

### Building frontend assets
@@ -5,7 +5,7 @@ os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\grafana\grafana

environment:
  nodejs_version: "0.12.2"
  nodejs_version: "4"
  GOPATH: c:\gopath

install:
build.go (19 lines changed)
@@ -76,6 +76,14 @@ func main() {
		grunt("release")
		createLinuxPackages()

	case "pkg-rpm":
		grunt("release")
		createRpmPackages()

	case "pkg-deb":
		grunt("release")
		createDebPackages()

	case "latest":
		makeLatestDistCopies()

@@ -147,7 +155,7 @@ type linuxPackageOptions struct {
	depends []string
}

func createLinuxPackages() {
func createDebPackages() {
	createPackage(linuxPackageOptions{
		packageType: "deb",
		homeDir:     "/usr/share/grafana",
@@ -167,7 +175,9 @@ func createLinuxPackages() {

		depends: []string{"adduser", "libfontconfig"},
	})
}

func createRpmPackages() {
	createPackage(linuxPackageOptions{
		packageType: "rpm",
		homeDir:     "/usr/share/grafana",
@@ -189,6 +199,11 @@ func createLinuxPackages() {
	})
}

func createLinuxPackages() {
	createDebPackages()
	createRpmPackages()
}

func createPackage(options linuxPackageOptions) {
	packageRoot, _ := ioutil.TempDir("", "grafana-linux-pack")

@@ -315,6 +330,8 @@ func build(pkg string, tags []string) {
	args = append(args, "-o", binary)
	args = append(args, pkg)
	setBuildEnv()

	runPrint("go", "version")
	runPrint("go", args...)

	// Create an md5 checksum of the binary, to be included in the archive for
@@ -15,6 +15,10 @@ data = data
# Directory where grafana can store logs
#
logs = data/log
#
# Directory where grafana will automatically scan and look for plugins
#
plugins = data/plugins

#################################### Server ####################################
[server]
@@ -67,7 +71,7 @@ path = grafana.db

#################################### Session ####################################
[session]
# Either "memory", "file", "redis", "mysql", "postgres", default is "file"
# Either "memory", "file", "redis", "mysql", "postgres", "memcache", default is "file"
provider = file

# Provider config options
@@ -76,6 +80,8 @@ provider = file
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
# memcache: 127.0.0.1:11211

provider_config = sessions

@@ -125,6 +131,12 @@ disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
data_source_proxy_whitelist =

[snapshots]
# snapshot sharing options
external_enabled = true
external_snapshot_url = https://snapshots-origin.raintank.io
external_snapshot_name = Publish to snapshot.raintank.io

#################################### Users ####################################
[users]
# disable user signup / registration
@@ -142,6 +154,9 @@ auto_assign_org_role = Viewer
# Require email validation before sign up completes
verify_email_enabled = false

# Background text for the user field on the login page
login_hint = email or username

#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
@@ -245,6 +260,18 @@ daily_rotate = true
# Expired days of log file (delete after max days), default is 7
max_days = 7

[log.syslog]
level =
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
network =
address =

# Syslog facility. user, daemon and local0 through local7 are valid.
facility =

# Syslog tag. By default, the process' argv[0] is used.
tag =

#################################### AMQP Event Publisher ##########################
[event_publisher]
enabled = false
@ -15,7 +15,12 @@
|
||||
# Directory where grafana can store logs
|
||||
#
|
||||
;logs = /var/log/grafana
|
||||
#
|
||||
# Directory where grafana will automatically scan and look for plugins
|
||||
#
|
||||
;plugins = /var/lib/grafana/plugins
|
||||
|
||||
#
|
||||
#################################### Server ####################################
|
||||
[server]
|
||||
# Protocol (http or https)
|
||||
@ -120,6 +125,12 @@
|
||||
# data source proxy whitelist (ip_or_domain:port seperated by spaces)
|
||||
;data_source_proxy_whitelist =
|
||||
|
||||
[snapshots]
|
||||
# snapshot sharing options
|
||||
;external_enabled = true
|
||||
;external_snapshot_url = https://snapshots-origin.raintank.io
|
||||
;external_snapshot_name = Publish to snapshot.raintank.io
|
||||
|
||||
#################################### Users ####################################
|
||||
[users]
|
||||
# disable user signup / registration
|
||||
@ -134,6 +145,9 @@
|
||||
# Default role new users will be automatically assigned (if disabled above is set to true)
|
||||
;auto_assign_org_role = Viewer
|
||||
|
||||
# Background text for the user field on the login page
|
||||
;login_hint = email or username
|
||||
|
||||
#################################### Anonymous Auth ##########################
|
||||
[auth.anonymous]
|
||||
# enable anonymous access
|
||||
|
@@ -1 +1 @@
2.5.0
2.6.0
@@ -45,6 +45,7 @@ pages:

- ['guides/basic_concepts.md', 'User Guides', 'Basic Concepts']
- ['guides/gettingstarted.md', 'User Guides', 'Getting Started']
- ['guides/whats-new-in-v2-6.md', 'User Guides', "What's New in Grafana v2.6"]
- ['guides/whats-new-in-v2-5.md', 'User Guides', "What's New in Grafana v2.5"]
- ['guides/whats-new-in-v2-1.md', 'User Guides', "What's New in Grafana v2.1"]
- ['guides/whats-new-in-v2.md', 'User Guides', "What's New in Grafana v2.0"]
@@ -52,13 +53,14 @@ pages:

- ['reference/graph.md', 'Reference', 'Graph Panel']
- ['reference/singlestat.md', 'Reference', 'Singlestat Panel']
- ['reference/table_panel.md', 'Reference', 'Table Panel']
- ['reference/dashlist.md', 'Reference', 'Dashboard List Panel']
- ['reference/sharing.md', 'Reference', 'Sharing']
- ['reference/annotations.md', 'Reference', 'Annotations']
- ['reference/timerange.md', 'Reference', 'Time Range Controls']
- ['reference/search.md', 'Reference', 'Dashboard Search']
- ['reference/templating.md', 'Reference', 'Templated Dashboards']
- ['reference/scripting.md', 'Reference', 'Scripted Dashboards']
- ['reference/timerange.md', 'Reference', 'Time Range']
- ['reference/search.md', 'Reference', 'Search']
- ['reference/templating.md', 'Reference', 'Templating']
- ['reference/scripting.md', 'Reference', 'Scripting']
- ['reference/playlist.md', 'Reference', 'Playlist']
- ['reference/export_import.md', 'Reference', 'Import & Export']
- ['reference/admin.md', 'Reference', 'Administration']
@@ -63,15 +63,10 @@ Name | Description
`namespaces()` | Returns a list of namespaces CloudWatch supports.
`metrics(namespace)` | Returns a list of metrics in the namespace.
`dimension_keys(namespace)` | Returns a list of dimension keys in the namespace.
`dimension_values(region, namespace, metric)` | Returns a list of dimension values matching the specified `region`, `namespace` and `metric`.
`dimension_values(region, namespace, metric, dimension_key)` | Returns a list of dimension values matching the specified `region`, `namespace`, `metric` and `dimension_key`.

For details about the metrics CloudWatch provides, please refer to the [CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).

If you want to filter dimension values by another dimension key/value pair, you can specify an optional parameter like this:
```sql
dimension_values(region, namespace, metric, dim_key1=dim_val1,dim_key2=dim_val2,...)
```



## Cost
@@ -53,6 +53,35 @@ a time pattern for the index name or a wildcard.
The Elasticsearch query editor allows you to select multiple metrics and group by multiple terms or filters. Use the plus and minus icons to the right to add / remove
metrics or group bys. Some metrics and group by clauses have options; click the option text to expand the row to view and edit metric or group by options.

## Pipeline metrics

If you have Elasticsearch 2.x and Grafana 2.6 or above then you can use pipeline metric aggregations like
**Moving Average** and **Derivative**. Elasticsearch pipeline metrics must be based on another metric. Use the eye icon next to a metric
to hide it from appearing in the graph. This is useful for metrics that exist in the query only to serve
as input to a pipeline metric.



## Templating

The Elasticsearch datasource supports two types of queries you can use to fill template variables with values.

### Possible values for a field

```json
{"find": "terms", "field": "@hostname"}
```

### Fields filtered by type
```json
{"find": "fields", "type": "string"}
```

### Multi format / All format
Use Lucene format.


## Annotations
TODO
@@ -38,29 +38,47 @@ Password | Database user's password
> Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source.

## InfluxDB 0.9.x
## Query Editor




You find the InfluxDB editor in the metrics tab in a Graph or Singlestat panel's edit mode. You enter edit mode by clicking the
panel title, then edit. The editor allows you to select metrics and tags.

### Editor tag filters
### Filter data (WHERE)
To add a tag filter click the plus icon to the right of the `WHERE` condition. You can remove tag filters by clicking on
the tag key and selecting `--remove tag filter--`.

### Regex matching
**Regex matching**

You can type in regex patterns for metric names or tag filter values; be sure to wrap the regex pattern in forward slashes (`/`). Grafana
will automatically adjust the filter tag condition to use the InfluxDB regex match condition operator (`=~`).

### Editor group by
To group by a tag click the plus icon after the `GROUP BY ($interval)` text. Pick a tag from the dropdown that appears.
You can remove the group by by clicking on the tag and then selecting `--remove group by--` from the dropdown.

### Field & Aggregation functions
In the `SELECT` row you can specify what fields and functions you want to use. If you have a
group by time you need an aggregation function. Some functions like derivative require an aggregation function.

### Editor RAW Query
You can switch to raw query mode by pressing the pen icon.
The editor tries to simplify and unify this part of the query. For example:


> If you use Raw Query be sure your query at minimum has a `WHERE $timeFilter` clause and ends with `order by asc`.
The above will generate the following InfluxDB `SELECT` clause:

```sql
SELECT derivative(mean("value"), 10s) /10 AS "REQ/s" FROM ....
```

#### Select multiple fields
Use the plus button and select Field > field to add another SELECT clause. You can also
specify an asterisk `*` to select all fields.

### Group By
To group by a tag click the plus icon at the end of the GROUP BY row. Pick a tag from the dropdown that appears.
You can remove the group by by clicking on the `tag` and then clicking on the x icon.

### Text Editor Mode (RAW)
You can switch to raw query mode by clicking the hamburger icon and then `Switch editor mode`.

> If you use Raw Query be sure your query at minimum has a `WHERE $timeFilter` clause.
> Also please always have a group by time and an aggregation function, otherwise InfluxDB can easily return hundreds of thousands
> of data points that will hang the browser.

@@ -72,7 +90,15 @@ You can switch to raw query mode by pressing the pen icon.
- $tag_hostname = replaced with the value of the hostname tag
- You can also use [[tag_hostname]] pattern replacement syntax

### Templating
### Table query / raw data



You can remove the group by time by clicking on the `time` part and then the `x` icon. You can
change the option `Format As` to `Table` if you want to show raw data in the `Table` panel.


## Templating
You can create a template variable in Grafana and have that variable filled with values from any InfluxDB metric exploration query.
You can then use this variable in your InfluxDB metric queries.

@@ -93,7 +119,7 @@ SHOW TAG VALUES WITH KEY = "hostname" WHERE region =~ /$region/



### Annotations
## Annotations
Annotations allow you to overlay rich event information on top of graphs.

An example query:
@@ -102,10 +128,4 @@ An example query:
SELECT title, description from events WHERE $timeFilter order asc
```

### InfluxDB 0.8.x


docs/sources/guides/whats-new-in-v2-6.md (new file, 123 lines)
@@ -0,0 +1,123 @@
---
page_title: What's New in Grafana v2.6
page_description: What's new in Grafana v2.6
page_keywords: grafana, new, changes, features, documentation, table
---

# What's new in Grafana v2.6

## Release highlights
The release includes a new Table panel, a new InfluxDB query editor, support for Elasticsearch Pipeline Metrics and
support for multiple CloudWatch credentials.

## Table Panel
<img src="/img/v2/table-panel.png">

The new table panel is very flexible, supporting multiple modes for time series as well as for
table, annotation and raw JSON data. It also provides date formatting, value formatting and coloring options.

### Time series to rows

In the simplest mode you can turn time series to rows. This means you get a `Time`, `Metric` and a `Value` column,
where `Metric` is the name of the time series.

<img src="/img/v2.6/table_ts_to_rows.png">

### Table Transform
Above you see the options tab for the **Table Panel**. The most important option is the `To Table Transform`.
This option controls how the result of the metric/data query is turned into a table.

### Column Styles
The column styles allow you to control how dates and numbers are formatted.

### Time series to columns
This transform allows you to take multiple time series and group them by time, which will result in a `Time` column
and a column for each time series.

<img src="/img/v2.6/table_ts_to_columns.png">

In the screenshot above you can see how the same time series query as in the previous example can be transformed into
a different table by changing the `To Table Transform` to `Time series to columns`.

### Time series to aggregations
This transform works very similarly to the legend values in the Graph panel. Each series gets its own row. In the Options
tab you can select which aggregations you want using the plus button in the Columns section.

<img src="/img/v2.6/table_ts_to_aggregations.png">

You have to think about how accurate the aggregations will be. It depends on what aggregation is used in the time series query,
how many data points are fetched, etc. The time series aggregations are calculated by Grafana after aggregation is performed
by the time series database.

### Raw logs queries

If you want to show documents from Elasticsearch pick `Raw Document` as the first metric.

<img src="/img/v2.6/elastic_raw_doc.png">

This in combination with the `JSON Data` table transform will allow you to pick which fields in the document
you want to show in the table.

<img src="/img/v2.6/table_json_data.png">

### Elasticsearch aggregations

You can also make Elasticsearch aggregation queries without a `Date Histogram`. This allows you to
use Elasticsearch metric aggregations to get accurate aggregations for the selected time range.

<img src="/img/v2.6/elastic_aggregations.png">

### Annotations

The table can also show any annotations you have enabled in the dashboard.

<img src="/img/v2.6/table_annotations.png">

## The New InfluxDB Editor
The new InfluxDB editor is a lot more flexible and powerful. It supports nested functions, like `derivative`.
It also uses the same technique as the Graphite query editor in that it presents nested functions as a chain of function
transformations. It tries to simplify and unify the complicated nature of InfluxDB's query language.

<img src="/img/v2.6/influxdb_editor_v3.gif">

In the `SELECT` row you can specify what fields and functions you want to use. If you have a
group by time you need an aggregation function. Some functions like derivative require an aggregation function.

The editor tries to simplify and unify this part of the query. For example:


The above will generate the following InfluxDB `SELECT` clause:

```sql
SELECT derivative(mean("value"), 10s) /10 AS "REQ/s" FROM ....
```

### Select multiple fields
Use the plus button and select Field > field to add another SELECT clause. You can also
specify an asterisk `*` to select all fields.

### Group By
To group by a tag click the plus icon at the end of the GROUP BY row. Pick a tag from the dropdown that appears.
You can remove the group by by clicking on the `tag` and then clicking on the x icon.

The new editor also allows you to remove the group by time and select `raw` table data, which is very useful
in combination with the new Table panel to show raw log data stored in InfluxDB.

<img src="/img/v2.6/table_influxdb_logs.png">

## Pipeline metrics

If you have Elasticsearch 2.x and Grafana 2.6 or above then you can use pipeline metric aggregations like
**Moving Average** and **Derivative**. Elasticsearch pipeline metrics must be based on another metric. Use the eye icon next to a metric
to hide it from appearing in the graph.



## Changelog
For a detailed list and links to GitHub issues for everything included in the 2.6 release please
view the [CHANGELOG.md](https://github.com/grafana/grafana/blob/master/CHANGELOG.md) file.

- - -

<a href="http://grafana.org/download">Download Grafana 2.6 now</a>
@@ -355,7 +355,7 @@ Set to `true` to enable auto sign up of users who do not exist in Grafana DB. De

### provider

Valid values are `memory`, `file`, `mysql`, `postgres`. Default is `file`.
Valid values are `memory`, `file`, `mysql`, `postgres`, `memcache`. Default is `file`.

### provider_config

@@ -365,6 +365,7 @@ session provider you have configured.
- **file:** session file path, e.g. `data/sessions`
- **mysql:** go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
- **postgres:** ex: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
- **memcache:** ex: 127.0.0.1:11211

If you use MySQL or Postgres as the session store you need to create the
session table manually.
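
For reference, a minimal `[session]` section using the new memcache provider might look like the sketch below (the address is a placeholder; point it at your own memcached instance):

```ini
[session]
provider = memcache
provider_config = 127.0.0.1:11211
```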

@@ -10,13 +10,13 @@ page_keywords: grafana, installation, debian, ubuntu, guide

Description | Download
------------ | -------------
.deb for Debian-based Linux | [grafana_2.5.0_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_2.5.0_amd64.deb)
.deb for Debian-based Linux | [grafana_2.6.0_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb)

## Install

$ wget https://grafanarel.s3.amazonaws.com/builds/grafana_2.5.0_amd64.deb
$ wget https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb
$ sudo apt-get install -y adduser libfontconfig
$ sudo dpkg -i grafana_2.5.0_amd64.deb
$ sudo dpkg -i grafana_2.6.0_amd64.deb

## APT Repository
@@ -10,24 +10,24 @@ page_keywords: grafana, installation, centos, fedora, opensuse, redhat, guide

Description | Download
------------ | -------------
.RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [grafana-2.5.0-1.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-2.5.0-1.x86_64.rpm)
.RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [grafana-2.6.0-1.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm)

## Install from package file

You can install Grafana using Yum directly.

$ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-2.5.0-1.x86_64.rpm
$ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm

Or install manually using `rpm`.

#### On CentOS / Fedora / Redhat:

$ sudo yum install initscripts fontconfig
$ sudo rpm -Uvh grafana-2.5.0-1.x86_64.rpm
$ sudo rpm -Uvh grafana-2.6.0-1.x86_64.rpm

#### On OpenSuse:

$ sudo rpm -i --nodeps grafana-2.5.0-1.x86_64.rpm
$ sudo rpm -i --nodeps grafana-2.6.0-1.x86_64.rpm

## Install via YUM Repository
docs/sources/reference/table_panel.md (new file, 86 lines)
@@ -0,0 +1,86 @@
---
page_title: Table Panel
page_description: Table Panel Reference
page_keywords: grafana, table, panel, documentation
---

# Table Panel

<img src="/img/v2/table-panel.png">

The new table panel is very flexible, supporting multiple modes for time series as well as for
table, annotation and raw JSON data. It also provides date formatting and value formatting and coloring options.

To view table panels in action and test different configurations with sample data, check out the [Table Panel Showcase in the Grafana Playground](http://play.grafana.org/dashboard/db/table-panel-showcase).

## Options overview

The table panel has many ways to manipulate your data for optimal presentation.

<img class="no-shadow" src="/img/v2/table-config.png">

1. `Data`: Control how your query is transformed into a table.
2. `Table Display`: Table display options.
3. `Column Styles`: Column value formatting and display options.

## Data to Table

<img class="no-shadow" src="/img/v2/table-data-options.png">

The data section contains the **To Table Transform (1)**. This is the primary option for how your data/metric
query should be transformed into a table format. The **Columns (2)** option allows you to select what columns
you want in the table. It is only applicable for some transforms.

### Time series to rows

<img src="/img/v2/table_ts_to_rows.png">

In the simplest mode you can turn time series to rows. This means you get a `Time`, `Metric` and a `Value` column, where `Metric` is the name of the time series.

### Time series to columns



This transform allows you to take multiple time series and group them by time, which will result in the primary column being `Time` and a column for each time series.

### Time series aggregations


This table transformation will lay out your table into rows by metric, allowing columns of `Avg`, `Min`, `Max`, `Total`, `Current` and `Count`. More than one column can be added.

### Annotations


If you have annotations enabled in the dashboard you can have the table show them. If you configure this
mode then any queries you have in the metrics tab will be ignored.

### JSON Data


If you have an Elasticsearch **Raw Document** query or an Elasticsearch query without a `date histogram` use this
transform mode and pick the columns using the **Columns** section.



## Table Display

<img class="no-shadow" src="/img/v2/table-display.png">

1. `Pagination (Page Size)`: The page size is the threshold at which the table rows will be broken into pages. For example, if your table had 95 records with a page size of 10, your table would be split across 10 pages.
2. `Scroll`: The `scroll bar` checkbox toggles the ability to scroll within the panel; when unchecked, the panel height will grow to display all rows.
3. `Font Size`: The `font size` field allows you to increase or decrease the size for the panel, relative to the default font size.


## Column Styles

The column styles allow you to control how dates and numbers are formatted.

<img class="no-shadow" src="/img/v2/Column-Options.png">

1. `Name or regex`: The Name or Regex field controls what columns the rule should be applied to. The regex or name filter will be matched against the column name, not against column values.
2. `Type`: The three supported types are `Number`, `String` and `Date`.
3. `Format`: Specify date format. Only available when `Type` is set to `Date`.
4. `Coloring` and `Thresholds`: Specify color mode and threshold limits.
5. `Unit` and `Decimals`: Specify unit and decimal precision for numbers.
6. `Add column style rule`: Add new column rule.
@@ -1,3 +1,4 @@
<li><a class='version' href='/v2.6'>Version v2.6</a></li>
<li><a class='version' href='/v2.5'>Version v2.5</a></li>
<li><a class='version' href='/v2.1'>Version v2.1</a></li>
<li><a class='version' href='/v2.0'>Version v2.0</a></li>
@@ -4,14 +4,18 @@ module.exports = function(config) {
  config.set({
    basePath: __dirname + '/public_gen',

    frameworks: ['mocha', 'requirejs', 'expect', 'sinon'],
    frameworks: ['mocha', 'expect', 'sinon'],

    // list of files / patterns to load in the browser
    files: [
      'vendor/npm/es5-shim/es5-shim.js',
      'vendor/npm/es5-shim/es5-sham.js',
      'vendor/npm/es6-shim/es6-shim.js',
      'vendor/npm/es6-promise/dist/es6-promise.js',
      'vendor/npm/systemjs/dist/system.src.js',
      'test/test-main.js',
      {pattern: 'app/**/*.js', included: false},
      {pattern: 'vendor/**/*.js', included: false},
      {pattern: 'test/**/*.js', included: false}

      {pattern: '**/*.js', included: false},
    ],

    // list of files to exclude
@@ -23,7 +27,7 @@ module.exports = function(config) {
    logLevel: config.LOG_INFO,
    autoWatch: true,
    browsers: ['PhantomJS'],
    captureTimeout: 60000,
    captureTimeout: 2000,
    singleRun: true,
    autoWatchBatchDelay: 1000,
package.json (62 lines changed)
@@ -4,53 +4,53 @@
    "company": "Coding Instinct AB"
  },
  "name": "grafana",
  "version": "2.6.0-pre1",
  "version": "3.0.0-pre1",
  "repository": {
    "type": "git",
    "url": "http://github.com/torkelo/grafana.git"
  },
  "devDependencies": {
    "angular2": "2.0.0-beta.0",
    "es6-promise": "^3.0.2",
    "es6-shim": "^0.33.3",
    "expect.js": "~0.2.0",
    "glob": "~3.2.7",
    "grunt": "~0.4.0",
    "grunt-angular-templates": "^0.5.5",
    "grunt-cli": "~0.1.13",
    "grunt-contrib-clean": "~0.5.0",
    "grunt-contrib-compress": "~0.13.0",
    "grunt-contrib-concat": "^0.4.0",
    "grunt-contrib-connect": "~0.5.0",
    "grunt-contrib-copy": "~0.5.0",
    "grunt-contrib-cssmin": "~0.6.1",
    "grunt-contrib-clean": "~0.7.0",
    "grunt-contrib-compress": "~0.14.0",
    "grunt-contrib-concat": "^0.5.1",
    "grunt-contrib-copy": "~0.8.2",
    "grunt-contrib-cssmin": "~0.14.0",
    "grunt-contrib-htmlmin": "~0.6.0",
    "grunt-contrib-jshint": "~0.10.0",
    "grunt-contrib-jshint": "~0.11.3",
    "grunt-contrib-less": "~0.7.0",
    "grunt-contrib-requirejs": "~0.4.4",
    "grunt-contrib-uglify": "~0.8.0",
    "grunt-contrib-uglify": "~0.11.0",
    "grunt-contrib-watch": "^0.6.1",
    "grunt-filerev": "^0.2.1",
    "grunt-git-describe": "~2.3.2",
    "grunt-karma": "~0.8.3",
    "grunt-ng-annotate": "^0.9.2",
    "grunt-string-replace": "~0.2.4",
    "grunt-karma": "~0.12.1",
    "grunt-ng-annotate": "^1.0.1",
    "grunt-string-replace": "~1.2.1",
    "grunt-systemjs-builder": "^0.2.5",
    "grunt-tslint": "^2.5.0",
    "grunt-typescript": "^0.7.0",
    "grunt-typescript": "^0.8.0",
    "grunt-usemin": "3.0.0",
    "jshint-stylish": "~0.1.5",
    "karma": "~0.12.31",
    "karma-chrome-launcher": "~0.1.4",
    "karma-coffee-preprocessor": "~0.1.2",
    "karma-coverage": "0.3.1",
    "karma-coveralls": "0.1.5",
    "karma": "~0.13.15",
    "karma-chrome-launcher": "~0.2.2",
    "karma-coverage": "0.5.3",
    "karma-coveralls": "1.1.2",
    "karma-expect": "~1.1.0",
    "karma-mocha": "~0.1.10",
    "karma-phantomjs-launcher": "0.1.4",
    "karma-requirejs": "0.2.2",
    "karma-script-launcher": "0.1.0",
    "load-grunt-tasks": "0.2.0",
    "mocha": "2.2.4",
    "requirejs": "2.1.17",
    "rjs-build-analysis": "0.0.3",
    "tslint": "^2.5.0-beta"
    "karma-mocha": "~0.2.1",
    "karma-phantomjs-launcher": "0.2.1",
    "load-grunt-tasks": "3.4.0",
    "mocha": "2.3.4",
    "reflect-metadata": "0.1.2",
    "rxjs": "5.0.0-beta.0",
    "systemjs": "0.19.6",
    "zone.js": "0.5.10"
  },
  "engines": {
    "node": "0.4.x",
@@ -62,10 +62,14 @@
  },
  "license": "Apache-2.0",
  "dependencies": {
    "es5-shim": "^4.4.1",
    "grunt-jscs": "~1.5.x",
    "grunt-sync": "^0.4.1",
    "karma-sinon": "^1.0.3",
    "lodash": "^2.4.1",
    "sinon": "1.16.1"
    "sinon": "1.16.1",
    "systemjs-builder": "^0.14.15",
    "tslint": "^3.2.0",
    "typescript": "^1.7.5"
  }
}

@@ -15,3 +15,5 @@ CONF_DIR=/etc/grafana
CONF_FILE=/etc/grafana/grafana.ini

RESTART_ON_UPGRADE=false

PLUGINS_DIR=/var/lib/grafana/plugins
@@ -30,12 +30,14 @@ GRAFANA_HOME=/usr/share/grafana
CONF_DIR=/etc/grafana
WORK_DIR=$GRAFANA_HOME
DATA_DIR=/var/lib/grafana
PLUGINS_DIR=/var/lib/grafana/plugins
LOG_DIR=/var/log/grafana
CONF_FILE=$CONF_DIR/grafana.ini
MAX_OPEN_FILES=10000
PID_FILE=/var/run/$NAME.pid
DAEMON=/usr/sbin/$NAME

umask 0027

if [ `id -u` -ne 0 ]; then
@@ -59,7 +61,7 @@ if [ -f "$DEFAULT" ]; then
  . "$DEFAULT"
fi

DAEMON_OPTS="--pidfile=${PID_FILE} --config=${CONF_FILE} cfg:default.paths.data=${DATA_DIR} cfg:default.paths.logs=${LOG_DIR}"
DAEMON_OPTS="--pidfile=${PID_FILE} --config=${CONF_FILE} cfg:default.paths.data=${DATA_DIR} cfg:default.paths.logs=${LOG_DIR} cfg:default.paths.plugins=${PLUGINS_DIR}"

case "$1" in
  start)
@@ -14,7 +14,8 @@ ExecStart=/usr/sbin/grafana-server \
  --config=${CONF_FILE} \
  --pidfile=${PID_FILE} \
  cfg:default.paths.logs=${LOG_DIR} \
  cfg:default.paths.data=${DATA_DIR}
  cfg:default.paths.data=${DATA_DIR} \
  cfg:default.paths.plugins=${PLUGINS_DIR}
LimitNOFILE=10000
TimeoutStopSec=20
UMask=0027
@@ -1,6 +1,6 @@
#! /usr/bin/env bash

version=2.5.0
version=2.6.0

wget https://grafanarel.s3.amazonaws.com/builds/grafana_${version}_amd64.deb
@@ -29,6 +29,7 @@ GRAFANA_HOME=/usr/share/grafana
CONF_DIR=/etc/grafana
WORK_DIR=$GRAFANA_HOME
DATA_DIR=/var/lib/grafana
PLUGINS_DIR=/var/lib/grafana/plugins
LOG_DIR=/var/log/grafana
CONF_FILE=$CONF_DIR/grafana.ini
MAX_OPEN_FILES=10000
@@ -63,7 +64,7 @@ fi
# overwrite settings from default file
[ -e /etc/sysconfig/$NAME ] && . /etc/sysconfig/$NAME

DAEMON_OPTS="--pidfile=${PID_FILE} --config=${CONF_FILE} cfg:default.paths.data=${DATA_DIR} cfg:default.paths.logs=${LOG_DIR}"
DAEMON_OPTS="--pidfile=${PID_FILE} --config=${CONF_FILE} cfg:default.paths.data=${DATA_DIR} cfg:default.paths.logs=${LOG_DIR} cfg:default.paths.plugins=${PLUGINS_DIR}"

function isRunning() {
  status -p $PID_FILE $NAME > /dev/null 2>&1
@@ -15,3 +15,5 @@ CONF_DIR=/etc/grafana
CONF_FILE=/etc/grafana/grafana.ini

RESTART_ON_UPGRADE=false

PLUGINS_DIR=/var/lib/grafana/plugins
@@ -14,7 +14,8 @@ ExecStart=/usr/sbin/grafana-server \
  --config=${CONF_FILE} \
  --pidfile=${PID_FILE} \
  cfg:default.paths.logs=${LOG_DIR} \
  cfg:default.paths.data=${DATA_DIR}
  cfg:default.paths.data=${DATA_DIR} \
  cfg:default.paths.plugins=${PLUGINS_DIR}
LimitNOFILE=10000
TimeoutStopSec=20
@@ -68,6 +68,7 @@ func Register(r *macaron.Macaron) {
	r.Post("/api/snapshots/", bind(m.CreateDashboardSnapshotCommand{}), CreateDashboardSnapshot)
	r.Get("/dashboard/snapshot/*", Index)

	r.Get("/api/snapshot/shared-options/", GetSharingOptions)
	r.Get("/api/snapshots/:key", GetDashboardSnapshot)
	r.Get("/api/snapshots-delete/:key", DeleteDashboardSnapshot)
@@ -7,6 +7,7 @@ import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
@@ -30,13 +31,15 @@ type cwRequest struct {

func init() {
	actionHandlers = map[string]actionHandler{
		"GetMetricStatistics": handleGetMetricStatistics,
		"ListMetrics":         handleListMetrics,
		"DescribeInstances":   handleDescribeInstances,
		"__GetRegions":        handleGetRegions,
		"__GetNamespaces":     handleGetNamespaces,
		"__GetMetrics":        handleGetMetrics,
		"__GetDimensions":     handleGetDimensions,
		"GetMetricStatistics":     handleGetMetricStatistics,
		"ListMetrics":             handleListMetrics,
		"DescribeAlarmsForMetric": handleDescribeAlarmsForMetric,
		"DescribeAlarmHistory":    handleDescribeAlarmHistory,
		"DescribeInstances":       handleDescribeInstances,
		"__GetRegions":            handleGetRegions,
		"__GetNamespaces":         handleGetNamespaces,
		"__GetMetrics":            handleGetMetrics,
		"__GetDimensions":         handleGetDimensions,
	}
}

@@ -119,7 +122,107 @@ func handleListMetrics(req *cwRequest, c *middleware.Context) {
		Dimensions: reqParam.Parameters.Dimensions,
	}

	resp, err := svc.ListMetrics(params)
	var resp cloudwatch.ListMetricsOutput
	err := svc.ListMetricsPages(params,
		func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool {
			metrics, _ := awsutil.ValuesAtPath(page, "Metrics")
			for _, metric := range metrics {
				resp.Metrics = append(resp.Metrics, metric.(*cloudwatch.Metric))
			}
			return !lastPage
		})
	if err != nil {
		c.JsonApiErr(500, "Unable to call AWS API", err)
		return
	}

	c.JSON(200, resp)
}
|
||||
|
||||
func handleDescribeAlarmsForMetric(req *cwRequest, c *middleware.Context) {
|
||||
sess := session.New()
|
||||
creds := credentials.NewChainCredentials(
|
||||
[]credentials.Provider{
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{Filename: "", Profile: req.DataSource.Database},
|
||||
&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute},
|
||||
})
|
||||
|
||||
cfg := &aws.Config{
|
||||
Region: aws.String(req.Region),
|
||||
Credentials: creds,
|
||||
}
|
||||
|
||||
svc := cloudwatch.New(session.New(cfg), cfg)
|
||||
|
||||
reqParam := &struct {
|
||||
Parameters struct {
|
||||
Namespace string `json:"namespace"`
|
||||
MetricName string `json:"metricName"`
|
||||
Dimensions []*cloudwatch.Dimension `json:"dimensions"`
|
||||
Statistic string `json:"statistic"`
|
||||
Period int64 `json:"period"`
|
||||
} `json:"parameters"`
|
||||
}{}
|
||||
json.Unmarshal(req.Body, reqParam)
|
||||
|
||||
params := &cloudwatch.DescribeAlarmsForMetricInput{
|
||||
Namespace: aws.String(reqParam.Parameters.Namespace),
|
||||
MetricName: aws.String(reqParam.Parameters.MetricName),
|
||||
Period: aws.Int64(reqParam.Parameters.Period),
|
||||
}
|
||||
if len(reqParam.Parameters.Dimensions) != 0 {
|
||||
params.Dimensions = reqParam.Parameters.Dimensions
|
||||
}
|
||||
if reqParam.Parameters.Statistic != "" {
|
||||
params.Statistic = aws.String(reqParam.Parameters.Statistic)
|
||||
}
|
||||
|
||||
resp, err := svc.DescribeAlarmsForMetric(params)
|
||||
if err != nil {
|
||||
c.JsonApiErr(500, "Unable to call AWS API", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(200, resp)
|
||||
}
|
||||
|
||||
func handleDescribeAlarmHistory(req *cwRequest, c *middleware.Context) {
	sess := session.New()
	creds := credentials.NewChainCredentials(
		[]credentials.Provider{
			&credentials.EnvProvider{},
			&credentials.SharedCredentialsProvider{Filename: "", Profile: req.DataSource.Database},
			&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute},
		})

	cfg := &aws.Config{
		Region:      aws.String(req.Region),
		Credentials: creds,
	}

	svc := cloudwatch.New(session.New(cfg), cfg)

	reqParam := &struct {
		Parameters struct {
			AlarmName       string `json:"alarmName"`
			HistoryItemType string `json:"historyItemType"`
			StartDate       int64  `json:"startDate"`
			EndDate         int64  `json:"endDate"`
		} `json:"parameters"`
	}{}
	json.Unmarshal(req.Body, reqParam)

	params := &cloudwatch.DescribeAlarmHistoryInput{
		AlarmName: aws.String(reqParam.Parameters.AlarmName),
		StartDate: aws.Time(time.Unix(reqParam.Parameters.StartDate, 0)),
		EndDate:   aws.Time(time.Unix(reqParam.Parameters.EndDate, 0)),
	}
	if reqParam.Parameters.HistoryItemType != "" {
		params.HistoryItemType = aws.String(reqParam.Parameters.HistoryItemType)
	}

	resp, err := svc.DescribeAlarmHistory(params)
	if err != nil {
		c.JsonApiErr(500, "Unable to call AWS API", err)
		return

@@ -160,7 +263,15 @@ func handleDescribeInstances(req *cwRequest, c *middleware.Context) {
		params.InstanceIds = reqParam.Parameters.InstanceIds
	}

	resp, err := svc.DescribeInstances(params)
	var resp ec2.DescribeInstancesOutput
	err := svc.DescribeInstancesPages(params,
		func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {
			reservations, _ := awsutil.ValuesAtPath(page, "Reservations")
			for _, reservation := range reservations {
				resp.Reservations = append(resp.Reservations, reservation.(*ec2.Reservation))
			}
			return !lastPage
		})
	if err != nil {
		c.JsonApiErr(500, "Unable to call AWS API", err)
		return
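The switch from DescribeInstances to DescribeInstancesPages in the hunk above accumulates results across response pages instead of stopping at the first one. A minimal sketch of the same callback pattern, detached from the AWS SDK:

package main

import "fmt"

// pages simulates an AWS SDK *Pages call: the callback receives each page
// plus a lastPage flag, and returning false stops the iteration early.
func pages(all [][]string, fn func(page []string, lastPage bool) bool) {
	for i, p := range all {
		if !fn(p, i == len(all)-1) {
			return
		}
	}
}

func main() {
	var resp []string
	pages([][]string{{"i-1", "i-2"}, {"i-3"}}, func(page []string, lastPage bool) bool {
		resp = append(resp, page...)
		return !lastPage // keep going until the last page, as the handler does
	})
	fmt.Println(resp) // [i-1 i-2 i-3]
}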
@@ -15,31 +15,47 @@ func init() {
	metricsMap = map[string][]string{
		"AWS/AutoScaling": {"GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity", "GroupInServiceInstances", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"},
		"AWS/Billing": {"EstimatedCharges"},
		"AWS/EC2": {"CPUCreditUsage", "CPUCreditBalance", "CPUUtilization", "DiskReadOps", "DiskWriteOps", "DiskReadBytes", "DiskWriteBytes", "NetworkIn", "NetworkOut", "StatusCheckFailed", "StatusCheckFailed_Instance", "StatusCheckFailed_System"},
		"AWS/ECS": {"CPUUtilization", "MemoryUtilization"},
		"AWS/CloudFront": {"Requests", "BytesDownloaded", "BytesUploaded", "TotalErrorRate", "4xxErrorRate", "5xxErrorRate"},
		"AWS/CloudSearch": {"SuccessfulRequests", "SearchableDocuments", "IndexUtilization", "Partitions"},
		"AWS/DynamoDB": {"ConditionalCheckFailedRequests", "ConsumedReadCapacityUnits", "ConsumedWriteCapacityUnits", "OnlineIndexConsumedWriteCapacity", "OnlineIndexPercentageProgress", "OnlineIndexThrottleEvents", "ProvisionedReadCapacityUnits", "ProvisionedWriteCapacityUnits", "ReadThrottleEvents", "ReturnedItemCount", "SuccessfulRequestLatency", "SystemErrors", "ThrottledRequests", "UserErrors", "WriteThrottleEvents"},
		"AWS/ECS": {"CPUUtilization", "MemoryUtilization"},
		"AWS/ElastiCache": {
			"CPUUtilization", "SwapUsage", "FreeableMemory", "NetworkBytesIn", "NetworkBytesOut",
			"CPUUtilization", "FreeableMemory", "NetworkBytesIn", "NetworkBytesOut", "SwapUsage",
			"BytesUsedForCacheItems", "BytesReadIntoMemcached", "BytesWrittenOutFromMemcached", "CasBadval", "CasHits", "CasMisses", "CmdFlush", "CmdGet", "CmdSet", "CurrConnections", "CurrItems", "DecrHits", "DecrMisses", "DeleteHits", "DeleteMisses", "Evictions", "GetHits", "GetMisses", "IncrHits", "IncrMisses", "Reclaimed",
			"CurrConnections", "Evictions", "Reclaimed", "NewConnections", "BytesUsedForCache", "CacheHits", "CacheMisses", "ReplicationLag", "GetTypeCmds", "SetTypeCmds", "KeyBasedCmds", "StringBasedCmds", "HashBasedCmds", "ListBasedCmds", "SetBasedCmds", "SortedSetBasedCmds", "CurrItems",
			"BytesUsedForHash", "CmdConfigGet", "CmdConfigSet", "CmdTouch", "CurrConfig", "EvictedUnfetched", "ExpiredUnfetched", "SlabsMoved", "TouchHits", "TouchMisses",
			"NewConnections", "NewItems", "UnusedMemory",
			"BytesUsedForCache", "CacheHits", "CacheMisses", "CurrConnections", "Evictions", "HyperLogLogBasedCmds", "NewConnections", "Reclaimed", "ReplicationBytes", "ReplicationLag", "SaveInProgress",
			"CurrItems", "GetTypeCmds", "HashBasedCmds", "KeyBasedCmds", "ListBasedCmds", "SetBasedCmds", "SetTypeCmds", "SortedSetBasedCmds", "StringBasedCmds",
		},
		"AWS/EBS": {"VolumeReadBytes", "VolumeWriteBytes", "VolumeReadOps", "VolumeWriteOps", "VolumeTotalReadTime", "VolumeTotalWriteTime", "VolumeIdleTime", "VolumeQueueLength", "VolumeThroughputPercentage", "VolumeConsumedReadWriteOps"},
		"AWS/ELB": {"HealthyHostCount", "UnHealthyHostCount", "RequestCount", "Latency", "HTTPCode_ELB_4XX", "HTTPCode_ELB_5XX", "HTTPCode_Backend_2XX", "HTTPCode_Backend_3XX", "HTTPCode_Backend_4XX", "HTTPCode_Backend_5XX", "BackendConnectionErrors", "SurgeQueueLength", "SpilloverCount"},
		"AWS/ElasticMapReduce": {"CoreNodesPending", "CoreNodesRunning", "HBaseBackupFailed", "HBaseMostRecentBackupDuration", "HBaseTimeSinceLastSuccessfulBackup", "HDFSBytesRead", "HDFSBytesWritten", "HDFSUtilization", "IsIdle", "JobsFailed", "JobsRunning", "LiveDataNodes", "LiveTaskTrackers", "MapSlotsOpen", "MissingBlocks", "ReduceSlotsOpen", "RemainingMapTasks", "RemainingMapTasksPerSlot", "RemainingReduceTasks", "RunningMapTasks", "RunningReduceTasks", "S3BytesRead", "S3BytesWritten", "TaskNodesPending", "TaskNodesRunning", "TotalLoad"},
		"AWS/Kinesis": {"PutRecord.Bytes", "PutRecord.Latency", "PutRecord.Success", "PutRecords.Bytes", "PutRecords.Latency", "PutRecords.Records", "PutRecords.Success", "IncomingBytes", "IncomingRecords", "GetRecords.Bytes", "GetRecords.IteratorAgeMilliseconds", "GetRecords.Latency", "GetRecords.Success"},
		"AWS/ML": {"PredictCount", "PredictFailureCount"},
		"AWS/OpsWorks": {"cpu_idle", "cpu_nice", "cpu_system", "cpu_user", "cpu_waitio", "load_1", "load_5", "load_15", "memory_buffers", "memory_cached", "memory_free", "memory_swap", "memory_total", "memory_used", "procs"},
		"AWS/Redshift": {"CPUUtilization", "DatabaseConnections", "HealthStatus", "MaintenanceMode", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "PercentageDiskSpaceUsed", "ReadIOPS", "ReadLatency", "ReadThroughput", "WriteIOPS", "WriteLatency", "WriteThroughput"},
		"AWS/RDS": {"BinLogDiskUsage", "CPUUtilization", "DatabaseConnections", "DiskQueueDepth", "FreeableMemory", "FreeStorageSpace", "ReplicaLag", "SwapUsage", "ReadIOPS", "WriteIOPS", "ReadLatency", "WriteLatency", "ReadThroughput", "WriteThroughput", "NetworkReceiveThroughput", "NetworkTransmitThroughput"},
		"AWS/Route53": {"HealthCheckStatus", "HealthCheckPercentageHealthy"},
		"AWS/SNS": {"NumberOfMessagesPublished", "PublishSize", "NumberOfNotificationsDelivered", "NumberOfNotificationsFailed"},
		"AWS/SQS": {"NumberOfMessagesSent", "SentMessageSize", "NumberOfMessagesReceived", "NumberOfEmptyReceives", "NumberOfMessagesDeleted", "ApproximateNumberOfMessagesDelayed", "ApproximateNumberOfMessagesVisible", "ApproximateNumberOfMessagesNotVisible"},
		"AWS/S3": {"BucketSizeBytes", "NumberOfObjects"},
		"AWS/SWF": {"DecisionTaskScheduleToStartTime", "DecisionTaskStartToCloseTime", "DecisionTasksCompleted", "StartedDecisionTasksTimedOutOnClose", "WorkflowStartToCloseTime", "WorkflowsCanceled", "WorkflowsCompleted", "WorkflowsContinuedAsNew", "WorkflowsFailed", "WorkflowsTerminated", "WorkflowsTimedOut"},
		"AWS/StorageGateway": {"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "CloudBytesDownloaded", "CloudDownloadLatency", "CloudBytesUploaded", "UploadBufferFree", "UploadBufferPercentUsed", "UploadBufferUsed", "QueuedWrites", "ReadBytes", "ReadTime", "TotalCacheSize", "WriteBytes", "WriteTime", "WorkingStorageFree", "WorkingStoragePercentUsed", "WorkingStorageUsed", "CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "ReadBytes", "ReadTime", "WriteBytes", "WriteTime", "QueuedWrites"},
		"AWS/WorkSpaces": {"Available", "Unhealthy", "ConnectionAttempt", "ConnectionSuccess", "ConnectionFailure", "SessionLaunchTime", "InSessionLatency", "SessionDisconnect"},
		"AWS/EBS": {"VolumeReadBytes", "VolumeWriteBytes", "VolumeReadOps", "VolumeWriteOps", "VolumeTotalReadTime", "VolumeTotalWriteTime", "VolumeIdleTime", "VolumeQueueLength", "VolumeThroughputPercentage", "VolumeConsumedReadWriteOps"},
		"AWS/EC2": {"CPUCreditUsage", "CPUCreditBalance", "CPUUtilization", "DiskReadOps", "DiskWriteOps", "DiskReadBytes", "DiskWriteBytes", "NetworkIn", "NetworkOut", "StatusCheckFailed", "StatusCheckFailed_Instance", "StatusCheckFailed_System"},
		"AWS/ELB": {"HealthyHostCount", "UnHealthyHostCount", "RequestCount", "Latency", "HTTPCode_ELB_4XX", "HTTPCode_ELB_5XX", "HTTPCode_Backend_2XX", "HTTPCode_Backend_3XX", "HTTPCode_Backend_4XX", "HTTPCode_Backend_5XX", "BackendConnectionErrors", "SurgeQueueLength", "SpilloverCount"},
		"AWS/ElasticMapReduce": {"IsIdle", "JobsRunning", "JobsFailed",
			"MapTasksRunning", "MapTasksRemaining", "MapSlotsOpen", "RemainingMapTasksPerSlot", "ReduceTasksRunning", "ReduceTasksRemaining", "ReduceSlotsOpen",
			"CoreNodesRunning", "CoreNodesPending", "LiveDataNodes", "TaskNodesRunning", "TaskNodesPending", "LiveTaskTrackers",
			"S3BytesWritten", "S3BytesRead", "HDFSUtilization", "HDFSBytesRead", "HDFSBytesWritten", "MissingBlocks", "TotalLoad",
			"BackupFailed", "MostRecentBackupDuration", "TimeSinceLastSuccessfulBackup",
			"IsIdle", "ContainerAllocated", "ContainerReserved", "ContainerPending", "AppsCompleted", "AppsFailed", "AppsKilled", "AppsPending", "AppsRunning", "AppsSubmitted",
			"CoreNodesRunning", "CoreNodesPending", "LiveDataNodes", "MRTotalNodes", "MRActiveNodes", "MRLostNodes", "MRUnhealthyNodes", "MRDecommissionedNodes", "MRRebootedNodes",
			"S3BytesWritten", "S3BytesRead", "HDFSUtilization", "HDFSBytesRead", "HDFSBytesWritten", "MissingBlocks", "CorruptBlocks", "TotalLoad", "MemoryTotalMB", "MemoryReservedMB", "MemoryAvailableMB", "MemoryAllocatedMB", "PendingDeletionBlocks", "UnderReplicatedBlocks", "DfsPendingReplicationBlocks", "CapacityRemainingGB",
			"HbaseBackupFailed", "MostRecentBackupDuration", "TimeSinceLastSuccessfulBackup"},
		"AWS/ES": {"ClusterStatus.green", "ClusterStatus.yellow", "ClusterStatus.red", "Nodes", "SearchableDocuments", "DeletedDocuments", "CPUUtilization", "FreeStorageSpace", "JVMMemoryPressure", "AutomatedSnapshotFailure", "MasterCPUUtilization", "MasterFreeStorageSpace", "MasterJVMMemoryPressure", "ReadLatency", "WriteLatency", "ReadThroughput", "WriteThroughput", "DiskQueueLength", "ReadIOPS", "WriteIOPS"},
		"AWS/Kinesis": {"PutRecord.Bytes", "PutRecord.Latency", "PutRecord.Success", "PutRecords.Bytes", "PutRecords.Latency", "PutRecords.Records", "PutRecords.Success", "IncomingBytes", "IncomingRecords", "GetRecords.Bytes", "GetRecords.IteratorAgeMilliseconds", "GetRecords.Latency", "GetRecords.Success"},
		"AWS/Lambda": {"Invocations", "Errors", "Duration", "Throttles"},
		"AWS/ML": {"PredictCount", "PredictFailureCount"},
		"AWS/OpsWorks": {"cpu_idle", "cpu_nice", "cpu_system", "cpu_user", "cpu_waitio", "load_1", "load_5", "load_15", "memory_buffers", "memory_cached", "memory_free", "memory_swap", "memory_total", "memory_used", "procs"},
		"AWS/Redshift": {"CPUUtilization", "DatabaseConnections", "HealthStatus", "MaintenanceMode", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "PercentageDiskSpaceUsed", "ReadIOPS", "ReadLatency", "ReadThroughput", "WriteIOPS", "WriteLatency", "WriteThroughput"},
		"AWS/RDS": {"BinLogDiskUsage", "CPUUtilization", "CPUCreditUsage", "CPUCreditBalance", "DatabaseConnections", "DiskQueueDepth", "FreeableMemory", "FreeStorageSpace", "ReplicaLag", "SwapUsage", "ReadIOPS", "WriteIOPS", "ReadLatency", "WriteLatency", "ReadThroughput", "WriteThroughput", "NetworkReceiveThroughput", "NetworkTransmitThroughput"},
		"AWS/Route53": {"HealthCheckStatus", "HealthCheckPercentageHealthy"},
		"AWS/SNS": {"NumberOfMessagesPublished", "PublishSize", "NumberOfNotificationsDelivered", "NumberOfNotificationsFailed"},
		"AWS/SQS": {"NumberOfMessagesSent", "SentMessageSize", "NumberOfMessagesReceived", "NumberOfEmptyReceives", "NumberOfMessagesDeleted", "ApproximateNumberOfMessagesDelayed", "ApproximateNumberOfMessagesVisible", "ApproximateNumberOfMessagesNotVisible"},
		"AWS/S3": {"BucketSizeBytes", "NumberOfObjects"},
		"AWS/SWF": {"DecisionTaskScheduleToStartTime", "DecisionTaskStartToCloseTime", "DecisionTasksCompleted", "StartedDecisionTasksTimedOutOnClose", "WorkflowStartToCloseTime", "WorkflowsCanceled", "WorkflowsCompleted", "WorkflowsContinuedAsNew", "WorkflowsFailed", "WorkflowsTerminated", "WorkflowsTimedOut",
			"ActivityTaskScheduleToCloseTime", "ActivityTaskScheduleToStartTime", "ActivityTaskStartToCloseTime", "ActivityTasksCanceled", "ActivityTasksCompleted", "ActivityTasksFailed", "ScheduledActivityTasksTimedOutOnClose", "ScheduledActivityTasksTimedOutOnStart", "StartedActivityTasksTimedOutOnClose", "StartedActivityTasksTimedOutOnHeartbeat"},
		"AWS/StorageGateway": {"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "CloudBytesDownloaded", "CloudDownloadLatency", "CloudBytesUploaded", "UploadBufferFree", "UploadBufferPercentUsed", "UploadBufferUsed", "QueuedWrites", "ReadBytes", "ReadTime", "TotalCacheSize", "WriteBytes", "WriteTime", "TimeSinceLastRecoveryPoint", "WorkingStorageFree", "WorkingStoragePercentUsed", "WorkingStorageUsed",
			"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "ReadBytes", "ReadTime", "WriteBytes", "WriteTime", "QueuedWrites"},
		"AWS/WAF": {"AllowedRequests", "BlockedRequests", "CountedRequests"},
		"AWS/WorkSpaces": {"Available", "Unhealthy", "ConnectionAttempt", "ConnectionSuccess", "ConnectionFailure", "SessionLaunchTime", "InSessionLatency", "SessionDisconnect"},
	}
	dimensionsMap = map[string][]string{
		"AWS/AutoScaling": {"AutoScalingGroupName"},
@@ -47,13 +63,15 @@ func init() {
		"AWS/CloudFront": {"DistributionId", "Region"},
		"AWS/CloudSearch": {},
		"AWS/DynamoDB": {"TableName", "GlobalSecondaryIndexName", "Operation"},
		"AWS/ECS": {"ClusterName", "ServiceName"},
		"AWS/ElastiCache": {"CacheClusterId", "CacheNodeId"},
		"AWS/EBS": {"VolumeId"},
		"AWS/EC2": {"AutoScalingGroupName", "ImageId", "InstanceId", "InstanceType"},
		"AWS/ECS": {"ClusterName", "ServiceName"},
		"AWS/ELB": {"LoadBalancerName", "AvailabilityZone"},
		"AWS/ElasticMapReduce": {"ClusterId", "JobId"},
		"AWS/ElasticMapReduce": {"ClusterId", "JobFlowId", "JobId"},
		"AWS/ES": {},
		"AWS/Kinesis": {"StreamName"},
		"AWS/Lambda": {"FunctionName"},
		"AWS/ML": {"MLModelId", "RequestMode"},
		"AWS/OpsWorks": {"StackId", "LayerId", "InstanceId"},
		"AWS/Redshift": {"NodeID", "ClusterIdentifier"},
@@ -62,8 +80,9 @@ func init() {
		"AWS/SNS": {"Application", "Platform", "TopicName"},
		"AWS/SQS": {"QueueName"},
		"AWS/S3": {"BucketName", "StorageType"},
		"AWS/SWF": {"Domain", "ActivityTypeName", "ActivityTypeVersion"},
		"AWS/SWF": {"Domain", "WorkflowTypeName", "WorkflowTypeVersion", "ActivityTypeName", "ActivityTypeVersion"},
		"AWS/StorageGateway": {"GatewayId", "GatewayName", "VolumeId"},
		"AWS/WAF": {"Rule", "WebACL"},
		"AWS/WorkSpaces": {"DirectoryId", "WorkspaceId"},
	}
}
@@ -113,6 +132,7 @@ func handleGetMetrics(req *cwRequest, c *middleware.Context) {
		c.JsonApiErr(404, "Unable to find namespace "+reqParam.Parameters.Namespace, nil)
		return
	}
	sort.Sort(sort.StringSlice(namespaceMetrics))

	result := []interface{}{}
	for _, name := range namespaceMetrics {
@@ -136,6 +156,7 @@ func handleGetDimensions(req *cwRequest, c *middleware.Context) {
		c.JsonApiErr(404, "Unable to find dimension "+reqParam.Parameters.Namespace, nil)
		return
	}
	sort.Sort(sort.StringSlice(dimensionValues))

	result := []interface{}{}
	for _, name := range dimensionValues {
@@ -48,6 +48,20 @@ func GetDashboard(c *middleware.Context) {
	}

	dash := query.Result

	// Finding the last updater of the dashboard
	updater := "Anonymous"
	if dash.UpdatedBy != 0 {
		userQuery := m.GetUserByIdQuery{Id: dash.UpdatedBy}
		userErr := bus.Dispatch(&userQuery)
		if userErr != nil {
			updater = "Unknown"
		} else {
			user := userQuery.Result
			updater = user.Login
		}
	}

	dto := dtos.DashboardFullWithMeta{
		Dashboard: dash.Data,
		Meta: dtos.DashboardMeta{
@@ -59,6 +73,7 @@ func GetDashboard(c *middleware.Context) {
			CanEdit:   canEditDashboard(c.OrgRole),
			Created:   dash.Created,
			Updated:   dash.Updated,
			UpdatedBy: updater,
		},
	}

@@ -88,6 +103,12 @@ func DeleteDashboard(c *middleware.Context) {
func PostDashboard(c *middleware.Context, cmd m.SaveDashboardCommand) {
	cmd.OrgId = c.OrgId

	if !c.IsSignedIn {
		cmd.UpdatedBy = 0
	} else {
		cmd.UpdatedBy = c.UserId
	}

	dash := cmd.GetDashboardModel()
	if dash.Id == 0 {
		limitReached, err := middleware.QuotaReached(c, "dashboard")

@@ -12,6 +12,14 @@ import (
	"github.com/grafana/grafana/pkg/util"
)

func GetSharingOptions(c *middleware.Context) {
	c.JSON(200, util.DynMap{
		"externalSnapshotURL":  setting.ExternalSnapshotUrl,
		"externalSnapshotName": setting.ExternalSnapshotName,
		"externalEnabled":      setting.ExternalEnabled,
	})
}

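The JSON this endpoint returns then looks roughly like the following; the values shown are only an illustration of the three settings wired up above:

{
  "externalSnapshotURL": "https://snapshots.example.com",
  "externalSnapshotName": "Publish to snapshots.example.com",
  "externalEnabled": true
}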
func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapshotCommand) {
	if cmd.External {
		// external snapshot ref requires key and delete key

@@ -66,6 +66,7 @@ func GetDataSourceById(c *middleware.Context) Response {
		BasicAuth:         ds.BasicAuth,
		BasicAuthUser:     ds.BasicAuthUser,
		BasicAuthPassword: ds.BasicAuthPassword,
		WithCredentials:   ds.WithCredentials,
		IsDefault:         ds.IsDefault,
		JsonData:          ds.JsonData,
	})

@@ -41,6 +41,7 @@ type DashboardMeta struct {
	Expires   time.Time `json:"expires"`
	Created   time.Time `json:"created"`
	Updated   time.Time `json:"updated"`
	UpdatedBy string    `json:"updatedBy"`
}

type DashboardFullWithMeta struct {
@@ -61,6 +62,7 @@ type DataSource struct {
	BasicAuth         bool                   `json:"basicAuth"`
	BasicAuthUser     string                 `json:"basicAuthUser"`
	BasicAuthPassword string                 `json:"basicAuthPassword"`
	WithCredentials   bool                   `json:"withCredentials"`
	IsDefault         bool                   `json:"isDefault"`
	JsonData          map[string]interface{} `json:"jsonData"`
}

@@ -69,6 +69,9 @@ func getFrontendSettingsMap(c *middleware.Context) (map[string]interface{}, erro
	if ds.BasicAuth {
		dsMap["basicAuth"] = util.GetBasicAuthHeader(ds.BasicAuthUser, ds.BasicAuthPassword)
	}
	if ds.WithCredentials {
		dsMap["withCredentials"] = ds.WithCredentials
	}

	if ds.Type == m.DS_INFLUXDB_08 {
		dsMap["username"] = ds.User
@@ -127,6 +130,7 @@ func getFrontendSettingsMap(c *middleware.Context) (map[string]interface{}, erro
		"panels":           panels,
		"appSubUrl":        setting.AppSubUrl,
		"allowOrgCreate":   (setting.AllowUserOrgCreate && c.IsSignedIn) || c.IsGrafanaAdmin,
		"authProxyEnabled": setting.AuthProxyEnabled,
		"buildInfo": map[string]interface{}{
			"version": setting.BuildVersion,
			"commit":  setting.BuildCommit,

@@ -28,6 +28,7 @@ func LoginView(c *middleware.Context) {
	viewData.Settings["googleAuthEnabled"] = setting.OAuthService.Google
	viewData.Settings["githubAuthEnabled"] = setting.OAuthService.GitHub
	viewData.Settings["disableUserSignUp"] = !setting.AllowUserSignUp
	viewData.Settings["loginHint"] = setting.LoginHint

	if !tryLoginUsingRememberCookie(c) {
		c.HTML(200, VIEW_INDEX, viewData)

pkg/log/syslog.go (new file, 95 lines)
@@ -0,0 +1,95 @@
//+build !windows,!nacl,!plan9

package log

import (
	"encoding/json"
	"errors"
	"log/syslog"
)

type SyslogWriter struct {
	syslog   *syslog.Writer
	Network  string `json:"network"`
	Address  string `json:"address"`
	Facility string `json:"facility"`
	Tag      string `json:"tag"`
}

func NewSyslog() LoggerInterface {
	return new(SyslogWriter)
}

func (sw *SyslogWriter) Init(config string) error {
	if err := json.Unmarshal([]byte(config), sw); err != nil {
		return err
	}

	prio, err := parseFacility(sw.Facility)
	if err != nil {
		return err
	}

	w, err := syslog.Dial(sw.Network, sw.Address, prio, sw.Tag)
	if err != nil {
		return err
	}

	sw.syslog = w
	return nil
}

func (sw *SyslogWriter) WriteMsg(msg string, skip, level int) error {
	var err error

	switch level {
	case TRACE, DEBUG:
		err = sw.syslog.Debug(msg)
	case INFO:
		err = sw.syslog.Info(msg)
	case WARN:
		err = sw.syslog.Warning(msg)
	case ERROR:
		err = sw.syslog.Err(msg)
	case CRITICAL:
		err = sw.syslog.Crit(msg)
	case FATAL:
		err = sw.syslog.Alert(msg)
	default:
		err = errors.New("invalid syslog level")
	}

	return err
}

func (sw *SyslogWriter) Destroy() {
	sw.syslog.Close()
}

func (sw *SyslogWriter) Flush() {}

var facilities = map[string]syslog.Priority{
	"user":   syslog.LOG_USER,
	"daemon": syslog.LOG_DAEMON,
	"local0": syslog.LOG_LOCAL0,
	"local1": syslog.LOG_LOCAL1,
	"local2": syslog.LOG_LOCAL2,
	"local3": syslog.LOG_LOCAL3,
	"local4": syslog.LOG_LOCAL4,
	"local5": syslog.LOG_LOCAL5,
	"local6": syslog.LOG_LOCAL6,
	"local7": syslog.LOG_LOCAL7,
}

func parseFacility(facility string) (syslog.Priority, error) {
	prio, ok := facilities[facility]
	if !ok {
		return syslog.LOG_LOCAL0, errors.New("invalid syslog facility")
	}

	return prio, nil
}

func init() {
	Register("syslog", NewSyslog)
}
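A runnable sketch of what Init amounts to once the JSON config is parsed: dial syslog with the resolved facility and tag, then write at a mapped level. It assumes a syslog daemon is listening on the given address; all values are illustrative:

package main

import (
	"log"
	"log/syslog"
)

func main() {
	// Equivalent to a SyslogWriter configured with
	// {"network":"udp","address":"localhost:514","facility":"local7","tag":"grafana"}.
	w, err := syslog.Dial("udp", "localhost:514", syslog.LOG_LOCAL7, "grafana")
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	w.Info("grafana syslog test") // INFO maps to syslog.Info in WriteMsg above
}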
@@ -29,10 +29,15 @@ func Logger() macaron.Handler {
	return func(res http.ResponseWriter, req *http.Request, c *macaron.Context) {
		start := time.Now()

		uname := c.GetCookie(setting.CookieUserName)
		if len(uname) == 0 {
			uname = "-"
		}

		rw := res.(macaron.ResponseWriter)
		c.Next()

		content := fmt.Sprintf("Completed %s %v %s in %v", req.URL.Path, rw.Status(), http.StatusText(rw.Status()), time.Since(start))
		content := fmt.Sprintf("Completed %s %s \"%s %s %s\" %v %s %d bytes in %dus", c.RemoteAddr(), uname, req.Method, req.URL.Path, req.Proto, rw.Status(), http.StatusText(rw.Status()), rw.Size(), time.Since(start)/time.Microsecond)

		switch rw.Status() {
		case 200, 304:
|
||||
|
||||
"github.com/Unknwon/macaron"
|
||||
"github.com/macaron-contrib/session"
|
||||
_ "github.com/macaron-contrib/session/memcache"
|
||||
_ "github.com/macaron-contrib/session/mysql"
|
||||
_ "github.com/macaron-contrib/session/postgres"
|
||||
_ "github.com/macaron-contrib/session/redis"
|
||||
|
@ -33,6 +33,8 @@ type Dashboard struct {
|
||||
Created time.Time
|
||||
Updated time.Time
|
||||
|
||||
UpdatedBy int64
|
||||
|
||||
Title string
|
||||
Data map[string]interface{}
|
||||
}
|
||||
@ -90,6 +92,7 @@ func NewDashboardFromJson(data map[string]interface{}) *Dashboard {
|
||||
func (cmd *SaveDashboardCommand) GetDashboardModel() *Dashboard {
|
||||
dash := NewDashboardFromJson(cmd.Dashboard)
|
||||
dash.OrgId = cmd.OrgId
|
||||
dash.UpdatedBy = cmd.UpdatedBy
|
||||
dash.UpdateSlug()
|
||||
return dash
|
||||
}
|
||||
@ -113,6 +116,7 @@ type SaveDashboardCommand struct {
|
||||
Dashboard map[string]interface{} `json:"dashboard" binding:"Required"`
|
||||
Overwrite bool `json:"overwrite"`
|
||||
OrgId int64 `json:"-"`
|
||||
UpdatedBy int64 `json:"-"`
|
||||
|
||||
Result *Dashboard
|
||||
}
|
||||
|
@ -40,6 +40,7 @@ type DataSource struct {
|
||||
BasicAuth bool
|
||||
BasicAuthUser string
|
||||
BasicAuthPassword string
|
||||
WithCredentials bool
|
||||
IsDefault bool
|
||||
JsonData map[string]interface{}
|
||||
|
||||
@ -83,6 +84,7 @@ type AddDataSourceCommand struct {
|
||||
BasicAuth bool `json:"basicAuth"`
|
||||
BasicAuthUser string `json:"basicAuthUser"`
|
||||
BasicAuthPassword string `json:"basicAuthPassword"`
|
||||
WithCredentials bool `json:"withCredentials"`
|
||||
IsDefault bool `json:"isDefault"`
|
||||
JsonData map[string]interface{} `json:"jsonData"`
|
||||
|
||||
@ -103,6 +105,7 @@ type UpdateDataSourceCommand struct {
|
||||
BasicAuth bool `json:"basicAuth"`
|
||||
BasicAuthUser string `json:"basicAuthUser"`
|
||||
BasicAuthPassword string `json:"basicAuthPassword"`
|
||||
WithCredentials bool `json:"withCredentials"`
|
||||
IsDefault bool `json:"isDefault"`
|
||||
JsonData map[string]interface{} `json:"jsonData"`
|
||||
|
||||
|
@ -1,6 +1,8 @@
|
||||
package plugins
|
||||
|
||||
import "github.com/grafana/grafana/pkg/models"
|
||||
import (
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
)
|
||||
|
||||
type DataSourcePlugin struct {
|
||||
Type string `json:"type"`
|
||||
|
@ -11,6 +11,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -77,7 +78,7 @@ func scan(pluginDir string) error {
|
||||
pluginPath: pluginDir,
|
||||
}
|
||||
|
||||
if err := filepath.Walk(pluginDir, scanner.walker); err != nil {
|
||||
if err := util.Walk(pluginDir, true, true, scanner.walker); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -114,12 +114,14 @@ func UpdateDataSource(cmd *m.UpdateDataSourceCommand) error {
|
||||
BasicAuth: cmd.BasicAuth,
|
||||
BasicAuthUser: cmd.BasicAuthUser,
|
||||
BasicAuthPassword: cmd.BasicAuthPassword,
|
||||
WithCredentials: cmd.WithCredentials,
|
||||
JsonData: cmd.JsonData,
|
||||
Updated: time.Now(),
|
||||
}
|
||||
|
||||
sess.UseBool("is_default")
|
||||
sess.UseBool("basic_auth")
|
||||
sess.UseBool("with_credentials")
|
||||
|
||||
_, err := sess.Where("id=? and org_id=?", ds.Id, ds.OrgId).Update(ds)
|
||||
if err != nil {
|
||||
|
@ -92,4 +92,9 @@ func addDashboardMigration(mg *Migrator) {
|
||||
Sqlite("SELECT 0 WHERE 0;").
|
||||
Postgres("SELECT 0;").
|
||||
Mysql("ALTER TABLE dashboard MODIFY data MEDIUMTEXT;"))
|
||||
|
||||
// add column to store updater of a dashboard
|
||||
mg.AddMigration("Add column updated_by in dashboard - v2", NewAddColumnMigration(dashboardV2, &Column{
|
||||
Name: "updated_by", Type: DB_Int, Nullable: true,
|
||||
}))
|
||||
}
|
||||
|
@ -96,4 +96,9 @@ func addDataSourceMigration(mg *Migrator) {
|
||||
}))
|
||||
|
||||
mg.AddMigration("Drop old table data_source_v1 #2", NewDropTableMigration("data_source_v1"))
|
||||
|
||||
// add column to activate withCredentials option
|
||||
mg.AddMigration("Add column with_credentials", NewAddColumnMigration(tableV2, &Column{
|
||||
Name: "with_credentials", Type: DB_Bool, Nullable: false, Default: "0",
|
||||
}))
|
||||
}
|
||||
|
@ -55,7 +55,7 @@ func (col *Column) StringNoPk(d Dialect) string {
|
||||
}
|
||||
|
||||
if col.Default != "" {
|
||||
sql += "DEFAULT " + col.Default + " "
|
||||
sql += "DEFAULT " + d.Default(col) + " "
|
||||
}
|
||||
|
||||
return sql
|
||||
|
@ -17,10 +17,11 @@ type Dialect interface {
|
||||
SqlType(col *Column) string
|
||||
SupportEngine() bool
|
||||
LikeStr() string
|
||||
Default(col *Column) string
|
||||
|
||||
CreateIndexSql(tableName string, index *Index) string
|
||||
CreateTableSql(table *Table) string
|
||||
AddColumnSql(tableName string, Col *Column) string
|
||||
AddColumnSql(tableName string, col *Column) string
|
||||
CopyTableData(sourceTable string, targetTable string, sourceCols []string, targetCols []string) string
|
||||
DropTable(tableName string) string
|
||||
DropIndexSql(tableName string, index *Index) string
|
||||
@ -71,6 +72,10 @@ func (b *BaseDialect) EqStr() string {
|
||||
return "="
|
||||
}
|
||||
|
||||
func (b *BaseDialect) Default(col *Column) string {
|
||||
return col.Default
|
||||
}
|
||||
|
||||
func (b *BaseDialect) CreateTableSql(table *Table) string {
|
||||
var sql string
|
||||
sql = "CREATE TABLE IF NOT EXISTS "
|
||||
|
@ -64,6 +64,10 @@ type AddColumnMigration struct {
|
||||
column *Column
|
||||
}
|
||||
|
||||
func NewAddColumnMigration(table Table, col *Column) *AddColumnMigration {
|
||||
return &AddColumnMigration{tableName: table.Name, column: col}
|
||||
}
|
||||
|
||||
func (m *AddColumnMigration) Table(tableName string) *AddColumnMigration {
|
||||
m.tableName = tableName
|
||||
return m
|
||||
|
@ -36,6 +36,17 @@ func (db *Postgres) AutoIncrStr() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (b *Postgres) Default(col *Column) string {
|
||||
if col.Type == DB_Bool {
|
||||
if col.Default == "0" {
|
||||
return "FALSE"
|
||||
} else {
|
||||
return "TRUE"
|
||||
}
|
||||
}
|
||||
return col.Default
|
||||
}
|
||||
|
||||
func (db *Postgres) SqlType(c *Column) string {
|
||||
var res string
|
||||
switch t := c.Type; t {
|
||||
|
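With the Dialect.Default hook shown in the dialect and column.go hunks above, the "0" default from the with_credentials migration can render as FALSE on Postgres while staying 0 on MySQL and SQLite, roughly like this (exact column types depend on each dialect's SqlType and are only sketched here):

-- Postgres:       "with_credentials" BOOL NOT NULL DEFAULT FALSE
-- MySQL / SQLite: with_credentials BOOL NOT NULL DEFAULT 0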
@@ -48,9 +48,10 @@ var (
	BuildStamp int64

	// Paths
	LogsPath string
	HomePath string
	DataPath string
	LogsPath    string
	HomePath    string
	DataPath    string
	PluginsPath string

	// Log settings.
	LogModes []string
@@ -76,12 +77,18 @@ var (
	EmailCodeValidMinutes int
	DataProxyWhiteList    map[string]bool

	// Snapshots
	ExternalSnapshotUrl  string
	ExternalSnapshotName string
	ExternalEnabled      bool

	// User settings
	AllowUserSignUp    bool
	AllowUserOrgCreate bool
	AutoAssignOrg      bool
	AutoAssignOrgRole  string
	VerifyEmailEnabled bool
	LoginHint          string

	// Http auth
	AdminUser string
@@ -174,6 +181,9 @@ func applyEnvVariableOverrides() {

	if len(envValue) > 0 {
		key.SetValue(envValue)
		if strings.Contains(envKey, "PASSWORD") {
			envValue = "*********"
		}
		appliedEnvOverrides = append(appliedEnvOverrides, fmt.Sprintf("%s=%s", envKey, envValue))
	}
}
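The effect: a password supplied via the environment is still applied to the config, but the logged override is masked, e.g. (hypothetical key and value):

GF_SECURITY_ADMIN_PASSWORD=*********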
@@ -188,6 +198,9 @@ func applyCommandLineDefaultProperties(props map[string]string) {
	value, exists := props[keyString]
	if exists {
		key.SetValue(value)
		if strings.Contains(keyString, "password") {
			value = "*********"
		}
		appliedCommandLineProperties = append(appliedCommandLineProperties, fmt.Sprintf("%s=%s", keyString, value))
	}
}
@@ -381,6 +394,7 @@ func NewConfigContext(args *CommandLineArgs) error {
	loadConfiguration(args)

	Env = Cfg.Section("").Key("app_mode").MustString("development")
	PluginsPath = Cfg.Section("paths").Key("plugins").String()

	server := Cfg.Section("server")
	AppUrl, AppSubUrl = parseAppUrlAndSubUrl(server)
@@ -412,6 +426,12 @@ func NewConfigContext(args *CommandLineArgs) error {
	CookieRememberName = security.Key("cookie_remember_name").String()
	DisableGravatar = security.Key("disable_gravatar").MustBool(true)

	// read snapshots settings
	snapshots := Cfg.Section("snapshots")
	ExternalSnapshotUrl = snapshots.Key("external_snapshot_url").String()
	ExternalSnapshotName = snapshots.Key("external_snapshot_name").String()
	ExternalEnabled = snapshots.Key("external_enabled").MustBool(true)

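The matching grafana.ini section read here; the URL and name values below are examples only:

[snapshots]
external_enabled = true
external_snapshot_url = https://snapshots.example.com
external_snapshot_name = Publish to snapshots.example.com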
	// read data source proxy white list
	DataProxyWhiteList = make(map[string]bool)
	for _, hostAndIp := range security.Key("data_source_proxy_whitelist").Strings(" ") {
@@ -428,6 +448,7 @@ func NewConfigContext(args *CommandLineArgs) error {
	AutoAssignOrg = users.Key("auto_assign_org").MustBool(true)
	AutoAssignOrgRole = users.Key("auto_assign_org_role").In("Editor", []string{"Editor", "Admin", "Read Only Editor", "Viewer"})
	VerifyEmailEnabled = users.Key("verify_email_enabled").MustBool(false)
	LoginHint = users.Key("login_hint").String()

	// anonymous access
	AnonymousEnabled = Cfg.Section("auth.anonymous").Key("enabled").MustBool(false)
@@ -471,7 +492,7 @@ func NewConfigContext(args *CommandLineArgs) error {
func readSessionConfig() {
	sec := Cfg.Section("session")
	SessionOptions = session.Options{}
	SessionOptions.Provider = sec.Key("provider").In("memory", []string{"memory", "file", "redis", "mysql", "postgres"})
	SessionOptions.Provider = sec.Key("provider").In("memory", []string{"memory", "file", "redis", "mysql", "postgres", "memcache"})
	SessionOptions.ProviderConfig = strings.Trim(sec.Key("provider_config").String(), "\" ")
	SessionOptions.CookieName = sec.Key("cookie_name").MustString("grafana_sess")
	SessionOptions.CookiePath = AppSubUrl
@@ -565,6 +586,14 @@ func initLogging(args *CommandLineArgs) {
			"driver": sec.Key("driver").String(),
			"conn":   sec.Key("conn").String(),
		}
	case "syslog":
		LogConfigs[i] = util.DynMap{
			"level":    level,
			"network":  sec.Key("network").MustString(""),
			"address":  sec.Key("address").MustString(""),
			"facility": sec.Key("facility").MustString("local7"),
			"tag":      sec.Key("tag").MustString(""),
		}
	}

	cfgJsonBytes, _ := json.Marshal(LogConfigs[i])
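A grafana.ini sketch for the new syslog mode; the [log.syslog] section name follows the existing [log.<mode>] pattern and is an assumption here, as are the values:

[log.syslog]
level = Info
network = udp
address = localhost:514
facility = local7
tag = grafana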
@@ -599,6 +628,7 @@ func LogConfigurationInfo() {
	text.WriteString(fmt.Sprintf(" home: %s\n", HomePath))
	text.WriteString(fmt.Sprintf(" data: %s\n", DataPath))
	text.WriteString(fmt.Sprintf(" logs: %s\n", LogsPath))
	text.WriteString(fmt.Sprintf(" plugins: %s\n", PluginsPath))

	log.Info(text.String())
}

pkg/util/filepath.go (new file, 98 lines)
@@ -0,0 +1,98 @@
package util

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

//WalkSkipDir is the Error returned when we want to skip descending into a directory
var WalkSkipDir = errors.New("skip this directory")

//WalkFunc is a callback function called for each path as a directory is walked
//If resolvedPath != "", then we are following symbolic links.
type WalkFunc func(resolvedPath string, info os.FileInfo, err error) error

//Walk walks a path, optionally following symbolic links, and for each path,
//it calls the walkFn passed.
//
//It is similar to filepath.Walk, except that it supports symbolic links and
//can detect infinite loops while following sym links.
//It solves the issue where your WalkFunc needs a path relative to the symbolic link
//(resolving links within walkfunc loses the path to the symbolic link for each traversal).
func Walk(path string, followSymlinks bool, detectSymlinkInfiniteLoop bool, walkFn WalkFunc) error {
	info, err := os.Lstat(path)
	if err != nil {
		return err
	}
	var symlinkPathsFollowed map[string]bool
	var resolvedPath string
	if followSymlinks {
		resolvedPath = path
		if detectSymlinkInfiniteLoop {
			symlinkPathsFollowed = make(map[string]bool, 8)
		}
	}
	return walk(path, info, resolvedPath, symlinkPathsFollowed, walkFn)
}

//walk walks the path. It is a helper/sibling function to Walk.
//It takes a resolvedPath into consideration. This way, paths being walked are
//always relative to the path argument, even if symbolic links were resolved).
//
//If resolvedPath is "", then we are not following symbolic links.
//If symlinkPathsFollowed is not nil, then we need to detect infinite loop.
func walk(path string, info os.FileInfo, resolvedPath string,
	symlinkPathsFollowed map[string]bool, walkFn WalkFunc) error {
	if info == nil {
		return errors.New("Walk: Nil FileInfo passed")
	}
	err := walkFn(resolvedPath, info, nil)
	if err != nil {
		if info.IsDir() && err == WalkSkipDir {
			err = nil
		}
		return err
	}
	if resolvedPath != "" && info.Mode()&os.ModeSymlink == os.ModeSymlink {
		path2, err := os.Readlink(resolvedPath)
		if err != nil {
			return err
		}
		//vout("SymLink Path: %v, links to: %v", resolvedPath, path2)
		if symlinkPathsFollowed != nil {
			if _, ok := symlinkPathsFollowed[path2]; ok {
				errMsg := "Potential SymLink Infinite Loop. Path: %v, Link To: %v"
				return fmt.Errorf(errMsg, resolvedPath, path2)
			} else {
				symlinkPathsFollowed[path2] = true
			}
		}
		info2, err := os.Lstat(path2)
		if err != nil {
			return err
		}
		return walk(path, info2, path2, symlinkPathsFollowed, walkFn)
	}
	if info.IsDir() {
		list, err := ioutil.ReadDir(path)
		if err != nil {
			return walkFn(resolvedPath, info, err)
		}
		for _, fileInfo := range list {
			path2 := filepath.Join(path, fileInfo.Name())
			var resolvedPath2 string
			if resolvedPath != "" {
				resolvedPath2 = filepath.Join(resolvedPath, fileInfo.Name())
			}
			err = walk(path2, fileInfo, resolvedPath2, symlinkPathsFollowed, walkFn)
			if err != nil {
				return err
			}
		}
		return nil
	}
	return nil
}
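A minimal sketch of calling util.Walk the way the plugin scanner now does, with symlink following and loop detection enabled; the directory is illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/grafana/grafana/pkg/util"
)

func main() {
	// followSymlinks=true, detectSymlinkInfiniteLoop=true, as in pkg/plugins.
	err := util.Walk("data/plugins", true, true,
		func(resolvedPath string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			fmt.Println(resolvedPath, info.IsDir())
			return nil
		})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}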
@@ -1,112 +1,7 @@
define([
  'angular',
  'jquery',
  'lodash',
  'app/core/config',
  'require',
  'bootstrap',
  'angular-route',
  'angular-sanitize',
  'angular-strap',
  'angular-dragdrop',
  'angular-ui',
  'bindonce',
  'app/core/core',
],
function (angular, $, _, config, appLevelRequire) {
  "use strict";

  var app = angular.module('grafana', []);
  var register_fns = {};
  var preBootModules = [];

  // This stores the grafana version number
  app.constant('grafanaVersion',"@grafanaVersion@");

  /**
   * Tells the application to watch the module, once bootstrapping has completed
   * the modules controller, service, etc. functions will be overwritten to register directly
   * with this application.
   * @param {[type]} module [description]
   * @return {[type]} [description]
   */
  app.useModule = function (module) {
    if (preBootModules) {
      preBootModules.push(module);
    } else {
      _.extend(module, register_fns);
    }
    // push it into the apps dependencies
    apps_deps.push(module.name);
    return module;
  };

  app.config(function($locationProvider, $controllerProvider, $compileProvider, $filterProvider, $provide) {
    register_fns.controller = $controllerProvider.register;
    register_fns.directive = $compileProvider.directive;
    register_fns.factory = $provide.factory;
    register_fns.service = $provide.service;
    register_fns.filter = $filterProvider.register;
  });

  var apps_deps = [
    'grafana.core',
    'ngRoute',
    'ngSanitize',
    '$strap.directives',
    'ang-drag-drop',
    'grafana',
    'pasvaz.bindonce',
    'ui.bootstrap',
    'ui.bootstrap.tpls',
  ];

  var module_types = ['controllers', 'directives', 'factories', 'services', 'filters', 'routes'];

  _.each(module_types, function (type) {
    var module_name = 'grafana.'+type;
    // create the module
    app.useModule(angular.module(module_name, []));
  });

  var preBootRequires = ['app/features/all'];
  var pluginModules = config.bootData.pluginModules || [];

  // add plugin modules
  for (var i = 0; i < pluginModules.length; i++) {
    preBootRequires.push(pluginModules[i]);
  }

  app.boot = function() {
    require(preBootRequires, function () {

      // disable tool tip animation
      $.fn.tooltip.defaults.animation = false;

      // bootstrap the app
      angular
        .element(document)
        .ready(function() {
          angular.bootstrap(document, apps_deps)
            .invoke(['$rootScope', function ($rootScope) {
              _.each(preBootModules, function (module) {
                _.extend(module, register_fns);
              });

              preBootModules = null;

              $rootScope.requireContext = appLevelRequire;
              $rootScope.require = function (deps, fn) {
                var $scope = this;
                $scope.requireContext(deps, function () {
                  var deps = _.toArray(arguments);
                  fn.apply($scope, deps);
                });
              };
            }]);
        });
    });
  };

  return app;
  './grafana'
], function(app) {
  'use strict';
  // backward compatibility hack;
  return app.default;
});

public/app/boot.js (new file, 10 lines)
@@ -0,0 +1,10 @@
(function bootGrafana() {
  'use strict';

  System.import('app/app').then(function(app) {
    app.init();
  }).catch(function(err) {
    console.log('Loading app module failed: ', err);
  });

})();
@@ -1,3 +1,9 @@
// import grafanaCtrl from './grafana_ctrl';
//
// import * as asd from './sidemenu_ctrl';
//
// export {grafanaCtrl};

define([
  './grafana_ctrl',
  './search_ctrl',

@@ -5,7 +5,7 @@ define([
function (angular, coreModule) {
  'use strict';

  coreModule.controller('ErrorCtrl', function($scope, contextSrv) {
  coreModule.default.controller('ErrorCtrl', function($scope, contextSrv) {

    var showSideMenu = contextSrv.sidemenu;
    contextSrv.sidemenu = false;

@@ -1,139 +0,0 @@
define([
  'angular',
  'lodash',
  'jquery',
  '../core_module',
  'app/core/config',
  'app/core/store',
],
function (angular, _, $, coreModule, config, store) {
  "use strict";

  coreModule.controller('GrafanaCtrl', function($scope, alertSrv, utilSrv, $rootScope, $controller, contextSrv) {

    $scope.init = function() {
      $scope.contextSrv = contextSrv;

      $scope._ = _;

      $rootScope.profilingEnabled = store.getBool('profilingEnabled');
      $rootScope.performance = { loadStart: new Date().getTime() };
      $rootScope.appSubUrl = config.appSubUrl;

      if ($rootScope.profilingEnabled) { $scope.initProfiling(); }

      alertSrv.init();
      utilSrv.init();

      $scope.dashAlerts = alertSrv;
    };

    $scope.initDashboard = function(dashboardData, viewScope) {
      $controller('DashboardCtrl', { $scope: viewScope }).init(dashboardData);
    };

    $rootScope.onAppEvent = function(name, callback, localScope) {
      var unbind = $rootScope.$on(name, callback);
      var callerScope = this;
      if (callerScope.$id === 1 && !localScope) {
        console.log('warning rootScope onAppEvent called without localscope');
      }
      if (localScope) {
        callerScope = localScope;
      }
      callerScope.$on('$destroy', unbind);
    };

    $rootScope.appEvent = function(name, payload) {
      $rootScope.$emit(name, payload);
    };

    $rootScope.colors = [
      "#7EB26D","#EAB839","#6ED0E0","#EF843C","#E24D42","#1F78C1","#BA43A9","#705DA0", //1
      "#508642","#CCA300","#447EBC","#C15C17","#890F02","#0A437C","#6D1F62","#584477", //2
      "#B7DBAB","#F4D598","#70DBED","#F9BA8F","#F29191","#82B5D8","#E5A8E2","#AEA2E0", //3
      "#629E51","#E5AC0E","#64B0C8","#E0752D","#BF1B00","#0A50A1","#962D82","#614D93", //4
      "#9AC48A","#F2C96D","#65C5DB","#F9934E","#EA6460","#5195CE","#D683CE","#806EB7", //5
      "#3F6833","#967302","#2F575E","#99440A","#58140C","#052B51","#511749","#3F2B5B", //6
      "#E0F9D7","#FCEACA","#CFFAFF","#F9E2D2","#FCE2DE","#BADFF4","#F9D9F9","#DEDAF7" //7
    ];

    $scope.getTotalWatcherCount = function() {
      var count = 0;
      var scopes = 0;
      var root = $(document.getElementsByTagName('body'));

      var f = function (element) {
        if (element.data().hasOwnProperty('$scope')) {
          scopes++;
          angular.forEach(element.data().$scope.$$watchers, function () {
            count++;
          });
        }

        angular.forEach(element.children(), function (childElement) {
          f($(childElement));
        });
      };

      f(root);
      $rootScope.performance.scopeCount = scopes;
      return count;
    };

    $scope.initProfiling = function() {
      var count = 0;

      $scope.$watch(function digestCounter() {
        count++;
      }, function() {
      });

      $rootScope.performance.panels = [];

      $scope.$on('refresh', function() {
        if ($rootScope.performance.panels.length > 0) {
          var totalRender = 0;
          var totalQuery = 0;

          _.each($rootScope.performance.panels, function(panelTiming) {
            totalRender += panelTiming.render;
            totalQuery += panelTiming.query;
          });

          console.log('total query: ' + totalQuery);
          console.log('total render: ' + totalRender);
          console.log('avg render: ' + totalRender / $rootScope.performance.panels.length);
        }

        $rootScope.performance.panels = [];
      });

      $scope.onAppEvent('dashboard-loaded', function() {
        count = 0;

        setTimeout(function() {
          console.log("Dashboard::Performance Total Digests: " + count);
          console.log("Dashboard::Performance Total Watchers: " + $scope.getTotalWatcherCount());
          console.log("Dashboard::Performance Total ScopeCount: " + $rootScope.performance.scopeCount);

          var timeTaken = $rootScope.performance.allPanelsInitialized - $rootScope.performance.dashboardLoadStart;
          console.log("Dashboard::Performance - All panels initialized in " + timeTaken + " ms");

          // measure digest performance
          var rootDigestStart = window.performance.now();
          for (var i = 0; i < 30; i++) {
            $rootScope.$apply();
          }
          console.log("Dashboard::Performance Root Digest " + ((window.performance.now() - rootDigestStart) / 30));

        }, 3000);

      });

    };

    $scope.init();

  });
});
public/app/core/controllers/grafana_ctrl.ts (new file, 139 lines)
@@ -0,0 +1,139 @@
///<reference path="../../headers/common.d.ts" />

import config = require('app/core/config');
import store = require('app/core/store');

import angular from 'angular';
import $ from 'jquery';
import coreModule from '../core_module';

coreModule.controller('GrafanaCtrl', function($scope, alertSrv, utilSrv, $rootScope, $controller, contextSrv) {

  $scope.init = function() {
    $scope.contextSrv = contextSrv;

    $scope._ = _;

    $rootScope.profilingEnabled = store.getBool('profilingEnabled');
    $rootScope.performance = { loadStart: new Date().getTime() };
    $rootScope.appSubUrl = config.appSubUrl;

    if ($rootScope.profilingEnabled) { $scope.initProfiling(); }

    alertSrv.init();
    utilSrv.init();

    $scope.dashAlerts = alertSrv;
  };

  $scope.initDashboard = function(dashboardData, viewScope) {
    $controller('DashboardCtrl', { $scope: viewScope }).init(dashboardData);
  };

  $rootScope.onAppEvent = function(name, callback, localScope) {
    var unbind = $rootScope.$on(name, callback);
    var callerScope = this;
    if (callerScope.$id === 1 && !localScope) {
      console.log('warning rootScope onAppEvent called without localscope');
    }
    if (localScope) {
      callerScope = localScope;
    }
    callerScope.$on('$destroy', unbind);
  };

  $rootScope.appEvent = function(name, payload) {
    $rootScope.$emit(name, payload);
  };

  $rootScope.colors = [
    "#7EB26D","#EAB839","#6ED0E0","#EF843C","#E24D42","#1F78C1","#BA43A9","#705DA0", //1
    "#508642","#CCA300","#447EBC","#C15C17","#890F02","#0A437C","#6D1F62","#584477", //2
    "#B7DBAB","#F4D598","#70DBED","#F9BA8F","#F29191","#82B5D8","#E5A8E2","#AEA2E0", //3
    "#629E51","#E5AC0E","#64B0C8","#E0752D","#BF1B00","#0A50A1","#962D82","#614D93", //4
    "#9AC48A","#F2C96D","#65C5DB","#F9934E","#EA6460","#5195CE","#D683CE","#806EB7", //5
    "#3F6833","#967302","#2F575E","#99440A","#58140C","#052B51","#511749","#3F2B5B", //6
    "#E0F9D7","#FCEACA","#CFFAFF","#F9E2D2","#FCE2DE","#BADFF4","#F9D9F9","#DEDAF7" //7
  ];

  $scope.getTotalWatcherCount = function() {
    var count = 0;
    var scopes = 0;
    var root = $(document.getElementsByTagName('body'));

    var f = function (element) {
      if (element.data().hasOwnProperty('$scope')) {
        scopes++;
        angular.forEach(element.data().$scope.$$watchers, function () {
          count++;
        });
      }

      angular.forEach(element.children(), function (childElement) {
        f($(childElement));
      });
    };

    f(root);
    $rootScope.performance.scopeCount = scopes;
    return count;
  };

  $scope.initProfiling = function() {
    var count = 0;

    $scope.$watch(function digestCounter() {
      count++;
    }, function() {
    });

    $rootScope.performance.panels = [];

    $scope.$on('refresh', function() {
      if ($rootScope.performance.panels.length > 0) {
        var totalRender = 0;
        var totalQuery = 0;

        _.each($rootScope.performance.panels, function(panelTiming: any) {
          totalRender += panelTiming.render;
          totalQuery += panelTiming.query;
        });

        console.log('total query: ' + totalQuery);
        console.log('total render: ' + totalRender);
        console.log('avg render: ' + totalRender / $rootScope.performance.panels.length);
      }

      $rootScope.performance.panels = [];
    });

    $scope.onAppEvent('dashboard-loaded', function() {
      count = 0;

      setTimeout(function() {
        console.log("Dashboard::Performance Total Digests: " + count);
        console.log("Dashboard::Performance Total Watchers: " + $scope.getTotalWatcherCount());
        console.log("Dashboard::Performance Total ScopeCount: " + $rootScope.performance.scopeCount);

        var timeTaken = $rootScope.performance.allPanelsInitialized - $rootScope.performance.dashboardLoadStart;
        console.log("Dashboard::Performance - All panels initialized in " + timeTaken + " ms");

        // measure digest performance
        var rootDigestStart = window.performance.now();
        for (var i = 0; i < 30; i++) {
          $rootScope.$apply();
        }
        console.log("Dashboard::Performance Root Digest " + ((window.performance.now() - rootDigestStart) / 30));

      }, 3000);

    });

  };

  $scope.init();

});

var grafanaCtrl = {};
export default grafanaCtrl;
@ -7,7 +7,7 @@ define([
|
||||
function (angular, _, $, coreModule) {
|
||||
'use strict';
|
||||
|
||||
coreModule.controller('InspectCtrl', function($scope) {
|
||||
coreModule.default.controller('InspectCtrl', function($scope) {
|
||||
var model = $scope.inspector;
|
||||
|
||||
function getParametersFromQueryString(queryString) {
|
||||
|
@ -6,7 +6,7 @@ define([
|
||||
function (angular, coreModule, config) {
|
||||
'use strict';
|
||||
|
||||
coreModule.controller('InvitedCtrl', function($scope, $routeParams, contextSrv, backendSrv) {
|
||||
coreModule.default.controller('InvitedCtrl', function($scope, $routeParams, contextSrv, backendSrv) {
|
||||
contextSrv.sidemenu = false;
|
||||
$scope.formModel = {};
|
||||
|
||||
|
@ -5,7 +5,7 @@ define([
|
||||
function (angular, coreModule) {
|
||||
'use strict';
|
||||
|
||||
coreModule.controller('JsonEditorCtrl', function($scope) {
|
||||
coreModule.default.controller('JsonEditorCtrl', function($scope) {
|
||||
|
||||
$scope.json = angular.toJson($scope.object, true);
|
||||
$scope.canUpdate = $scope.updateHandler !== void 0;
|
||||
|
@ -6,7 +6,7 @@ define([
|
||||
function (angular, coreModule, config) {
|
||||
'use strict';
|
||||
|
||||
coreModule.controller('LoginCtrl', function($scope, backendSrv, contextSrv, $location) {
|
||||
coreModule.default.controller('LoginCtrl', function($scope, backendSrv, contextSrv, $location) {
|
||||
$scope.formModel = {
|
||||
user: '',
|
||||
email: '',
|
||||
@ -18,6 +18,7 @@ function (angular, coreModule, config) {
|
||||
$scope.googleAuthEnabled = config.googleAuthEnabled;
|
||||
$scope.githubAuthEnabled = config.githubAuthEnabled;
|
||||
$scope.disableUserSignUp = config.disableUserSignUp;
|
||||
$scope.loginHint = config.loginHint;
|
||||
|
||||
$scope.loginMode = true;
|
||||
$scope.submitBtnText = 'Log in';
|
||||
|
@ -5,7 +5,7 @@ define([
|
||||
function (angular, coreModule) {
|
||||
'use strict';
|
||||
|
||||
coreModule.controller('ResetPasswordCtrl', function($scope, contextSrv, backendSrv, $location) {
|
||||
coreModule.default.controller('ResetPasswordCtrl', function($scope, contextSrv, backendSrv, $location) {
|
||||
contextSrv.sidemenu = false;
|
||||
$scope.formModel = {};
|
||||
$scope.mode = 'send';
|
||||
|
@ -7,7 +7,7 @@ define([
|
||||
function (angular, _, coreModule, config) {
|
||||
'use strict';
|
||||
|
||||
coreModule.controller('SearchCtrl', function($scope, $location, $timeout, backendSrv) {
|
||||
coreModule.default.controller('SearchCtrl', function($scope, $location, $timeout, backendSrv) {
|
||||
|
||||
$scope.init = function() {
|
||||
$scope.giveSearchFocus = 0;
|
||||
|
@ -8,7 +8,7 @@ define([
|
||||
function (angular, _, $, coreModule, config) {
|
||||
'use strict';
|
||||
|
||||
coreModule.controller('SideMenuCtrl', function($scope, $location, contextSrv, backendSrv) {
|
||||
coreModule.default.controller('SideMenuCtrl', function($scope, $location, contextSrv, backendSrv) {
|
||||
|
||||
$scope.getUrl = function(url) {
|
||||
return config.appSubUrl + url;
|
||||
@ -122,6 +122,7 @@ function (angular, _, $, coreModule, config) {
|
||||
};
|
||||
|
||||
$scope.init = function() {
|
||||
$scope.showSignout = contextSrv.isSignedIn && !config['authProxyEnabled'];
|
||||
$scope.updateMenu();
|
||||
$scope.$on('$routeChangeSuccess', $scope.updateMenu);
|
||||
};
|
||||
|
@ -2,10 +2,11 @@
|
||||
|
||||
import angular = require('angular');
|
||||
import config = require('app/core/config');
|
||||
import coreModule = require('../core_module');
|
||||
import coreModule from '../core_module';
|
||||
|
||||
export class SignUpCtrl {
|
||||
|
||||
/** @ngInject */
|
||||
constructor(
|
||||
private $scope : any,
|
||||
private $location : any,
|
||||
|
@@ -1,28 +1,31 @@
-///<amd-dependency path="./directives/annotation_tooltip" />
-///<amd-dependency path="./directives/body_class" />
-///<amd-dependency path="./directives/config_modal" />
-///<amd-dependency path="./directives/confirm_click" />
-///<amd-dependency path="./directives/dash_edit_link" />
-///<amd-dependency path="./directives/dash_upload" />
-///<amd-dependency path="./directives/dropdown_typeahead" />
-///<amd-dependency path="./directives/grafana_version_check" />
-///<amd-dependency path="./directives/metric_segment" />
-///<amd-dependency path="./directives/misc" />
-///<amd-dependency path="./directives/ng_model_on_blur" />
-///<amd-dependency path="./directives/password_strenght" />
-///<amd-dependency path="./directives/spectrum_picker" />
-///<amd-dependency path="./directives/tags" />
-///<amd-dependency path="./directives/topnav" />
-///<amd-dependency path="./directives/value_select_dropdown" />
-///<amd-dependency path="./routes/all" />
 ///<reference path="../headers/common.d.ts" />
 ///<reference path="./mod_defs.d.ts" />
 
-///<amd-dependency path="./controllers/all" />
-///<amd-dependency path="./services/all" />
-///<amd-dependency path="./jquery_extended" />
-///<amd-dependency path="./partials" />
+import "./directives/annotation_tooltip";
+import "./directives/body_class";
+import "./directives/config_modal";
+import "./directives/confirm_click";
+import "./directives/dash_edit_link";
+import "./directives/dash_upload";
+import "./directives/dropdown_typeahead";
+import "./directives/grafana_version_check";
+import "./directives/metric_segment";
+import "./directives/misc";
+import "./directives/ng_model_on_blur";
+import "./directives/password_strenght";
+import "./directives/spectrum_picker";
+import "./directives/tags";
+import "./directives/topnav";
+import "./directives/value_select_dropdown";
+import './jquery_extended';
+import './partials';
 
-export * from './directives/array_join'
-export * from './directives/give_focus'
-export * from './filters/filters'
+import {arrayJoin} from './directives/array_join';
+import * as controllers from 'app/core/controllers/all';
+import * as services from 'app/core/services/all';
+import * as routes from 'app/core/routes/all';
+import './filters/filters';
+
+// export * from './directives/give_focus'
+
+export {arrayJoin, controllers, services, routes};
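Two distinct migrations happen in the hunk above. The ///<amd-dependency /> pragmas become bare side-effect imports: nothing is bound locally, the directive files simply run at load time and register themselves on the Angular module. The old export * re-exports become explicit import/export pairs, which makes the package's public surface visible in one place. A condensed sketch of the two forms, assuming the same file layout:

    // Side-effect import: evaluating the module runs its top-level code,
    // which calls coreModule.directive(...) itself. No local binding.
    import './directives/annotation_tooltip';

    // Value imports: bind only what other modules actually consume...
    import {arrayJoin} from './directives/array_join';
    import * as controllers from 'app/core/controllers/all';

    // ...and re-export them explicitly as the package surface.
    export {arrayJoin, controllers};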
@@ -1,5 +1,4 @@
 ///<reference path="../headers/common.d.ts" />
 
-import angular = require('angular');
-
-export = angular.module('grafana.core', ['ngRoute']);
+import angular from 'angular';
+export default angular.module('grafana.core', ['ngRoute']);
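This is the producer side of the change: TypeScript's export-assignment (export =) makes the Angular module the entire module value but forces consumers into import x = require(...) syntax, while export default allows standard ES imports, at the cost of the .default hop for plain JS. A sketch of the after-state consumption, under those assumptions:

    // Before (export =): the module value IS the Angular module.
    //   import coreModule = require('./core_module');
    // After (export default): standard ES import syntax works.
    import coreModule from './core_module';

    // Compiled to AMD/CommonJS, the default export lands on `.default`,
    // which is why every define()-style file in this diff gains that hop.
    coreModule.directive('exampleDirective', () => ({restrict: 'A'}));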
@@ -6,7 +6,7 @@ define([
 function ($, _, coreModule) {
   'use strict';
 
-  coreModule.directive('annotationTooltip', function($sanitize, dashboardSrv, $compile) {
+  coreModule.default.directive('annotationTooltip', function($sanitize, dashboardSrv, $compile) {
 
     function sanitizeString(str) {
       try {
@@ -1,8 +1,9 @@
 ///<reference path="../../headers/common.d.ts" />
 
-import angular = require('angular');
-import _ = require('lodash');
-import coreModule = require('../core_module');
+import angular from 'angular';
+import coreModule from '../core_module';
 
 export function arrayJoin() {
   'use strict';
@@ -6,7 +6,7 @@ define([
 function (_, $, coreModule) {
   'use strict';
 
-  coreModule.directive('bodyClass', function() {
+  coreModule.default.directive('bodyClass', function() {
     return {
       link: function($scope, elem) {

@@ -6,7 +6,7 @@ define([
 function (_, $, coreModule) {
   'use strict';
 
-  coreModule.directive('configModal', function($modal, $q, $timeout) {
+  coreModule.default.directive('configModal', function($modal, $q, $timeout) {
     return {
       restrict: 'A',
       link: function(scope, elem, attrs) {

@@ -4,7 +4,7 @@ define([
 function (coreModule) {
   'use strict';
 
-  coreModule.directive('confirmClick', function() {
+  coreModule.default.directive('confirmClick', function() {
     return {
       restrict: 'A',
       link: function(scope, elem, attrs) {
@@ -11,7 +11,7 @@ function ($, coreModule) {
     'templating': { src: 'app/features/templating/partials/editor.html', title: "Templating" }
   };
 
-  coreModule.directive('dashEditorLink', function($timeout) {
+  coreModule.default.directive('dashEditorLink', function($timeout) {
     return {
       restrict: 'A',
       link: function(scope, elem, attrs) {

@@ -27,7 +27,7 @@ function ($, coreModule) {
     };
   });
 
-  coreModule.directive('dashEditorView', function($compile, $location) {
+  coreModule.default.directive('dashEditorView', function($compile, $location) {
     return {
       restrict: 'A',
       link: function(scope, elem) {
@@ -5,7 +5,7 @@ define([
 function (coreModule, kbn) {
   'use strict';
 
-  coreModule.directive('dashUpload', function(timer, alertSrv, $location) {
+  coreModule.default.directive('dashUpload', function(timer, alertSrv, $location) {
     return {
       restrict: 'A',
       link: function(scope) {
@@ -6,7 +6,7 @@ define([
 function (_, $, coreModule) {
   'use strict';
 
-  coreModule.directive('dropdownTypeahead', function($compile) {
+  coreModule.default.directive('dropdownTypeahead', function($compile) {
 
     var inputTemplate = '<input type="text"'+
       ' class="tight-form-input input-medium tight-form-input"' +
@@ -1,9 +1,9 @@
 ///<reference path="../../headers/common.d.ts" />
 
-import angular = require('angular');
-import coreModule = require('../core_module');
+import coreModule from '../core_module';
 
-coreModule.directive('giveFocus', function() {
+coreModule.default.directive('giveFocus', function() {
   return function(scope, element, attrs) {
     element.click(function(e) {
       e.stopPropagation();
@@ -4,7 +4,7 @@ define([
 function (coreModule) {
   'use strict';
 
-  coreModule.directive('grafanaVersionCheck', function($http, contextSrv) {
+  coreModule.default.directive('grafanaVersionCheck', function($http, contextSrv) {
     return {
       restrict: 'A',
       link: function(scope, elem) {
@@ -6,7 +6,7 @@ define([
 function (_, $, coreModule) {
   'use strict';
 
-  coreModule.directive('metricSegment', function($compile, $sce) {
+  coreModule.default.directive('metricSegment', function($compile, $sce) {
     var inputTemplate = '<input type="text" data-provide="typeahead" ' +
       ' class="tight-form-clear-input input-medium"' +
       ' spellcheck="false" style="display:none"></input>';
@@ -55,8 +55,8 @@ function (_, $, coreModule) {
       });
     };
 
-    $scope.switchToLink = function() {
-      if (linkMode) { return; }
+    $scope.switchToLink = function(fromClick) {
+      if (linkMode && !fromClick) { return; }
 
       clearTimeout(cancelBlur);
       cancelBlur = null;
@@ -69,7 +69,7 @@ function (_, $, coreModule) {
     $scope.inputBlur = function() {
       // need to have long delay because the blur
       // happens long before the click event on the typeahead options
-      cancelBlur = setTimeout($scope.switchToLink, 100);
+      cancelBlur = setTimeout($scope.switchToLink, 200);
     };
 
     $scope.source = function(query, callback) {
@@ -100,7 +100,7 @@ function (_, $, coreModule) {
     }
 
     $input.val(value);
-    $scope.switchToLink();
+    $scope.switchToLink(true);
 
     return value;
   };
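The three metric-segment hunks above are one coordinated fix for a blur-versus-click race: blur fires before the click on a typeahead option lands, so inputBlur only schedules the switch back to link mode (now with a 200ms grace period instead of 100ms), while an actual selection forces the switch immediately via switchToLink(true), which also works when the scheduled switch has already run. A stripped-down sketch of the mechanism, with the directive scaffolding removed:

    // Blur only *schedules* leaving edit mode; a typeahead click can still
    // land inside the grace period and commit its value first.
    let linkMode = false;
    let cancelBlur: ReturnType<typeof setTimeout> | null = null;

    function switchToLink(fromClick?: boolean) {
      // A click-driven switch proceeds even if the blur timer already fired.
      if (linkMode && !fromClick) { return; }
      if (cancelBlur) { clearTimeout(cancelBlur); }
      cancelBlur = null;
      linkMode = true; // hide the input, show the link element
    }

    function inputBlur() {
      // 200ms (raised from 100ms in this diff) lets the click event on the
      // typeahead options arrive before edit mode is torn down.
      cancelBlur = setTimeout(switchToLink, 200);
    }

    function updater(value: string): string {
      switchToLink(true); // committing a value beats the pending blur timeout
      return value;
    }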
@@ -157,7 +157,7 @@ function (_, $, coreModule) {
     };
   });
 
-  coreModule.directive('metricSegmentModel', function(uiSegmentSrv, $q) {
+  coreModule.default.directive('metricSegmentModel', function(uiSegmentSrv, $q) {
     return {
       template: '<metric-segment segment="segment" get-options="getOptionsInternal()" on-change="onSegmentChange()"></metric-segment>',
       restrict: 'E',
@@ -6,7 +6,7 @@ define([
 function (angular, coreModule, kbn) {
   'use strict';
 
-  coreModule.directive('tip', function($compile) {
+  coreModule.default.directive('tip', function($compile) {
     return {
       restrict: 'E',
       link: function(scope, elem, attrs) {

@@ -18,7 +18,7 @@ function (angular, coreModule, kbn) {
     };
   });
 
-  coreModule.directive('watchChange', function() {
+  coreModule.default.directive('watchChange', function() {
     return {
       scope: { onchange: '&watchChange' },
       link: function(scope, element) {

@@ -31,7 +31,7 @@ function (angular, coreModule, kbn) {
     };
   });
 
-  coreModule.directive('editorOptBool', function($compile) {
+  coreModule.default.directive('editorOptBool', function($compile) {
     return {
       restrict: 'E',
       link: function(scope, elem, attrs) {

@@ -51,7 +51,7 @@ function (angular, coreModule, kbn) {
     };
   });
 
-  coreModule.directive('editorCheckbox', function($compile, $interpolate) {
+  coreModule.default.directive('editorCheckbox', function($compile, $interpolate) {
     return {
       restrict: 'E',
       link: function(scope, elem, attrs) {

@@ -73,7 +73,7 @@ function (angular, coreModule, kbn) {
     };
   });
 
-  coreModule.directive('gfDropdown', function ($parse, $compile, $timeout) {
+  coreModule.default.directive('gfDropdown', function ($parse, $compile, $timeout) {
     function buildTemplate(items, placement) {
       var upclass = placement === 'top' ? 'dropup' : '';
       var ul = [
@@ -6,7 +6,7 @@ define([
 function (coreModule, kbn, rangeUtil) {
   'use strict';
 
-  coreModule.directive('ngModelOnblur', function() {
+  coreModule.default.directive('ngModelOnblur', function() {
     return {
       restrict: 'A',
       priority: 1,
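For context, ngModelOnblur's job is to defer the ng-model update until the input loses focus instead of updating on every keystroke; priority: 1 makes it run after the stock input directive so the keystroke handlers can be removed. A hedged sketch of the usual shape of such a directive; the link body beyond the visible lines is assumed:

    import coreModule from '../core_module';

    coreModule.directive('ngModelOnblur', function() {
      return {
        restrict: 'A',
        require: 'ngModel',
        priority: 1, // run after the built-in input directive
        link: function(scope: any, elm: any, attr: any, ngModelCtrl: any) {
          if (attr.type === 'radio' || attr.type === 'checkbox') { return; }
          // Drop the keystroke-driven listeners, update once on blur.
          elm.off('input keydown change');
          elm.on('blur', function() {
            scope.$apply(function() {
              ngModelCtrl.$setViewValue(elm.val());
            });
          });
        },
      };
    });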
@@ -26,7 +26,7 @@ function (coreModule, kbn, rangeUtil) {
     };
   });
 
-  coreModule.directive('emptyToNull', function () {
+  coreModule.default.directive('emptyToNull', function () {
     return {
       restrict: 'A',
       require: 'ngModel',

@@ -39,7 +39,7 @@ function (coreModule, kbn, rangeUtil) {
     };
   });
 
-  coreModule.directive('validTimeSpan', function() {
+  coreModule.default.directive('validTimeSpan', function() {
     return {
       require: 'ngModel',
       link: function(scope, elm, attrs, ctrl) {
@@ -4,7 +4,7 @@ define([
 function (coreModule) {
   'use strict';
 
-  coreModule.directive('passwordStrength', function() {
+  coreModule.default.directive('passwordStrength', function() {
     var template = '<div class="password-strength small" ng-if="!loginMode" ng-class="strengthClass">' +
                    '<em>{{strengthText}}</em>' +
                    '</div>';
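The template here binds strengthText and strengthClass; the link function (outside this hunk) watches the password model and classifies it as the user types. A small hedged sketch of that kind of scoring, with bucket thresholds and strings assumed rather than taken from the diff:

    // Hypothetical buckets; the real thresholds live in the directive's
    // link function, which this hunk does not show.
    function strengthOf(password: string): {text: string; cls: string} {
      if (password.length < 4) {
        return {text: 'strength: weak.', cls: 'password-strength-bad'};
      }
      if (password.length < 8) {
        return {text: 'strength: ok.', cls: 'password-strength-ok'};
      }
      return {text: 'strength: strong.', cls: 'password-strength-good'};
    }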
@@ -6,7 +6,7 @@ define([
 function (angular, coreModule) {
   'use strict';
 
-  coreModule.directive('spectrumPicker', function() {
+  coreModule.default.directive('spectrumPicker', function() {
     return {
       restrict: 'E',
       require: 'ngModel',
@@ -39,7 +39,7 @@ function (angular, $, coreModule) {
     element.css("border-color", borderColor);
   }
 
-  coreModule.directive('tagColorFromName', function() {
+  coreModule.default.directive('tagColorFromName', function() {
     return {
       scope: { tagColorFromName: "=" },
       link: function (scope, element) {
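tagColorFromName's purpose is a stable mapping from tag name to color, so the same tag renders identically everywhere it appears. A hedged sketch of the usual hash-into-palette approach; the palette and hash here are illustrative, not the diff's exact values:

    // Deterministic color: hash the tag name, index into a fixed palette.
    const palette = ['#E24D42', '#1F78C1', '#BA43A9', '#705DA0', '#466803'];

    function tagColorFromName(name: string): string {
      let hash = 0;
      for (let i = 0; i < name.length; i++) {
        hash = ((hash << 5) - hash) + name.charCodeAt(i); // djb2-style rolling hash
        hash |= 0; // clamp to a 32-bit integer
      }
      return palette[Math.abs(hash) % palette.length];
    }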
@@ -48,7 +48,7 @@ function (angular, $, coreModule) {
     };
   });
 
-  coreModule.directive('bootstrapTagsinput', function() {
+  coreModule.default.directive('bootstrapTagsinput', function() {
 
     function getItemProperty(scope, property) {
       if (!property) {

@@ -4,7 +4,7 @@ define([
 function (coreModule) {
   'use strict';
 
-  coreModule.directive('topnav', function($rootScope, contextSrv) {
+  coreModule.default.directive('topnav', function($rootScope, contextSrv) {
     return {
       restrict: 'E',
       transclude: true,
Some files were not shown because too many files have changed in this diff.