mirror of
https://github.com/grafana/grafana.git
synced 2025-02-25 18:55:37 -06:00
Merge branch 'master' into panel-title-menu-ux
This commit is contained in:
commit
4206f98ba4
@ -3,7 +3,7 @@ root = true
|
|||||||
|
|
||||||
[*.go]
|
[*.go]
|
||||||
indent_style = tab
|
indent_style = tab
|
||||||
indent_size = 2
|
indent_size = 4
|
||||||
charset = utf-8
|
charset = utf-8
|
||||||
trim_trailing_whitespace = true
|
trim_trailing_whitespace = true
|
||||||
insert_final_newline = true
|
insert_final_newline = true
|
||||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -25,6 +25,7 @@ public/css/*.min.css
|
|||||||
*.swp
|
*.swp
|
||||||
.idea/
|
.idea/
|
||||||
*.iml
|
*.iml
|
||||||
|
*.tmp
|
||||||
.vscode/
|
.vscode/
|
||||||
|
|
||||||
/data/*
|
/data/*
|
||||||
|
100
CHANGELOG.md
100
CHANGELOG.md
@ -1,25 +1,117 @@
|
|||||||
# 4.3.0 (unreleased)
|
# 4.4.0 (unreleased)
|
||||||
|
|
||||||
|
## New Features
|
||||||
|
**Dashboard History**: View dashboard version history, compare any two versions (summary & json diffs), restore to old version. This big feature
|
||||||
|
was contributed by **Walmart Labs**. Big thanks to them for this massive contribution!
|
||||||
|
Initial feature request: [#4638](https://github.com/grafana/grafana/issues/4638)
|
||||||
|
Pull Request: [#8472](https://github.com/grafana/grafana/pull/8472)
|
||||||
|
|
||||||
|
## Enhancements
|
||||||
|
* **Elasticsearch**: Added filter aggregation label [#8420](https://github.com/grafana/grafana/pull/8420), thx [@tianzk](github.com/tianzk)
|
||||||
|
* **Sensu**: Added option for source and handler [#8405](https://github.com/grafana/grafana/pull/8405), thx [@joemiller](github.com/joemiller)
|
||||||
|
* **CSV**: Configurable csv export datetime format [#8058](https://github.com/grafana/grafana/issues/8058), thx [@cederigo](github.com/cederigo)
|
||||||
|
|
||||||
|
# 4.3.2 (2017-05-31)
|
||||||
|
|
||||||
|
## Bug fixes
|
||||||
|
|
||||||
|
* **InfluxDB**: Fixed issue with query editor not showing ALIAS BY input field when in text editor mode [#8459](https://github.com/grafana/grafana/issues/8459)
|
||||||
|
* **Graph Log Scale**: Fixed issue with log scale going below x-axis [#8244](https://github.com/grafana/grafana/issues/8244)
|
||||||
|
* **Playlist**: Fixed dashboard play order issue [#7688](https://github.com/grafana/grafana/issues/7688)
|
||||||
|
* **Elasticsearch**: Fixed table query issue with ES 2.x [#8467](https://github.com/grafana/grafana/issues/8467), thx [@goldeelox](https://github.com/goldeelox)
|
||||||
|
|
||||||
|
## Changes
|
||||||
|
* **Lazy Loading Of Panels**: Panels are no longer loaded as they are scrolled into view, this was reverted due to Chrome bug, might be reintroduced when Chrome fixes it's JS blocking behavior on scroll. [#8500](https://github.com/grafana/grafana/issues/8500)
|
||||||
|
|
||||||
|
# 4.3.1 (2017-05-23)
|
||||||
|
|
||||||
|
## Bug fixes
|
||||||
|
|
||||||
|
* **S3 image upload**: Fixed image url issue for us-east-1 (us standard) region. If you were missing slack images for alert notifications this should fix it. [#8444](https://github.com/grafana/grafana/issues/8444)
|
||||||
|
|
||||||
|
# 4.3.0-stable (2017-05-23)
|
||||||
|
|
||||||
|
## Bug fixes
|
||||||
|
|
||||||
|
* **Gzip**: Fixed crash when gzip was enabled [#8380](https://github.com/grafana/grafana/issues/8380)
|
||||||
|
* **Graphite**: Fixed issue with Toggle edit mode did in query editor [#8377](https://github.com/grafana/grafana/issues/8377)
|
||||||
|
* **Alerting**: Fixed issue with state history not showing query execution errors [#8412](https://github.com/grafana/grafana/issues/8412)
|
||||||
|
* **Alerting**: Fixed issue with missing state history events/annotations when using sqlite3 database [#7992](https://github.com/grafana/grafana/issues/7992)
|
||||||
|
* **Sqlite**: Fixed with database table locked and using sqlite3 database [#7992](https://github.com/grafana/grafana/issues/7992)
|
||||||
|
* **Alerting**: Fixed issue with annotations showing up in unsaved dashboards, new graph & alert panel. [#8361](https://github.com/grafana/grafana/issues/8361)
|
||||||
|
* **webdav**: Fixed http proxy env variable support for webdav image upload [#7922](https://github.com/grafana/grafana/issues/79222), thx [@berghauz](https://github.com/berghauz)
|
||||||
|
* **Prometheus**: Fixed issue with hiding query [#8413](https://github.com/grafana/grafana/issues/8413)
|
||||||
|
|
||||||
|
## Enhancements
|
||||||
|
|
||||||
|
* **VictorOps**: Now supports panel image & auto resolve [#8431](https://github.com/grafana/grafana/pull/8431), thx [@davidmscott](https://github.com/davidmscott)
|
||||||
|
* **Alerting**: Alert annotations now provide more info [#8421](https://github.com/grafana/grafana/pull/8421)
|
||||||
|
|
||||||
|
# 4.3.0-beta1 (2017-05-12)
|
||||||
|
|
||||||
## Enhancements
|
## Enhancements
|
||||||
|
|
||||||
* **InfluxDB**: influxdb query builder support for ORDER BY and LIMIT (allows TOPN queries) [#6065](https://github.com/grafana/grafana/issues/6065) Support influxdb's SLIMIT Feature [#7232](https://github.com/grafana/grafana/issues/7232) thx [@thuck](https://github.com/thuck)
|
* **InfluxDB**: influxdb query builder support for ORDER BY and LIMIT (allows TOPN queries) [#6065](https://github.com/grafana/grafana/issues/6065) Support influxdb's SLIMIT Feature [#7232](https://github.com/grafana/grafana/issues/7232) thx [@thuck](https://github.com/thuck)
|
||||||
* **InfluxDB**: Small fix for the "glow" when focus the field for LIMIT and SLIMIT [#7799](https://github.com/grafana/grafana/pull/7799) thx [@thuck](https://github.com/thuck)
|
|
||||||
* **Panels**: Delay loading & Lazy load panels as they become visible (scrolled into view) [#5216](https://github.com/grafana/grafana/issues/5216) thx [@jifwin](https://github.com/jifwin)
|
* **Panels**: Delay loading & Lazy load panels as they become visible (scrolled into view) [#5216](https://github.com/grafana/grafana/issues/5216) thx [@jifwin](https://github.com/jifwin)
|
||||||
* **Graph**: Support auto grid min/max when using log scale [#3090](https://github.com/grafana/grafana/issues/3090), thx [@bigbenhur](https://github.com/bigbenhur)
|
* **Graph**: Support auto grid min/max when using log scale [#3090](https://github.com/grafana/grafana/issues/3090), thx [@bigbenhur](https://github.com/bigbenhur)
|
||||||
* **Elasticsearch**: Support histogram aggregations [#3164](https://github.com/grafana/grafana/issues/3164)
|
* **Graph**: Support for histograms [#600](https://github.com/grafana/grafana/issues/600)
|
||||||
|
* **Prometheus**: Support table response formats (column per label) [#6140](https://github.com/grafana/grafana/issues/6140), thx [@mtanda](https://github.com/mtanda)
|
||||||
|
* **Single Stat Panel**: support for non time series data [#6564](https://github.com/grafana/grafana/issues/6564)
|
||||||
|
* **Server**: Monitoring Grafana (health check endpoint) [#3302](https://github.com/grafana/grafana/issues/3302)
|
||||||
|
* **Heatmap**: Heatmap Panel [#7934](https://github.com/grafana/grafana/pull/7934)
|
||||||
|
* **Elasticsearch**: histogram aggregation [#3164](https://github.com/grafana/grafana/issues/3164)
|
||||||
|
|
||||||
## Minor Enchancements
|
## Minor Enhancements
|
||||||
|
|
||||||
|
* **InfluxDB**: Small fix for the "glow" when focus the field for LIMIT and SLIMIT [#7799](https://github.com/grafana/grafana/pull/7799) thx [@thuck](https://github.com/thuck)
|
||||||
* **Prometheus**: Make Prometheus query field a textarea [#7663](https://github.com/grafana/grafana/issues/7663), thx [@hagen1778](https://github.com/hagen1778)
|
* **Prometheus**: Make Prometheus query field a textarea [#7663](https://github.com/grafana/grafana/issues/7663), thx [@hagen1778](https://github.com/hagen1778)
|
||||||
|
* **Prometheus**: Step parameter changed semantics to min step to reduce the load on Prometheus and rendering in browser [#8073](https://github.com/grafana/grafana/pull/8073), thx [@bobrik](https://github.com/bobrik)
|
||||||
* **Templating**: Should not be possible to create self-referencing (recursive) template variable definitions [#7614](https://github.com/grafana/grafana/issues/7614) thx [@thuck](https://github.com/thuck)
|
* **Templating**: Should not be possible to create self-referencing (recursive) template variable definitions [#7614](https://github.com/grafana/grafana/issues/7614) thx [@thuck](https://github.com/thuck)
|
||||||
* **Cloudwatch**: Correctly obtain IAM roles within ECS container tasks [#7892](https://github.com/grafana/grafana/issues/7892) thx [@gomlgs](https://github.com/gomlgs)
|
* **Cloudwatch**: Correctly obtain IAM roles within ECS container tasks [#7892](https://github.com/grafana/grafana/issues/7892) thx [@gomlgs](https://github.com/gomlgs)
|
||||||
* **Units**: New number format: Scientific notation [#7781](https://github.com/grafana/grafana/issues/7781) thx [@cadnce](https://github.com/cadnce)
|
* **Units**: New number format: Scientific notation [#7781](https://github.com/grafana/grafana/issues/7781) thx [@cadnce](https://github.com/cadnce)
|
||||||
* **Oauth**: Add common type for oauth authorization errors [#6428](https://github.com/grafana/grafana/issues/6428) thx [@amenzhinsky](https://github.com/amenzhinsky)
|
* **Oauth**: Add common type for oauth authorization errors [#6428](https://github.com/grafana/grafana/issues/6428) thx [@amenzhinsky](https://github.com/amenzhinsky)
|
||||||
* **Templating**: Data source variable now supports multi value and panel repeats [#7030](https://github.com/grafana/grafana/issues/7030) thx [@mtanda](https://github.com/mtanda)
|
* **Templating**: Data source variable now supports multi value and panel repeats [#7030](https://github.com/grafana/grafana/issues/7030) thx [@mtanda](https://github.com/mtanda)
|
||||||
|
* **Telegram**: Telegram alert is not sending metric and legend. [#8110](https://github.com/grafana/grafana/issues/8110), thx [@bashgeek](https://github.com/bashgeek)
|
||||||
|
* **Graph**: Support dashed lines [#514](https://github.com/grafana/grafana/issues/514), thx [@smalik03](https://github.com/smalik03)
|
||||||
|
* **Table**: Support to change column header text [#3551](https://github.com/grafana/grafana/issues/3551)
|
||||||
|
* **Alerting**: Better error when SMTP is not configured [#8093](https://github.com/grafana/grafana/issues/8093)
|
||||||
|
* **Pushover**: Add an option to attach graph image link in Pushover notification [#8043](https://github.com/grafana/grafana/issues/8043) thx [@devkid](https://github.com/devkid)
|
||||||
|
* **WebDAV**: Allow to set different ImageBaseUrl for WebDAV upload and image link [#7914](https://github.com/grafana/grafana/issues/7914)
|
||||||
|
* **Panels**: type-ahead mixed datasource selection [#7697](https://github.com/grafana/grafana/issues/7697) thx [@mtanda](https://github.com/mtanda)
|
||||||
|
* **Security**:User enumeration problem [#7619](https://github.com/grafana/grafana/issues/7619)
|
||||||
|
* **InfluxDB**: Register new queries available in InfluxDB - Holt Winters [#5619](https://github.com/grafana/grafana/issues/5619) thx [@rikkuness](https://github.com/rikkuness)
|
||||||
|
* **Server**: Support listening on a UNIX socket [#4030](https://github.com/grafana/grafana/issues/4030), thx [@mitjaziv](https://github.com/mitjaziv)
|
||||||
|
* **Graph**: Support log scaling for values smaller 1 [#5278](https://github.com/grafana/grafana/issues/5278)
|
||||||
|
* **InfluxDB**: Slow 'select measurement' rendering for InfluxDB [#2524](https://github.com/grafana/grafana/issues/2524), thx [@sbhenderson](https://github.com/sbhenderson)
|
||||||
|
* **Config**: Configurable signout menu activation [#7968](https://github.com/grafana/grafana/pull/7968), thx [@seuf](https://github.com/seuf)
|
||||||
|
|
||||||
## Fixes
|
## Fixes
|
||||||
* **Table Panel**: Fixed annotation display in table panel, [#8023](https://github.com/grafana/grafana/issues/8023)
|
* **Table Panel**: Fixed annotation display in table panel, [#8023](https://github.com/grafana/grafana/issues/8023)
|
||||||
* **Dashboard**: If refresh is blocked due to tab not visible, then refresh when it becomes visible [#8076](https://github.com/grafana/grafana/issues/8076) thanks [@SimenB](https://github.com/SimenB)
|
* **Dashboard**: If refresh is blocked due to tab not visible, then refresh when it becomes visible [#8076](https://github.com/grafana/grafana/issues/8076) thanks [@SimenB](https://github.com/SimenB)
|
||||||
|
* **Snapshots**: Fixed problem with annotations & snapshots [#7659](https://github.com/grafana/grafana/issues/7659)
|
||||||
|
* **Graph**: MetricSegment loses type when value is an asterisk [#8277](https://github.com/grafana/grafana/issues/8277), thx [@Gordiychuk](https://github.com/Gordiychuk)
|
||||||
|
* **Alerting**: Alert notifications do not show charts when using a non public S3 bucket [#8250](https://github.com/grafana/grafana/issues/8250) thx [@rogerswingle](https://github.com/rogerswingle)
|
||||||
|
* **Graph**: 100% client CPU usage on red alert glow animation [#8222](https://github.com/grafana/grafana/issues/8222)
|
||||||
|
* **InfluxDB**: Templating: "All" query does match too much [#8165](https://github.com/grafana/grafana/issues/8165)
|
||||||
|
* **Dashboard**: Description tooltip is not fully displayed [#7970](https://github.com/grafana/grafana/issues/7970)
|
||||||
|
* **Proxy**: Redirect after switching Org does not obey sub path in root_url (using reverse proxy) [#8089](https://github.com/grafana/grafana/issues/8089)
|
||||||
|
* **Templating**: Restoration of ad-hoc variable from URL does not work correctly [#8056](https://github.com/grafana/grafana/issues/8056) thx [@tamayika](https://github.com/tamayika)
|
||||||
|
* **InfluxDB**: timeFilter cannot be used twice in alerts [#7969](https://github.com/grafana/grafana/issues/7969)
|
||||||
|
* **MySQL**: 4-byte UTF8 not supported when using MySQL database (allows Emojis) [#7958](https://github.com/grafana/grafana/issues/7958)
|
||||||
|
* **Alerting**: api/alerts and api/alert/:id hold previous data for "message" and "Message" field when field value is changed from "some string" to empty string. [#7927](https://github.com/grafana/grafana/issues/7927)
|
||||||
|
* **Graph**: Cannot add fill below to series override [#7916](https://github.com/grafana/grafana/issues/7916)
|
||||||
|
* **InfluxDB**: Influxb Datasource test passes even if the Database doesn't exist [#7864](https://github.com/grafana/grafana/issues/7864)
|
||||||
|
* **Prometheus**: Displaying Prometheus annotations is incredibly slow [#7750](https://github.com/grafana/grafana/issues/7750), thx [@mtanda](https://github.com/mtanda)
|
||||||
|
* **Graphite**: grafana generates empty find query to graphite -> 422 Unprocessable Entity [#7740](https://github.com/grafana/grafana/issues/7740)
|
||||||
|
* **Admin**: make organisation filter case insensitive [#8194](https://github.com/grafana/grafana/issues/8194), thx [@Alexander-N](https://github.com/Alexander-N)
|
||||||
|
|
||||||
|
## Changes
|
||||||
|
* **Elasticsearch**: Changed elasticsearch Terms aggregation to default to Min Doc Count to 1, and sort order to Top [#8321](https://github.com/grafana/grafana/issues/8321)
|
||||||
|
|
||||||
|
## Tech
|
||||||
|
|
||||||
|
* **Library Upgrade**: inconshreveable/log15 outdated - no support for solaris [#8262](https://github.com/grafana/grafana/issues/8262)
|
||||||
|
* **Library Upgrade**: Upgrade Macaron [#7600](https://github.com/grafana/grafana/issues/7600)
|
||||||
|
|
||||||
# 4.2.0 (2017-03-22)
|
# 4.2.0 (2017-03-22)
|
||||||
## Minor Enhancements
|
## Minor Enhancements
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
Copyright 2014-2016 Torkel Ödegaard, Raintank Inc.
|
Copyright 2014-2017 Torkel Ödegaard, Raintank Inc.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you
|
Licensed under the Apache License, Version 2.0 (the "License"); you
|
||||||
may not use this file except in compliance with the License. You may
|
may not use this file except in compliance with the License. You may
|
||||||
|
37
README.md
37
README.md
@ -1,4 +1,4 @@
|
|||||||
[Grafana](https://grafana.com) [](https://circleci.com/gh/grafana/grafana)
|
[Grafana](https://grafana.com) [](https://circleci.com/gh/grafana/grafana)
|
||||||
================
|
================
|
||||||
[Website](https://grafana.com) |
|
[Website](https://grafana.com) |
|
||||||
[Twitter](https://twitter.com/grafana) |
|
[Twitter](https://twitter.com/grafana) |
|
||||||
@ -17,14 +17,9 @@ Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.
|
|||||||
- [What's New in Grafana 4.0](http://docs.grafana.org/guides/whats-new-in-v4/)
|
- [What's New in Grafana 4.0](http://docs.grafana.org/guides/whats-new-in-v4/)
|
||||||
- [What's New in Grafana 4.1](http://docs.grafana.org/guides/whats-new-in-v4-1/)
|
- [What's New in Grafana 4.1](http://docs.grafana.org/guides/whats-new-in-v4-1/)
|
||||||
- [What's New in Grafana 4.2](http://docs.grafana.org/guides/whats-new-in-v4-2/)
|
- [What's New in Grafana 4.2](http://docs.grafana.org/guides/whats-new-in-v4-2/)
|
||||||
|
- [What's New in Grafana 4.3](http://docs.grafana.org/guides/whats-new-in-v4-3/)
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
### Graphite Target Editor
|
|
||||||
- Graphite target expression parser
|
|
||||||
- Feature rich query composer
|
|
||||||
- Quickly add and edit functions & parameters
|
|
||||||
- Templated queries
|
|
||||||
- [See it in action](http://docs.grafana.org/datasources/graphite/)
|
|
||||||
|
|
||||||
### Graphing
|
### Graphing
|
||||||
- Fast rendering, even over large timespans
|
- Fast rendering, even over large timespans
|
||||||
@ -48,16 +43,23 @@ Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.
|
|||||||
- [Time range controls](http://docs.grafana.org/reference/timerange/)
|
- [Time range controls](http://docs.grafana.org/reference/timerange/)
|
||||||
- [Share snapshots publicly](http://docs.grafana.org/v2.0/reference/sharing/)
|
- [Share snapshots publicly](http://docs.grafana.org/v2.0/reference/sharing/)
|
||||||
|
|
||||||
### Elasticsearch
|
|
||||||
- Feature rich query editor UI
|
|
||||||
|
|
||||||
### InfluxDB
|
### InfluxDB
|
||||||
- Use InfluxDB as a metric data source, annotation source
|
- Use InfluxDB as a metric data source, annotation source
|
||||||
- Query editor with series and column typeahead, easy group by and function selection
|
- Query editor with field and tag typeahead, easy group by and function selection
|
||||||
|
|
||||||
### OpenTSDB
|
### Graphite
|
||||||
- Use as metric data source
|
- Graphite target expression parser
|
||||||
- Query editor with metric name typeahead and tag filtering
|
- Feature rich query composer
|
||||||
|
- Quickly add and edit functions & parameters
|
||||||
|
- Templated queries
|
||||||
|
- [See it in action](http://docs.grafana.org/datasources/graphite/)
|
||||||
|
|
||||||
|
### Elasticsearch, Prometheus & OpenTSDB
|
||||||
|
- Feature rich query editor UI
|
||||||
|
|
||||||
|
### Alerting
|
||||||
|
- Define alert rules using graphs & query conditions
|
||||||
|
- Schedule & evalute alert rules, send notifications to Slack, Hipchat, Email, PagerDuty, etc.
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
There are no dependencies except an external time series data store. For dashboards and user accounts Grafana can use an embedded
|
There are no dependencies except an external time series data store. For dashboards and user accounts Grafana can use an embedded
|
||||||
@ -78,8 +80,8 @@ the latest master builds [here](https://grafana.com/grafana/download)
|
|||||||
|
|
||||||
### Dependencies
|
### Dependencies
|
||||||
|
|
||||||
- Go 1.8
|
- Go 1.8.1
|
||||||
- NodeJS v4+
|
- NodeJS LTS
|
||||||
|
|
||||||
### Get Code
|
### Get Code
|
||||||
|
|
||||||
@ -144,8 +146,7 @@ Create a custom.ini in the conf directory to override default configuration opti
|
|||||||
You only need to add the options you want to override. Config files are applied in the order of:
|
You only need to add the options you want to override. Config files are applied in the order of:
|
||||||
|
|
||||||
1. grafana.ini
|
1. grafana.ini
|
||||||
2. dev.ini (if found)
|
1. custom.ini
|
||||||
3. custom.ini
|
|
||||||
|
|
||||||
## Create a pull request
|
## Create a pull request
|
||||||
Before or after you create a pull request, sign the [contributor license agreement](http://docs.grafana.org/project/cla/).
|
Before or after you create a pull request, sign the [contributor license agreement](http://docs.grafana.org/project/cla/).
|
||||||
|
31
ROADMAP.md
Normal file
31
ROADMAP.md
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
# Roadmap (2017-04-23)
|
||||||
|
|
||||||
|
This roadmap is a tentative plan for the core development team. Things change constantly as PRs come in and priorities change.
|
||||||
|
But it will give you an idea of our current vision and plan.
|
||||||
|
|
||||||
|
### Short term (1-4 months)
|
||||||
|
|
||||||
|
- New Heatmap Panel (Implemented and available in master)
|
||||||
|
- Support for MySQL & Postgres as data sources (Work started and a alpha version for MySQL is available in master)
|
||||||
|
- User Groups & Dashboard folders with ACLs (work started, not yet completed, https://github.com/grafana/grafana/issues/1611#issuecomment-287742633)
|
||||||
|
- Improve new user UX
|
||||||
|
- Improve docs
|
||||||
|
- Support for alerting for Elasticsearch (can be tested in [branch](https://github.com/grafana/grafana/tree/alerting-elasticsearch) but needs more work)
|
||||||
|
- Graph annotations (create from grafana, region annotations, better annotation viz)
|
||||||
|
- Improve alerting (clustering, silence rules)
|
||||||
|
|
||||||
|
### Long term
|
||||||
|
|
||||||
|
- Improved dashboard panel layout engine (to make it easier and enable more flexible layouts)
|
||||||
|
- Backend plugins to support more Auth options, Alerting data sources & notifications
|
||||||
|
- Universial time series transformations for any data source (meta queries)
|
||||||
|
- Reporting
|
||||||
|
- Web socket & live data streams
|
||||||
|
- Migrate to Angular2
|
||||||
|
|
||||||
|
|
||||||
|
### Outside contributions
|
||||||
|
We know this is being worked on right now by contributors (and we hope to merge it when it's ready).
|
||||||
|
|
||||||
|
- Dashboard revisions (be able to revert dashboard changes)
|
||||||
|
- Clustering for alert engine (load distribution)
|
2
build.go
2
build.go
@ -235,7 +235,7 @@ func createRpmPackages() {
|
|||||||
defaultFileSrc: "packaging/rpm/sysconfig/grafana-server",
|
defaultFileSrc: "packaging/rpm/sysconfig/grafana-server",
|
||||||
systemdFileSrc: "packaging/rpm/systemd/grafana-server.service",
|
systemdFileSrc: "packaging/rpm/systemd/grafana-server.service",
|
||||||
|
|
||||||
depends: []string{"/sbin/service", "fontconfig"},
|
depends: []string{"/sbin/service", "fontconfig", "freetype", "urw-fonts"},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -25,7 +25,7 @@ plugins = data/plugins
|
|||||||
|
|
||||||
#################################### Server ##############################
|
#################################### Server ##############################
|
||||||
[server]
|
[server]
|
||||||
# Protocol (http or https)
|
# Protocol (http, https, socket)
|
||||||
protocol = http
|
protocol = http
|
||||||
|
|
||||||
# The ip address to bind to, empty will bind to all interfaces
|
# The ip address to bind to, empty will bind to all interfaces
|
||||||
@ -57,6 +57,9 @@ enable_gzip = false
|
|||||||
cert_file =
|
cert_file =
|
||||||
cert_key =
|
cert_key =
|
||||||
|
|
||||||
|
# Unix socket path
|
||||||
|
socket = /tmp/grafana.sock
|
||||||
|
|
||||||
#################################### Database ############################
|
#################################### Database ############################
|
||||||
[database]
|
[database]
|
||||||
# You can configure the database connection by specifying type, host, name, user and password
|
# You can configure the database connection by specifying type, host, name, user and password
|
||||||
@ -246,6 +249,7 @@ allowed_domains =
|
|||||||
hosted_domain =
|
hosted_domain =
|
||||||
|
|
||||||
#################################### Grafana.com Auth ####################
|
#################################### Grafana.com Auth ####################
|
||||||
|
# legacy key names (so they work in env variables)
|
||||||
[auth.grafananet]
|
[auth.grafananet]
|
||||||
enabled = false
|
enabled = false
|
||||||
allow_sign_up = true
|
allow_sign_up = true
|
||||||
@ -254,6 +258,14 @@ client_secret = some_secret
|
|||||||
scopes = user:email
|
scopes = user:email
|
||||||
allowed_organizations =
|
allowed_organizations =
|
||||||
|
|
||||||
|
[auth.grafana_com]
|
||||||
|
enabled = false
|
||||||
|
allow_sign_up = true
|
||||||
|
client_id = some_id
|
||||||
|
client_secret = some_secret
|
||||||
|
scopes = user:email
|
||||||
|
allowed_organizations =
|
||||||
|
|
||||||
#################################### Generic OAuth #######################
|
#################################### Generic OAuth #######################
|
||||||
[auth.generic_oauth]
|
[auth.generic_oauth]
|
||||||
name = OAuth
|
name = OAuth
|
||||||
@ -430,6 +442,9 @@ prefix = prod.grafana.%(instance_name)s.
|
|||||||
[grafana_net]
|
[grafana_net]
|
||||||
url = https://grafana.com
|
url = https://grafana.com
|
||||||
|
|
||||||
|
[grafana_com]
|
||||||
|
url = https://grafana.com
|
||||||
|
|
||||||
#################################### External Image Storage ##############
|
#################################### External Image Storage ##############
|
||||||
[external_image_storage]
|
[external_image_storage]
|
||||||
# You can choose between (s3, webdav)
|
# You can choose between (s3, webdav)
|
||||||
@ -444,3 +459,4 @@ secret_key =
|
|||||||
url =
|
url =
|
||||||
username =
|
username =
|
||||||
password =
|
password =
|
||||||
|
public_url =
|
||||||
|
@ -26,7 +26,7 @@
|
|||||||
#
|
#
|
||||||
#################################### Server ####################################
|
#################################### Server ####################################
|
||||||
[server]
|
[server]
|
||||||
# Protocol (http or https)
|
# Protocol (http, https, socket)
|
||||||
;protocol = http
|
;protocol = http
|
||||||
|
|
||||||
# The ip address to bind to, empty will bind to all interfaces
|
# The ip address to bind to, empty will bind to all interfaces
|
||||||
@ -59,6 +59,9 @@
|
|||||||
;cert_file =
|
;cert_file =
|
||||||
;cert_key =
|
;cert_key =
|
||||||
|
|
||||||
|
# Unix socket path
|
||||||
|
;socket =
|
||||||
|
|
||||||
#################################### Database ####################################
|
#################################### Database ####################################
|
||||||
[database]
|
[database]
|
||||||
# You can configure the database connection by specifying type, host, name, user and password
|
# You can configure the database connection by specifying type, host, name, user and password
|
||||||
@ -246,7 +249,7 @@
|
|||||||
;allowed_organizations =
|
;allowed_organizations =
|
||||||
|
|
||||||
#################################### Grafana.com Auth ####################
|
#################################### Grafana.com Auth ####################
|
||||||
[auth.grafananet]
|
[auth.grafana_com]
|
||||||
;enabled = false
|
;enabled = false
|
||||||
;allow_sign_up = true
|
;allow_sign_up = true
|
||||||
;client_id = some_id
|
;client_id = some_id
|
||||||
@ -383,7 +386,7 @@
|
|||||||
|
|
||||||
#################################### Grafana.com integration ##########################
|
#################################### Grafana.com integration ##########################
|
||||||
# Url used to to import dashboards directly from Grafana.com
|
# Url used to to import dashboards directly from Grafana.com
|
||||||
[grafana_net]
|
[grafana_com]
|
||||||
;url = https://grafana.com
|
;url = https://grafana.com
|
||||||
|
|
||||||
#################################### External image storage ##########################
|
#################################### External image storage ##########################
|
||||||
@ -399,5 +402,6 @@
|
|||||||
|
|
||||||
[external_image_storage.webdav]
|
[external_image_storage.webdav]
|
||||||
;url =
|
;url =
|
||||||
|
;public_url =
|
||||||
;username =
|
;username =
|
||||||
;password =
|
;password =
|
||||||
|
@ -10,3 +10,5 @@ mysql:
|
|||||||
volumes:
|
volumes:
|
||||||
- /etc/localtime:/etc/localtime:ro
|
- /etc/localtime:/etc/localtime:ro
|
||||||
- /etc/timezone:/etc/timezone:ro
|
- /etc/timezone:/etc/timezone:ro
|
||||||
|
command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]
|
||||||
|
|
||||||
|
20
docker/blocks/mysql_opendata/Dockerfile
Normal file
20
docker/blocks/mysql_opendata/Dockerfile
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
## MySQL with Open Data Set from NYC Open Data (https://data.cityofnewyork.us)
|
||||||
|
|
||||||
|
FROM mysql:latest
|
||||||
|
|
||||||
|
ENV MYSQL_DATABASE="testdata" \
|
||||||
|
MYSQL_ROOT_PASSWORD="rootpass" \
|
||||||
|
MYSQL_USER="grafana" \
|
||||||
|
MYSQL_PASSWORD="password"
|
||||||
|
|
||||||
|
# Install requirement (wget)
|
||||||
|
RUN apt-get update && apt-get install -y wget && apt-get install unzip
|
||||||
|
|
||||||
|
# Fetch NYC Data Set
|
||||||
|
RUN wget https://data.cityofnewyork.us/download/57g5-etyj/application%2Fzip -O /tmp/data.zip && \
|
||||||
|
unzip -j /tmp/data.zip 311_Service_Requests_from_2015.csv -d /var/lib/mysql-files && \
|
||||||
|
rm /tmp/data.zip
|
||||||
|
|
||||||
|
ADD import_csv.sql /docker-entrypoint-initdb.d/
|
||||||
|
|
||||||
|
EXPOSE 3306
|
9
docker/blocks/mysql_opendata/fig
Normal file
9
docker/blocks/mysql_opendata/fig
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
mysql_opendata:
|
||||||
|
build: blocks/mysql_opendata
|
||||||
|
environment:
|
||||||
|
MYSQL_ROOT_PASSWORD: rootpass
|
||||||
|
MYSQL_DATABASE: testdata
|
||||||
|
MYSQL_USER: grafana
|
||||||
|
MYSQL_PASSWORD: password
|
||||||
|
ports:
|
||||||
|
- "3307:3306"
|
80
docker/blocks/mysql_opendata/import_csv.sql
Normal file
80
docker/blocks/mysql_opendata/import_csv.sql
Normal file
@ -0,0 +1,80 @@
|
|||||||
|
use testdata;
|
||||||
|
DROP TABLE IF EXISTS `nyc_open_data`;
|
||||||
|
CREATE TABLE IF NOT EXISTS `nyc_open_data` (
|
||||||
|
UniqueKey bigint(255),
|
||||||
|
`CreatedDate` varchar(255),
|
||||||
|
`ClosedDate` varchar(255),
|
||||||
|
Agency varchar(255),
|
||||||
|
AgencyName varchar(255),
|
||||||
|
ComplaintType varchar(255),
|
||||||
|
Descriptor varchar(255),
|
||||||
|
LocationType varchar(255),
|
||||||
|
IncidentZip varchar(255),
|
||||||
|
IncidentAddress varchar(255),
|
||||||
|
StreetName varchar(255),
|
||||||
|
CrossStreet1 varchar(255),
|
||||||
|
CrossStreet2 varchar(255),
|
||||||
|
IntersectionStreet1 varchar(255),
|
||||||
|
IntersectionStreet2 varchar(255),
|
||||||
|
AddressType varchar(255),
|
||||||
|
City varchar(255),
|
||||||
|
Landmark varchar(255),
|
||||||
|
FacilityType varchar(255),
|
||||||
|
Status varchar(255),
|
||||||
|
`DueDate` varchar(255),
|
||||||
|
ResolutionDescription varchar(2048),
|
||||||
|
`ResolutionActionUpdatedDate` varchar(255),
|
||||||
|
CommunityBoard varchar(255),
|
||||||
|
Borough varchar(255),
|
||||||
|
XCoordinateStatePlane varchar(255),
|
||||||
|
YCoordinateStatePlane varchar(255),
|
||||||
|
ParkFacilityName varchar(255),
|
||||||
|
ParkBorough varchar(255),
|
||||||
|
SchoolName varchar(255),
|
||||||
|
SchoolNumber varchar(255),
|
||||||
|
SchoolRegion varchar(255),
|
||||||
|
SchoolCode varchar(255),
|
||||||
|
SchoolPhoneNumber varchar(255),
|
||||||
|
SchoolAddress varchar(255),
|
||||||
|
SchoolCity varchar(255),
|
||||||
|
SchoolState varchar(255),
|
||||||
|
SchoolZip varchar(255),
|
||||||
|
SchoolNotFound varchar(255),
|
||||||
|
SchoolOrCitywideComplaint varchar(255),
|
||||||
|
VehicleType varchar(255),
|
||||||
|
TaxiCompanyBorough varchar(255),
|
||||||
|
TaxiPickUpLocation varchar(255),
|
||||||
|
BridgeHighwayName varchar(255),
|
||||||
|
BridgeHighwayDirection varchar(255),
|
||||||
|
RoadRamp varchar(255),
|
||||||
|
BridgeHighwaySegment varchar(255),
|
||||||
|
GarageLotName varchar(255),
|
||||||
|
FerryDirection varchar(255),
|
||||||
|
FerryTerminalName varchar(255),
|
||||||
|
Latitude varchar(255),
|
||||||
|
Longitude varchar(255),
|
||||||
|
Location varchar(255)
|
||||||
|
);
|
||||||
|
LOAD DATA INFILE '/var/lib/mysql-files/311_Service_Requests_from_2015.csv' INTO TABLE nyc_open_data FIELDS OPTIONALLY ENCLOSED BY '"' TERMINATED BY ',' IGNORE 1 LINES;
|
||||||
|
UPDATE nyc_open_data SET CreatedDate = STR_TO_DATE(CreatedDate, '%m/%d/%Y %r') WHERE CreatedDate <> '';
|
||||||
|
UPDATE nyc_open_data SET ClosedDate = STR_TO_DATE(ClosedDate, '%m/%d/%Y %r') WHERE ClosedDate <> '';
|
||||||
|
UPDATE nyc_open_data SET DueDate = STR_TO_DATE(DueDate, '%m/%d/%Y %r') WHERE DueDate <> '';
|
||||||
|
UPDATE nyc_open_data SET ResolutionActionUpdatedDate = STR_TO_DATE(ResolutionActionUpdatedDate, '%m/%d/%Y %r') WHERE ResolutionActionUpdatedDate <> '';
|
||||||
|
|
||||||
|
UPDATE nyc_open_data SET CreatedDate=null WHERE CreatedDate = '';
|
||||||
|
UPDATE nyc_open_data SET ClosedDate=null WHERE ClosedDate = '';
|
||||||
|
UPDATE nyc_open_data SET DueDate=null WHERE DueDate = '';
|
||||||
|
UPDATE nyc_open_data SET ResolutionActionUpdatedDate=null WHERE ResolutionActionUpdatedDate = '';
|
||||||
|
|
||||||
|
ALTER TABLE nyc_open_data modify CreatedDate datetime NULL;
|
||||||
|
ALTER TABLE nyc_open_data modify ClosedDate datetime NULL;
|
||||||
|
ALTER TABLE nyc_open_data modify DueDate datetime NULL;
|
||||||
|
ALTER TABLE nyc_open_data modify ResolutionActionUpdatedDate datetime NULL;
|
||||||
|
|
||||||
|
ALTER TABLE `nyc_open_data` ADD INDEX `IX_ComplaintType` (`ComplaintType`);
|
||||||
|
ALTER TABLE `nyc_open_data` ADD INDEX `IX_CreatedDate` (`CreatedDate`);
|
||||||
|
ALTER TABLE `nyc_open_data` ADD INDEX `IX_LocationType` (`LocationType`);
|
||||||
|
ALTER TABLE `nyc_open_data` ADD INDEX `IX_AgencyName` (`AgencyName`);
|
||||||
|
ALTER TABLE `nyc_open_data` ADD INDEX `IX_City` (`City`);
|
||||||
|
|
||||||
|
SYSTEM rm /var/lib/mysql-files/311_Service_Requests_from_2015.csv
|
@ -1,4 +1,4 @@
|
|||||||
snmpd:
|
snmpd:
|
||||||
build: blocks/snmpd
|
image: namshi/smtp
|
||||||
ports:
|
ports:
|
||||||
- "161:161"
|
- "25:25"
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
.PHONY: all default docs docs-build docs-shell shell test
|
.PHONY: all default docs docs-build docs-shell shell checkvars
|
||||||
|
|
||||||
# to allow `make DOCSPORT=9000 docs`
|
# to allow `make DOCSPORT=9000 docs`
|
||||||
DOCSPORT := 3004
|
DOCSPORT := 3004
|
||||||
@ -11,23 +11,24 @@ DOCS_MOUNT := -v $(SOURCES_HOST_DIR):/site/content
|
|||||||
|
|
||||||
DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e NOCACHE -p 3004:3004 -p 3005:3005
|
DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e NOCACHE -p 3004:3004 -p 3005:3005
|
||||||
|
|
||||||
|
VERSION := $(shell head -n 1 VERSION)
|
||||||
|
|
||||||
default: docs
|
default: docs
|
||||||
|
|
||||||
|
checkvars:
|
||||||
|
ifndef ENV
|
||||||
|
$(error ENV is undefined set via ENV=staging or ENV=prod as argument to make)
|
||||||
|
endif
|
||||||
|
|
||||||
docs: docs-build
|
docs: docs-build
|
||||||
$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "grunt --env=dev-docs && grunt connect --port=3004"
|
$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "grunt --env=dev-docs && grunt connect --port=3004"
|
||||||
|
|
||||||
test: docs-build
|
watch: docs-build
|
||||||
$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "ls -la /site/content"
|
|
||||||
|
|
||||||
docs-watch: docs-build
|
|
||||||
$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "grunt --env=dev-docs && grunt connect --port=3004 & grunt watch --port=3004 --env=dev-docs"
|
$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "grunt --env=dev-docs && grunt connect --port=3004 & grunt watch --port=3004 --env=dev-docs"
|
||||||
|
|
||||||
publish: docs-build
|
publish: checkvars docs-build
|
||||||
$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "./publish.sh staging-docs root"
|
$(info Publishing ENV=${ENV} and VERSION=${VERSION})
|
||||||
|
$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "./publish.sh ${ENV}-docs ${VERSION}"
|
||||||
publish-prod: docs-build
|
|
||||||
$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "./publish.sh prod-docs root"
|
|
||||||
|
|
||||||
docs-build:
|
docs-build:
|
||||||
docker build -t "$(DOCKER_DOCS_IMAGE)" --no-cache .
|
docker build -t "$(DOCKER_DOCS_IMAGE)" --no-cache .
|
||||||
|
@ -1,8 +1,7 @@
|
|||||||
# Building The Docs
|
# Building The Docs
|
||||||
|
|
||||||
To build the docs locally, you need to have docker installed. The
|
To build the docs locally, you need to have docker installed. The
|
||||||
docs are built using a custom [docker](https://www.docker.com/) image
|
docs are built using [Hugo](http://gohugo.io/) - a static site generator.
|
||||||
and the [mkdocs](http://www.mkdocs.org/) tool.
|
|
||||||
|
|
||||||
**Prepare the Docker Image**:
|
**Prepare the Docker Image**:
|
||||||
|
|
||||||
@ -11,19 +10,23 @@ when running ``make docs-build`` depending on how your system's docker
|
|||||||
service is configured):
|
service is configured):
|
||||||
|
|
||||||
```
|
```
|
||||||
$ git clone https://github.com/grafana/grafana.org
|
git clone https://github.com/grafana/grafana.org
|
||||||
$ cd grafana.org
|
cd grafana.org
|
||||||
$ make docs-build
|
make docs-build
|
||||||
```
|
```
|
||||||
|
|
||||||
**Build the Documentation**:
|
**Build the Documentation**:
|
||||||
|
|
||||||
Now that the docker image has been prepared we can build the
|
Now that the docker image has been prepared we can build the
|
||||||
docs. Switch your working directory back to the directory this file
|
grafana docs and start a docs server. Switch your working directory back to the directory this file
|
||||||
(README.md) is in and run (possibly with ``sudo``):
|
(README.md) is in.
|
||||||
|
|
||||||
|
An AWS config file is required to build the docs Docker image and to publish the site to AWS. If you are building locally only and do not have any AWS credentials for docs.grafana.org then create an empty file named `awsconfig` in the current directory.
|
||||||
|
|
||||||
|
Then run (possibly with ``sudo``):
|
||||||
|
|
||||||
```
|
```
|
||||||
$ make docs
|
make watch
|
||||||
```
|
```
|
||||||
|
|
||||||
This command will not return control of the shell to the user. Instead
|
This command will not return control of the shell to the user. Instead
|
||||||
@ -32,4 +35,21 @@ we created in the previous step.
|
|||||||
|
|
||||||
Open [localhost:3004](http://localhost:3004) to view the docs.
|
Open [localhost:3004](http://localhost:3004) to view the docs.
|
||||||
|
|
||||||
|
### Images & Content
|
||||||
|
|
||||||
|
All markdown files are located in this repo (main grafana repo). But all images are added to the https://github.com/grafana/grafana.org repo. So the process of adding images is a bit complicated.
|
||||||
|
|
||||||
|
First you need create a feature (PR) branch of https://github.com/grafana/grafana.org so you can make change. Then add the image to the `/static/img/docs` directory. Then make a commit that adds the image.
|
||||||
|
|
||||||
|
Then run:
|
||||||
|
```
|
||||||
|
make docs-build
|
||||||
|
```
|
||||||
|
|
||||||
|
This will rebuild the docs docker container.
|
||||||
|
|
||||||
|
To be able to use the image your have to quit (CTRL-C) the `make watch` command (that you run in the same directory as this README). Then simply rerun `make watch`, it will restart the docs server but now with access to your image.
|
||||||
|
|
||||||
|
### Editing content
|
||||||
|
|
||||||
|
Changes to the markdown files should automatically cause a docs rebuild and live reload should reload the page in your browser.
|
||||||
|
@ -1 +1 @@
|
|||||||
3.1.0
|
v4.2
|
||||||
|
3
docs/publish.sh
Executable file
3
docs/publish.sh
Executable file
@ -0,0 +1,3 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
make publish ENV=prod VERSION=root
|
@ -27,6 +27,24 @@ To show all admin commands:
|
|||||||
|
|
||||||
### Reset admin password
|
### Reset admin password
|
||||||
|
|
||||||
You can reset the password for the admin user using the CLI.
|
You can reset the password for the admin user using the CLI. The use case for this command is when you have lost the admin password.
|
||||||
|
|
||||||
`grafana-cli admin reset-admin-password ...`
|
`grafana-cli admin reset-admin-password ...`
|
||||||
|
|
||||||
|
If running the command returns this error:
|
||||||
|
|
||||||
|
> Could not find config defaults, make sure homepath command line parameter is set or working directory is homepath
|
||||||
|
|
||||||
|
then there are two flags that can be used to set homepath and the config file path.
|
||||||
|
|
||||||
|
`grafana-cli admin reset-admin-password --homepath "/usr/share/grafana" newpass`
|
||||||
|
|
||||||
|
If you have not lost the admin password then it is better to set in the Grafana UI. If you need to set the password in a script then the [Grafana API](http://docs.grafana.org/http_api/user/#change-password) can be used. Here is an example with curl using basic auth:
|
||||||
|
|
||||||
|
```
|
||||||
|
curl -X PUT -H "Content-Type: application/json" -d '{
|
||||||
|
"oldPassword": "admin",
|
||||||
|
"newPassword": "newpass",
|
||||||
|
"confirmNew": "newpass"
|
||||||
|
}' http://admin:admin@<your_grafana_host>:3000/api/user/password
|
||||||
|
```
|
||||||
|
@ -12,18 +12,18 @@ weight = 2
|
|||||||
|
|
||||||
# Alert Notifications
|
# Alert Notifications
|
||||||
|
|
||||||
{{< imgbox max-width="40%" img="/img/docs/v4/alert_notifications_menu.png" caption="Alerting notifications" >}}
|
|
||||||
|
|
||||||
> Alerting is only available in Grafana v4.0 and above.
|
> Alerting is only available in Grafana v4.0 and above.
|
||||||
|
|
||||||
When an alert changes state it sends out notifications. Each alert rule can have
|
When an alert changes state it sends out notifications. Each alert rule can have
|
||||||
multiple notifications. But in order to add a notification to an alert rule you first need
|
multiple notifications. But in order to add a notification to an alert rule you first need
|
||||||
to add and configure a `notification` object. This is done from the Alerting/Notifications page.
|
to add and configure a `notification` channel (can be email, Pagerduty or other integration). This is done from the Notification Channels page.
|
||||||
|
|
||||||
## Notification Setup
|
## Notification Channel Setup
|
||||||
|
|
||||||
On the notifications list page hit the `New Notification` button to go the the page where you
|
{{< imgbox max-width="40%" img="/img/docs/v43/alert_notifications_menu.png" caption="Alerting Notification Channels" >}}
|
||||||
can configure and setup a new notification.
|
|
||||||
|
On the Notification Channels page hit the `New Channel` button to go the the page where you
|
||||||
|
can configure and setup a new Notification Channel.
|
||||||
|
|
||||||
You specify name and type, and type specific options. You can also test the notification to make
|
You specify name and type, and type specific options. You can also test the notification to make
|
||||||
sure it's working and setup correctly.
|
sure it's working and setup correctly.
|
||||||
@ -32,15 +32,15 @@ sure it's working and setup correctly.
|
|||||||
|
|
||||||
When checked this option will make this notification used for all alert rules, existing and new.
|
When checked this option will make this notification used for all alert rules, existing and new.
|
||||||
|
|
||||||
## Supported notification types
|
## Supported Notification Types
|
||||||
|
|
||||||
Grafana ships with a set of notification types. More will be added in future releases.
|
Grafana ships with the following set of notification types:
|
||||||
|
|
||||||
### Email
|
### Email
|
||||||
|
|
||||||
To enable email notification you have to setup [SMTP settings](/installation/configuration/#smtp)
|
To enable email notification you have to setup [SMTP settings](/installation/configuration/#smtp)
|
||||||
in the Grafana config. Email notification will upload an image of the alert graph to an
|
in the Grafana config. Email notification will upload an image of the alert graph to an
|
||||||
external image destination if available or fallback on attaching the image in the email.
|
external image destination if available or fallback to attaching the image in the email.
|
||||||
|
|
||||||
### Slack
|
### Slack
|
||||||
|
|
||||||
@ -55,19 +55,29 @@ Setting | Description
|
|||||||
Recipient | allows you to override the slack recipient.
|
Recipient | allows you to override the slack recipient.
|
||||||
Mention | make it possible to include a mention in the slack notification sent by Grafana. Ex @here or @channel
|
Mention | make it possible to include a mention in the slack notification sent by Grafana. Ex @here or @channel
|
||||||
|
|
||||||
|
### PagerDuty
|
||||||
|
|
||||||
|
To set up PagerDuty, all you have to do is to provide an api key.
|
||||||
|
|
||||||
|
Setting | Description
|
||||||
|
---------- | -----------
|
||||||
|
Integration Key | Integration key for pagerduty.
|
||||||
|
Auto resolve incidents | Resolve incidents in pagerduty once the alert goes back to ok
|
||||||
|
|
||||||
### Webhook
|
### Webhook
|
||||||
|
|
||||||
The webhook notification is a simple way to send information about an state change over HTTP to a custom endpoint.
|
The webhook notification is a simple way to send information about an state change over HTTP to a custom endpoint.
|
||||||
Using this notification you could integrated Grafana into any system you choose, by yourself.
|
Using this notification you could integrate Grafana into any system you choose, by yourself.
|
||||||
|
|
||||||
Example json body:
|
Example json body:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"title": "My alert",
|
"title": "My alert",
|
||||||
"ruleId": 1,
|
"ruleId": 1,
|
||||||
"ruleName": "Load peaking!",
|
"ruleName": "Load peaking!",
|
||||||
"ruleUrl": "http://url.to.grafana/db/dashboard/my_dashboard?panelId=2",
|
"ruleUrl": "http://url.to.grafana/db/dashboard/my_dashboard?panelId=2",
|
||||||
"state": "Alerting",
|
"state": "alerting",
|
||||||
"imageUrl": "http://s3.image.url",
|
"imageUrl": "http://s3.image.url",
|
||||||
"message": "Load is peaking. Make sure the traffic is real and spin up more webfronts",
|
"message": "Load is peaking. Make sure the traffic is real and spin up more webfronts",
|
||||||
"evalMatches": [
|
"evalMatches": [
|
||||||
@ -80,30 +90,38 @@ Example json body:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### PagerDuty
|
- **state** - The possible values for alert state are: `ok`, `paused`, `alerting`, `pending`, `no_data`.
|
||||||
|
|
||||||
To set up PagerDuty, all you have to do is to provide an api key.
|
### Other Supported Notification Channels
|
||||||
|
|
||||||
Setting | Description
|
Grafana also supports the following Notification Channels:
|
||||||
---------- | -----------
|
|
||||||
Integration Key | Integration key for pagerduty.
|
|
||||||
Auto resolve incidents | Resolve incidents in pagerduty once the alert goes back to ok
|
|
||||||
|
|
||||||
|
- HipChat
|
||||||
|
|
||||||
|
- VictorOps
|
||||||
|
|
||||||
|
- Sensu
|
||||||
|
|
||||||
|
- OpsGenie
|
||||||
|
|
||||||
|
- Threema
|
||||||
|
|
||||||
|
- Pushover
|
||||||
|
|
||||||
|
- Telegram
|
||||||
|
|
||||||
|
- LINE
|
||||||
|
|
||||||
# Enable images in notifications {#external-image-store}
|
# Enable images in notifications {#external-image-store}
|
||||||
|
|
||||||
Grafana can render the panel associated with the alert rule and include that in the notification. Some types
|
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessable (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
|
||||||
of notifications require that this image be publicly accessable (Slack for example). In order to support
|
Amazon S3 and Webdav for this. So to set that up you need to configure the [external image uploader](/installation/configuration/#external-image-storage) in your grafana-server ini config file.
|
||||||
images in notifications like Slack Grafana can upload the image to an image store. It currently supports
|
|
||||||
Amazon S3 for this and Webdav. So to set that up you need to configure the
|
|
||||||
[external image uploader](/installation/configuration/#external-image-storage) in your grafana-server ini
|
|
||||||
config file.
|
|
||||||
|
|
||||||
This is an optional requirement, you can get slack and email notifications without setting this up.
|
Currently only the Email Channels attaches images if no external image store is specified. To include images in alert notifications for other channels then you need to set up an external image store.
|
||||||
|
|
||||||
|
This is an optional requirement, you can get Slack and email notifications without setting this up.
|
||||||
|
|
||||||
# Configure the link back to Grafana from alert notifications
|
# Configure the link back to Grafana from alert notifications
|
||||||
|
|
||||||
All alert notifications contains a link back to the triggered alert in the Grafana instance.
|
All alert notifications contains a link back to the triggered alert in the Grafana instance.
|
||||||
This url is based on the [domain](/installation/configuration/#domain) setting in Grafana.
|
This url is based on the [domain](/installation/configuration/#domain) setting in Grafana.
|
||||||
|
|
||||||
|
|
||||||
|
@ -52,12 +52,22 @@ Here you can specify the name of the alert rule and how often the scheduler shou
|
|||||||
### Conditions
|
### Conditions
|
||||||
|
|
||||||
Currently the only condition type that exists is a `Query` condition that allows you to
|
Currently the only condition type that exists is a `Query` condition that allows you to
|
||||||
specify a query letter, time range and an aggregation function. The letter refers to
|
specify a query letter, time range and an aggregation function.
|
||||||
a query you already have added in the **Metrics** tab. The result from the query and the aggregation function is
|
|
||||||
a single value that is then used in the threshold check. The query used in an alert rule cannot
|
|
||||||
contain any template variables. Currently we only support `AND` and `OR` operators between conditions and they are executed serially.
|
### Query condition example
|
||||||
|
|
||||||
|
```sql
|
||||||
|
avg() OF query(A, 5m, now) IS BELOW 14
|
||||||
|
```
|
||||||
|
|
||||||
|
- `avg()` Controls how the values for **each** serie should be reduced to a value that can be compared against the threshold. Click on the function to change it to another aggregation function.
|
||||||
|
- `query(A, 5m, now)` The letter defines what query to execute from the **Metrics** tab. The second two parameters defines the time range, `5m, now` means 5 minutes from now to now. You can also do `10m, now-2m` to define a time range that will be 10 minutes from now to 2 minutes from now. This is useful if you want to ignore the last 2 minutes of data.
|
||||||
|
- `IS BELOW 14` Defines the type of threshold and the threshold value. You can click on `IS BELOW` to change the type of threshold.
|
||||||
|
|
||||||
|
The query used in an alert rule cannot contain any template variables. Currently we only support `AND` and `OR` operators between conditions and they are executed serially.
|
||||||
For example, we have 3 conditions in the following order:
|
For example, we have 3 conditions in the following order:
|
||||||
`condition:A(evaluates to: TRUE) OR condition:B(evaluates to: FALSE) AND condition:C(evaluates to: TRUE)`
|
*condition:A(evaluates to: TRUE) OR condition:B(evaluates to: FALSE) AND condition:C(evaluates to: TRUE)*
|
||||||
so the result will be calculated as ((TRUE OR FALSE) AND TRUE) = TRUE.
|
so the result will be calculated as ((TRUE OR FALSE) AND TRUE) = TRUE.
|
||||||
|
|
||||||
We plan to add other condition types in the future, like `Other Alert`, where you can include the state
|
We plan to add other condition types in the future, like `Other Alert`, where you can include the state
|
||||||
|
@ -12,10 +12,7 @@ weight = 200
|
|||||||
Here you can find links to older versions of the documentation that might be better suited for your version
|
Here you can find links to older versions of the documentation that might be better suited for your version
|
||||||
of Grafana.
|
of Grafana.
|
||||||
|
|
||||||
- [Latest](/)
|
- [Latest](http://docs.grafana.org)
|
||||||
- [Version 3.1](/v3.1)
|
- [Version 4.2](http://docs.grafana.org/v4.2)
|
||||||
- [Version 3.0](/v3.0)
|
- [Version 3.1](http://docs.grafana.org/v3.1)
|
||||||
- [Version 2.6](/v2.6)
|
- [Version 3.0](http://docs.grafana.org/v3.0)
|
||||||
- [Version 2.5](/v2.5)
|
|
||||||
- [Version 2.1](/v2.1)
|
|
||||||
- [Version 2.0](/v2.0)
|
|
||||||
|
@ -13,29 +13,26 @@ weight = 10
|
|||||||
|
|
||||||
# Using AWS CloudWatch in Grafana
|
# Using AWS CloudWatch in Grafana
|
||||||
|
|
||||||
Grafana ships with built in support for CloudWatch. You just have to add it as a data source and you will
|
Grafana ships with built in support for CloudWatch. You just have to add it as a data source and you will be ready to build dashboards for you CloudWatch metrics.
|
||||||
be ready to build dashboards for you CloudWatch metrics.
|
|
||||||
|
|
||||||
## Adding the data source
|
## Adding the data source to Grafana
|
||||||

|
|
||||||
|
|
||||||
1. Open the side menu by clicking the the Grafana icon in the top header.
|
1. Open the side menu by clicking the Grafana icon in the top header.
|
||||||
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
||||||
|
3. Click the `+ Add data source` button in the top header.
|
||||||
|
4. Select `Cloudwatch` from the *Type* dropdown.
|
||||||
|
|
||||||
> NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
|
> NOTE: If at any moment you have issues with getting this datasource to work and Grafana is giving you undescriptive errors then don't
|
||||||
|
forget to check your log file (try looking in /var/log/grafana/grafana.log).
|
||||||
3. Click the `Add new` link in the top header.
|
|
||||||
4. Select `CloudWatch` from the dropdown.
|
|
||||||
> NOTE: If at any moment you have issues with getting this datasource to work and grafana is giving you undescriptive errors then dont forget to check your log file (try looking in /var/log/grafana/).
|
|
||||||
|
|
||||||
Name | Description
|
Name | Description
|
||||||
------------ | -------------
|
------------ | -------------
|
||||||
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
|
*Name* | The data source name. This is how you refer to the data source in panels & queries.
|
||||||
Default | Default data source means that it will be pre-selected for new panels.
|
*Default* | Default data source means that it will be pre-selected for new panels.
|
||||||
Credentials profile name | Specify the name of the profile to use (if you use `~/aws/credentials` file), leave blank for default. This option was introduced in Grafana 2.5.1
|
*Credentials* profile name | Specify the name of the profile to use (if you use `~/aws/credentials` file), leave blank for default.
|
||||||
Default Region | Used in query editor to set region (can be changed on per query basis)
|
*Default Region* | Used in query editor to set region (can be changed on per query basis)
|
||||||
Custom Metrics namespace | Specify the CloudWatch namespace of Custom metrics
|
*Custom Metrics namespace* | Specify the CloudWatch namespace of Custom metrics
|
||||||
Assume Role Arn | Specify the ARN of the role to assume
|
*Assume Role Arn* | Specify the ARN of the role to assume
|
||||||
|
|
||||||
## Authentication
|
## Authentication
|
||||||
|
|
||||||
@ -61,49 +58,64 @@ Example content:
|
|||||||
|
|
||||||
## Metric Query Editor
|
## Metric Query Editor
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
You need to specify a namespace, metric, at least one stat, and at least one dimension.
|
You need to specify a namespace, metric, at least one stat, and at least one dimension.
|
||||||
|
|
||||||
## Templated queries
|
## Templated queries
|
||||||
CloudWatch Datasource Plugin provides the following functions in `Variables values query` field in Templating Editor to query `region`, `namespaces`, `metric names` and `dimension keys/values` on the CloudWatch.
|
|
||||||
|
Instead of hard-coding things like server, application and sensor name in you metric queries you can use variables in their place.
|
||||||
|
Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns makes it easy to change the data
|
||||||
|
being displayed in your dashboard.
|
||||||
|
|
||||||
|
Checkout the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
|
||||||
|
types of template variables.
|
||||||
|
|
||||||
|
### Query variable
|
||||||
|
|
||||||
|
CloudWatch Datasource Plugin provides the following queries you can specify in the `Query` field in the Variable
|
||||||
|
edit view. They allow you to fill a variable's options list with things like `region`, `namespaces`, `metric names`
|
||||||
|
and `dimension keys/values`.
|
||||||
|
|
||||||
Name | Description
|
Name | Description
|
||||||
------- | --------
|
------- | --------
|
||||||
`regions()` | Returns a list of regions AWS provides their service.
|
*regions()* | Returns a list of regions AWS provides their service.
|
||||||
`namespaces()` | Returns a list of namespaces CloudWatch support.
|
*namespaces()* | Returns a list of namespaces CloudWatch support.
|
||||||
`metrics(namespace, [region])` | Returns a list of metrics in the namespace. (specify region for custom metrics)
|
*metrics(namespace, [region])* | Returns a list of metrics in the namespace. (specify region for custom metrics)
|
||||||
`dimension_keys(namespace)` | Returns a list of dimension keys in the namespace.
|
*dimension_keys(namespace)* | Returns a list of dimension keys in the namespace.
|
||||||
`dimension_values(region, namespace, metric, dimension_key)` | Returns a list of dimension values matching the specified `region`, `namespace`, `metric` and `dimension_key`.
|
*dimension_values(region, namespace, metric, dimension_key)* | Returns a list of dimension values matching the specified `region`, `namespace`, `metric` and `dimension_key`.
|
||||||
`ebs_volume_ids(region, instance_id)` | Returns a list of volume id matching the specified `region`, `instance_id`.
|
*ebs_volume_ids(region, instance_id)* | Returns a list of volume id matching the specified `region`, `instance_id`.
|
||||||
`ec2_instance_attribute(region, attribute_name, filters)` | Returns a list of attribute matching the specified `region`, `attribute_name`, `filters`.
|
*ec2_instance_attribute(region, attribute_name, filters)* | Returns a list of attribute matching the specified `region`, `attribute_name`, `filters`.
|
||||||
|
|
||||||
For details about the metrics CloudWatch provides, please refer to the [CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
|
For details about the metrics CloudWatch provides, please refer to the [CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
|
||||||
|
|
||||||
## Example templated Queries
|
#### Examples templated Queries
|
||||||
|
|
||||||
Example dimension queries which will return list of resources for individual AWS Services:
|
Example dimension queries which will return list of resources for individual AWS Services:
|
||||||
|
|
||||||
Service | Query
|
Query | Service
|
||||||
------- | -----
|
------- | -----
|
||||||
ELB | `dimension_values(us-east-1,AWS/ELB,RequestCount,LoadBalancerName)`
|
*dimension_values(us-east-1,AWS/ELB,RequestCount,LoadBalancerName)* | ELB
|
||||||
ElastiCache | `dimension_values(us-east-1,AWS/ElastiCache,CPUUtilization,CacheClusterId)`
|
*dimension_values(us-east-1,AWS/ElastiCache,CPUUtilization,CacheClusterId)* | ElastiCache
|
||||||
RedShift | `dimension_values(us-east-1,AWS/Redshift,CPUUtilization,ClusterIdentifier)`
|
*dimension_values(us-east-1,AWS/Redshift,CPUUtilization,ClusterIdentifier)* | RedShift
|
||||||
RDS | `dimension_values(us-east-1,AWS/RDS,CPUUtilization,DBInstanceIdentifier)`
|
*dimension_values(us-east-1,AWS/RDS,CPUUtilization,DBInstanceIdentifier)* | RDS
|
||||||
S3 | `dimension_values(us-east-1,AWS/S3,BucketSizeBytes,BucketName)`
|
*dimension_values(us-east-1,AWS/S3,BucketSizeBytes,BucketName)* | S3
|
||||||
|
|
||||||
## ec2_instance_attribute JSON filters
|
#### ec2_instance_attribute JSON filters
|
||||||
|
|
||||||
The `ec2_instance_attribute` query take `filters` in JSON format.
|
The `ec2_instance_attribute` query take `filters` in JSON format.
|
||||||
You can specify [pre-defined filters of ec2:DescribeInstances](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html).
|
You can specify [pre-defined filters of ec2:DescribeInstances](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html).
|
||||||
Specify like `{ filter_name1: [ filter_value1 ], filter_name2: [ filter_value2 ] }`
|
|
||||||
|
Filters syntax:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
{ filter_name1: [ filter_value1 ], filter_name2: [ filter_value2 ] }
|
||||||
|
```
|
||||||
|
|
||||||
Example `ec2_instance_attribute()` query
|
Example `ec2_instance_attribute()` query
|
||||||
|
|
||||||
ec2_instance_attribute(us-east-1, InstanceId, { "tag:Environment": [ "production" ] })
|
ec2_instance_attribute(us-east-1, InstanceId, { "tag:Environment": [ "production" ] })
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
## Cost
|
## Cost
|
||||||
|
|
||||||
Amazon provides 1 million CloudWatch API requests each month at no additional charge. Past this,
|
Amazon provides 1 million CloudWatch API requests each month at no additional charge. Past this,
|
||||||
|
@ -12,34 +12,29 @@ weight = 3
|
|||||||
|
|
||||||
# Using Elasticsearch in Grafana
|
# Using Elasticsearch in Grafana
|
||||||
|
|
||||||
Grafana ships with advanced support for Elasticsearch. You can do many types of
|
Grafana ships with advanced support for Elasticsearch. You can do many types of simple or complex Elasticsearch queries to
|
||||||
simple or complex elasticsearch queries to visualize logs or metrics stored in elasticsearch. You can
|
visualize logs or metrics stored in Elasticsearch. You can also annotate your graphs with log events stored in Elasticsearch.
|
||||||
also annotate your graphs with log events stored in elasticsearch.
|
|
||||||
|
|
||||||
## Adding the data source
|
## Adding the data source
|
||||||
|
|
||||||

|
1. Open the side menu by clicking the Grafana icon in the top header.
|
||||||
|
|
||||||
1. Open the side menu by clicking the the Grafana icon in the top header.
|
|
||||||
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
||||||
|
3. Click the `+ Add data source` button in the top header.
|
||||||
|
4. Select *Elasticsearch* from the *Type* dropdown.
|
||||||
|
|
||||||
> NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
|
> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.
|
||||||
|
|
||||||
3. Click the `Add new` link in the top header.
|
|
||||||
4. Select `Elasticsearch` from the dropdown.
|
|
||||||
|
|
||||||
Name | Description
|
Name | Description
|
||||||
------------ | -------------
|
------------ | -------------
|
||||||
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
|
*Name* | The data source name. This is how you refer to the data source in panels & queries.
|
||||||
Default | Default data source means that it will be pre-selected for new panels.
|
*Default* | Default data source means that it will be pre-selected for new panels.
|
||||||
Url | The http protocol, ip and port of you elasticsearch server.
|
*Url* | The HTTP protocol, IP, and port of your Elasticsearch server.
|
||||||
Access | Proxy = access via Grafana backend, Direct = access directly from browser.
|
*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
|
||||||
|
|
||||||
Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser.
|
Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication to the browser.
|
||||||
|
|
||||||
Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source.
|
|
||||||
|
|
||||||
### Direct access
|
### Direct access
|
||||||
|
|
||||||
If you select direct access you must update your Elasticsearch configuration to allow other domains to access
|
If you select direct access you must update your Elasticsearch configuration to allow other domains to access
|
||||||
Elasticsearch from the browser. You do this by specifying these to options in your **elasticsearch.yml** config file.
|
Elasticsearch from the browser. You do this by specifying these to options in your **elasticsearch.yml** config file.
|
||||||
|
|
||||||
@ -50,46 +45,94 @@ Elasticsearch from the browser. You do this by specifying these to options in yo
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
Here you can specify a default for the `time field` and specify the name of your elasticsearch index. You can use
|
Here you can specify a default for the `time field` and specify the name of your Elasticsearch index. You can use
|
||||||
a time pattern for the index name or a wildcard.
|
a time pattern for the index name or a wildcard.
|
||||||
|
|
||||||
|
### Elasticsearch version
|
||||||
|
|
||||||
|
Be sure to specify your Elasticsearch version in the version selection dropdown. This is very important as there are differences in how queries are composed. Currently only 2.x and 5.x
|
||||||
|
are supported.
|
||||||
|
|
||||||
## Metric Query editor
|
## Metric Query editor
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
The Elasticsearch query editor allows you to select multiple metrics and group by multiple terms or filters. Use the plus and minus icons to the right to add / remove
|
The Elasticsearch query editor allows you to select multiple metrics and group by multiple terms or filters. Use the plus and minus icons to the right to add/remove
|
||||||
metrics or group bys. Some metrics and group by have options, click the option text to expand the the row to view and edit metric or group by options.
|
metrics or group by clauses. Some metrics and group by clauses have options, click the option text to expand the row to view and edit metric or group by options.
|
||||||
|
|
||||||
|
## Series naming & alias patterns
|
||||||
|
|
||||||
|
You can control the name for time series via the `Alias` input field.
|
||||||
|
|
||||||
|
Pattern | Description
|
||||||
|
------------ | -------------
|
||||||
|
*{{term fieldname}}* | replaced with value of a term group by
|
||||||
|
*{{metric}}* | replaced with metric name (ex. Average, Min, Max)
|
||||||
|
*{{field}}* | replaced with the metric field name
|
||||||
|
|
||||||
## Pipeline metrics
|
## Pipeline metrics
|
||||||
|
|
||||||
If you have Elasticsearch 2.x and Grafana 2.6 or above then you can use pipeline metric aggregations like
|
Some metric aggregations are called Pipeline aggregations, for example, *Moving Average* and *Derivative*. Elasticsearch pipeline metrics require another metric to be based on. Use the eye icon next to the metric to hide metrics from appearing in the graph. This is useful for metrics you only have in the query for use in a pipeline metric.
|
||||||
**Moving Average** and **Derivative**. Elasticsearch pipeline metrics require another metric to be based on. Use the eye icon next to the metric
|
|
||||||
to hide metrics from appearing in the graph. This is useful for metrics you only have in the query to be used
|
|
||||||
in a pipeline metric.
|
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
## Templating
|
## Templating
|
||||||
|
|
||||||
The Elasticsearch datasource supports two types of queries you can use to fill template variables with values.
|
Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
|
||||||
|
Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
|
||||||
|
being displayed in your dashboard.
|
||||||
|
|
||||||
### Possible values for a field
|
Checkout the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
|
||||||
|
types of template variables.
|
||||||
|
|
||||||
```json
|
### Query variable
|
||||||
{"find": "terms", "field": "@hostname"}
|
|
||||||
|
The Elasticsearch data source supports two types of queries you can use in the *Query* field of *Query* variables. The query is written using a custom JSON string.
|
||||||
|
|
||||||
|
Query | Description
|
||||||
|
------------ | -------------
|
||||||
|
*{"find": "fields", "type": "keyword"}* | Returns a list of field names with the index type `keyword`.
|
||||||
|
*{"find": "terms", "field": "@hostname", "size": 1000}* | Returns a list of values for a field using term aggregation. Query will use current dashboard time range as time range for query.
|
||||||
|
*{"find": "terms", "field": "@hostname", "query": '<lucene query>'}* | Returns a list of values for a field using term aggregation and a specified lucene query filter. Query will use current dashboard time range as time range for query.
|
||||||
|
|
||||||
|
There is a default size limit of 500 on terms queries. Set the size property in your query to set a custom limit.
|
||||||
|
You can use other variables inside the query. Example query definition for a variable named `$host`.
|
||||||
|
|
||||||
|
```
|
||||||
|
{"find": "terms", "field": "@hostname", "query": "@source:$source"}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Fields filtered by type
|
In the above example, we use another variable named `$source` inside the query definition. Whenever you change, via the dropdown, the current value of the `$source` variable, it will trigger an update of the `$host` variable so it now only contains hostnames filtered by, in this case, the
|
||||||
```json
|
`@source` document property.
|
||||||
{"find": "fields", "type": "string"}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Fields filtered by type, with filter
|
### Using variables in queries
|
||||||
```json
|
|
||||||
{"find": "fields", "type": "string", "query": <lucene query>}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Multi format / All format
|
There are two syntaxes:
|
||||||
Use lucene format.
|
|
||||||
|
|
||||||
|
- `$<varname>` Example: @hostname:$hostname
|
||||||
|
- `[[varname]]` Example: @hostname:[[hostname]]
|
||||||
|
|
||||||
|
Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. When the *Multi-value* or *Include all value*
|
||||||
|
options are enabled, Grafana converts the labels from plain text to a lucene compatible condition.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
In the above example, we have a lucene query that filters documents based on the `@hostname` property using a variable named `$hostname`. It is also using
|
||||||
|
a variable in the *Terms* group by field input box. This allows you to use a variable to quickly change how the data is grouped.
|
||||||
|
|
||||||
|
Example dashboard:
|
||||||
|
[Elasticsearch Templated Dashboard](http://play.grafana.org/dashboard/db/elasticsearch-templated)
|
||||||
|
|
||||||
|
## Annotations
|
||||||
|
|
||||||
|
[Annotations]({{< relref "reference/annotations.md" >}}) allows you to overlay rich event information on top of graphs. You add annotation
|
||||||
|
queries via the Dashboard menu / Annotations view. Grafana can query any Elasticsearch index
|
||||||
|
for annotation events.
|
||||||
|
|
||||||
|
Name | Description
|
||||||
|
------------ | -------------
|
||||||
|
Query | You can leave the search query blank or specify a lucene query
|
||||||
|
Time | The name of the time field, needs to be date field.
|
||||||
|
Title | The name of the field to use for the event title.
|
||||||
|
Tags | Optional field name to use for event tags (can be an array or a CSV string).
|
||||||
|
Text | Optional field name to use event text body.
|
||||||
|
@ -18,28 +18,22 @@ change function parameters and much more. The editor can handle all types of gra
|
|||||||
queries through the use of query references.
|
queries through the use of query references.
|
||||||
|
|
||||||
## Adding the data source
|
## Adding the data source
|
||||||

|
|
||||||
|
|
||||||
1. Open the side menu by clicking the the Grafana icon in the top header.
|
1. Open the side menu by clicking the Grafana icon in the top header.
|
||||||
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
||||||
|
3. Click the `+ Add data source` button in the top header.
|
||||||
|
4. Select `Graphite` from the *Type* dropdown.
|
||||||
|
|
||||||
> NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
|
> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.
|
||||||
|
|
||||||
3. Click the `Add new` link in the top header.
|
|
||||||
4. Select `Graphite` from the dropdown.
|
|
||||||
|
|
||||||
Name | Description
|
Name | Description
|
||||||
------------ | -------------
|
------------ | -------------
|
||||||
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
|
*Name* | The data source name. This is how you refer to the data source in panels & queries.
|
||||||
Default | Default data source means that it will be pre-selected for new panels.
|
*Default* | Default data source means that it will be pre-selected for new panels.
|
||||||
Url | The http protocol, ip and port of your graphite-web or graphite-api install.
|
*Url* | The HTTP protocol, IP, and port of your graphite-web or graphite-api install.
|
||||||
Access | Proxy = access via Grafana backend, Direct = access directly from browser.
|
*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
|
||||||
|
|
||||||
|
|
||||||
Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser.
|
|
||||||
|
|
||||||
Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source.
|
|
||||||
|
|
||||||
|
Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the browser.
|
||||||
|
|
||||||
## Metric editor
|
## Metric editor
|
||||||
|
|
||||||
@ -50,6 +44,7 @@ or keyboard arrow keys. You can select a wildcard and still continue.
|
|||||||

|

|
||||||
|
|
||||||
### Functions
|
### Functions
|
||||||
|
|
||||||
Click the plus icon to the right to add a function. You can search for the function or select it from the menu. Once
|
Click the plus icon to the right to add a function. You can search for the function or select it from the menu. Once
|
||||||
a function is selected it will be added and your focus will be in the text box of the first parameter. To later change
|
a function is selected it will be added and your focus will be in the text box of the first parameter. To later change
|
||||||
a parameter just click on it and it will turn into a text box. To delete a function click the function name followed
|
a parameter just click on it and it will turn into a text box. To delete a function click the function name followed
|
||||||
@ -57,32 +52,61 @@ by the x icon.
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
### Optional parameters
|
### Optional parameters
|
||||||
|
|
||||||
Some functions like aliasByNode support an optional second argument. To add this parameter specify for example 3,-2 as the first parameter and the function editor will adapt and move the -2 to a second parameter. To remove the second optional parameter just click on it and leave it blank and the editor will remove it.
|
Some functions like aliasByNode support an optional second argument. To add this parameter specify for example 3,-2 as the first parameter and the function editor will adapt and move the -2 to a second parameter. To remove the second optional parameter just click on it and leave it blank and the editor will remove it.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
### Nested Queries
|
||||||
|
|
||||||
|
You can reference queries by the row “letter” that they’re on (similar to Microsoft Excel). If you add a second query to a graph, you can reference the first query simply by typing in #A. This provides an easy and convenient way to build compounded queries.
|
||||||
|
|
||||||
## Point consolidation
|
## Point consolidation
|
||||||
|
|
||||||
All Graphite metrics are consolidated so that Graphite doesn't return more data points than there are pixels in the graph. By default
|
All Graphite metrics are consolidated so that Graphite doesn't return more data points than there are pixels in the graph. By default,
|
||||||
this consolidation is done using `avg` function. You can change how Graphite consolidates metrics by adding the Graphite consolidateBy function.
|
this consolidation is done using `avg` function. You can change how Graphite consolidates metrics by adding the Graphite consolidateBy function.
|
||||||
|
|
||||||
> *Notice* This means that legend summary values (max, min, total) cannot be all correct at the same time. They are calculated
|
> *Notice* This means that legend summary values (max, min, total) cannot be all correct at the same time. They are calculated
|
||||||
> client side by Grafana. And depending on your consolidation function only one or two can be correct at the same time.
|
> client side by Grafana. And depending on your consolidation function only one or two can be correct at the same time.
|
||||||
|
|
||||||
## Templating
|
## Templating
|
||||||
You can create a template variable in Grafana and have that variable filled with values from any Graphite metric exploration query.
|
|
||||||
You can then use this variable in your Graphite queries, either as part of a metric path or as arguments to functions.
|
|
||||||
|
|
||||||
For example a query like `prod.servers.*` will fill the variable with all possible
|
Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
|
||||||
values that exists in the wildcard position.
|
Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
|
||||||
|
being displayed in your dashboard.
|
||||||
|
|
||||||
|
Checkout the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
|
||||||
|
types of template variables.
|
||||||
|
|
||||||
|
### Query variable
|
||||||
|
|
||||||
|
The query you specify in the query field should be a metric find type of query. For example, a query like `prod.servers.*` will fill the
|
||||||
|
variable with all possible values that exist in the wildcard position.
|
||||||
|
|
||||||
You can also create nested variables that use other variables in their definition. For example
|
You can also create nested variables that use other variables in their definition. For example
|
||||||
`apps.$app.servers.*` uses the variable `$app` in its query definition.
|
`apps.$app.servers.*` uses the variable `$app` in its query definition.
|
||||||
|
|
||||||
|
### Variable usage
|
||||||
|
|
||||||
|
You can use a variable in a metric node path or as a parameter to a function.
|
||||||

|

|
||||||
|
|
||||||
|
There are two syntaxes:
|
||||||
|
|
||||||
## Query Reference
|
- `$<varname>` Example: apps.frontend.$server.requests.count
|
||||||
You can reference queries by the row “letter” that they’re on (similar to Microsoft Excel). If you add a second query to graph, you can reference the first query simply by typing in #A. This provides an easy and convenient way to build compounded queries.
|
- `[[varname]]` Example: apps.frontend.[[server]].requests.count
|
||||||
|
|
||||||
|
Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. Use
|
||||||
|
the second syntax in expressions like `my.server[[serverNumber]].count`.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
[Graphite Templated Dashboard](http://play.grafana.org/dashboard/db/graphite-templated-nested)
|
||||||
|
|
||||||
|
## Annotations
|
||||||
|
|
||||||
|
[Annotations]({{< relref "reference/annotations.md" >}}) allows you to overlay rich event information on top of graphs. You add annotation
|
||||||
|
queries via the Dashboard menu / Annotations view.
|
||||||
|
|
||||||
|
Graphite supports two ways to query annotations. A regular metric query, for this you use the `Graphite query` textbox. A Graphite events query, use the `Graphite event tags` textbox,
|
||||||
|
specify a tag or wildcard (leave empty should also work)
|
||||||
|
@ -15,29 +15,29 @@ weight = 3
|
|||||||
Grafana ships with very feature rich data source plugin for InfluxDB. Supporting a feature rich query editor, annotation and templating queries.
|
Grafana ships with very feature rich data source plugin for InfluxDB. Supporting a feature rich query editor, annotation and templating queries.
|
||||||
|
|
||||||
## Adding the data source
|
## Adding the data source
|
||||||

|
|
||||||
|
|
||||||
1. Open the side menu by clicking the the Grafana icon in the top header.
|
1. Open the side menu by clicking the Grafana icon in the top header.
|
||||||
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
||||||
|
3. Click the `+ Add data source` button in the top header.
|
||||||
|
4. Select *InfluxDB* from the *Type* dropdown.
|
||||||
|
|
||||||
> NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
|
> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.
|
||||||
|
|
||||||
3. Click the `Add new` link in the top header.
|
|
||||||
|
|
||||||
Name | Description
|
Name | Description
|
||||||
------------ | -------------
|
------------ | -------------
|
||||||
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
|
*Name* | The data source name. This is how you refer to the data source in panels & queries.
|
||||||
Default | Default data source means that it will be pre-selected for new panels.
|
*Default* | Default data source means that it will be pre-selected for new panels.
|
||||||
Url | The http protocol, ip and port of you influxdb api (influxdb api port is by default 8086)
|
*Url* | The http protocol, ip and port of you influxdb api (influxdb api port is by default 8086)
|
||||||
Access | Proxy = access via Grafana backend, Direct = access directly from browser.
|
*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
|
||||||
Database | Name of your influxdb database
|
*Database* | Name of your influxdb database
|
||||||
User | Name of your database user
|
*User* | Name of your database user
|
||||||
Password | Database user's password
|
*Password* | Database user's password
|
||||||
|
|
||||||
> Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser.
|
### Proxy vs Direct access
|
||||||
|
|
||||||
> Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source.
|
|
||||||
|
|
||||||
|
Proxy access means that the Grafana backend will proxy all requests from the browser. So requests to InfluxDB will be channeled through
|
||||||
|
`grafana-server`. This means that the URL you specify needs to be accessible from the server you are running Grafana on. Proxy access
|
||||||
|
mode is also more secure as the username & password will never reach the browser.
|
||||||
|
|
||||||
## Query Editor
|
## Query Editor
|
||||||
|
|
||||||
@ -100,11 +100,21 @@ change the option `Format As` to `Table` if you want to show raw data in the `Ta
|
|||||||
|
|
||||||
|
|
||||||
## Templating
|
## Templating
|
||||||
You can create a template variable in Grafana and have that variable filled with values from any InfluxDB metric exploration query.
|
|
||||||
You can then use this variable in your InfluxDB metric queries.
|
|
||||||
|
|
||||||
For example you can have a variable that contains all values for tag `hostname` if you specify a query like this
|
Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
|
||||||
in the templating edit view.
|
Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
|
||||||
|
being displayed in your dashboard.
|
||||||
|
|
||||||
|
Checkout the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
|
||||||
|
types of template variables.
|
||||||
|
|
||||||
|
### Query variable
|
||||||
|
|
||||||
|
If you add a template variable of the type `Query` you can write a InfluxDB exploration (meta data) query. These queries can
|
||||||
|
return things like measurement names, key names or key values.
|
||||||
|
|
||||||
|
For example you can have a variable that contains all values for tag `hostname` if you specify a query like this in the templating variable *Query* setting.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SHOW TAG VALUES WITH KEY = "hostname"
|
SHOW TAG VALUES WITH KEY = "hostname"
|
||||||
```
|
```
|
||||||
@ -116,12 +126,46 @@ the hosts variable only show hosts from the current selected region with a query
|
|||||||
SHOW TAG VALUES WITH KEY = "hostname" WHERE region =~ /$region/
|
SHOW TAG VALUES WITH KEY = "hostname" WHERE region =~ /$region/
|
||||||
```
|
```
|
||||||
|
|
||||||
> Always use `regex values` or `regex wildcard` for All format or multi select format.
|
You can fetch key names for a given measurement.
|
||||||
|
|
||||||

|
```sql
|
||||||
|
SHOW TAG KEYS [FROM <measurement_name>]
|
||||||
|
```
|
||||||
|
|
||||||
|
If you have a variable with key names you can use this variable in a group by clause. This will allow you to change group by using the variable dropdown at the top
|
||||||
|
of the dashboard.
|
||||||
|
|
||||||
|
### Using variables in queries
|
||||||
|
|
||||||
|
There are two syntaxes:
|
||||||
|
|
||||||
|
`$<varname>` Example:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT mean("value") FROM "logins" WHERE "hostname" =~ /^$host$/ AND $timeFilter GROUP BY time($__interval), "hostname"
|
||||||
|
```
|
||||||
|
|
||||||
|
`[[varname]]` Example:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT mean("value") FROM "logins" WHERE "hostname" =~ /^[[host]]$/ AND $timeFilter GROUP BY time($__interval), "hostname"
|
||||||
|
```
|
||||||
|
|
||||||
|
Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. When the *Multi-value* or *Include all value*
|
||||||
|
options are enabled, Grafana converts the labels from plain text to a regex compatible string. Which means you have to use `=~` instead of `=`.
|
||||||
|
|
||||||
|
Example Dashboard:
|
||||||
|
[InfluxDB Templated Dashboard](http://play.grafana.org/dashboard/db/influxdb-templated-queries)
|
||||||
|
|
||||||
|
### Ad hoc filters variable
|
||||||
|
|
||||||
|
InfluxDB supports the special `Ad hoc filters` variable type. This variable allows you to specify any number of key/value filters on the fly. These filters will automatically
|
||||||
|
be applied to all your InfluxDB queries.
|
||||||
|
|
||||||
## Annotations
|
## Annotations
|
||||||
Annotations allows you to overlay rich event information on top of graphs.
|
|
||||||
|
[Annotations]({{< relref "reference/annotations.md" >}}) allows you to overlay rich event information on top of graphs. You add annotation
|
||||||
|
queries via the Dashboard menu / Annotations view.
|
||||||
|
|
||||||
An example query:
|
An example query:
|
||||||
|
|
||||||
@ -129,4 +173,8 @@ An example query:
|
|||||||
SELECT title, description from events WHERE $timeFilter order asc
|
SELECT title, description from events WHERE $timeFilter order asc
|
||||||
```
|
```
|
||||||
|
|
||||||
|
For InfluxDB you need to enter a query like in the above example. You need to have the ```where $timeFilter```
|
||||||
|
part. If you only select one column you will not need to enter anything in the column mapping fields. The
|
||||||
|
Tags field can be a comma-separated string.
|
||||||
|
|
||||||
|
|
||||||
|
118
docs/sources/features/datasources/mysql.md
Normal file
118
docs/sources/features/datasources/mysql.md
Normal file
@ -0,0 +1,118 @@
|
|||||||
|
+++
|
||||||
|
title = "Using MySQL in Grafana"
|
||||||
|
description = "Guide for using MySQL in Grafana"
|
||||||
|
keywords = ["grafana", "mysql", "guide"]
|
||||||
|
type = "docs"
|
||||||
|
[menu.docs]
|
||||||
|
name = "MySQL"
|
||||||
|
parent = "datasources"
|
||||||
|
weight = 7
|
||||||
|
+++
|
||||||
|
|
||||||
|
# Using MySQL in Grafana
|
||||||
|
|
||||||
|
> Only available in Grafana v4.3+. This data source is not ready for
|
||||||
|
> production use, currently in development (alpha state).
|
||||||
|
|
||||||
|
Grafana ships with a built-in MySQL data source plugin that allows you to query and visualize
|
||||||
|
data from a MySQL compatible database.
|
||||||
|
|
||||||
|
## Adding the data source
|
||||||
|
|
||||||
|
1. Open the side menu by clicking the Grafana icon in the top header.
|
||||||
|
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
||||||
|
3. Click the `+ Add data source` button in the top header.
|
||||||
|
4. Select *MySQL* from the *Type* dropdown.
|
||||||
|
|
||||||
|
### Database User Permissions (Important!)
|
||||||
|
|
||||||
|
The database user you specify when you add the data source should only be granted SELECT permissions on
|
||||||
|
the specified database & tables you want to query. Grafana does not validate that the query is safe. The query
|
||||||
|
could include any SQL statement. For example, statements like `USE otherdb;` and `DROP TABLE user;` would be
|
||||||
|
executed. To protect against this we **highly** recommend you create a specific MySQL user with
|
||||||
|
restricted permissions.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE USER 'grafanaReader' IDENTIFIED BY 'password';
|
||||||
|
GRANT SELECT ON mydatabase.mytable TO 'grafanaReader';
|
||||||
|
```
|
||||||
|
|
||||||
|
You can use wildcards (`*`) in place of database or table if you want to grant access to more databases and tables.
|
||||||
|
|
||||||
|
## Macros
|
||||||
|
|
||||||
|
To simplify syntax and to allow for dynamic parts, like date range filters, the query can contain macros.
|
||||||
|
|
||||||
|
Macro example | Description
|
||||||
|
------------ | -------------
|
||||||
|
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn > FROM_UNIXTIME(1494410783) AND dateColumn < FROM_UNIXTIME(1494497183)*
|
||||||
|
|
||||||
|
We plan to add many more macros. If you have suggestions for what macros you would like to see, please
|
||||||
|
[open an issue](https://github.com/grafana/grafana) in our GitHub repo.
|
||||||
|
|
||||||
|
The query editor has a link named `Generated SQL` that shows up after a query has been executed, while in panel edit mode. Click
|
||||||
|
on it and it will expand and show the raw interpolated SQL string that was executed.
|
||||||
|
|
||||||
|
## Table queries
|
||||||
|
|
||||||
|
If the `Format as` query option is set to `Table` then you can basically do any type of SQL query. The table panel will automatically show the results of whatever columns & rows your query returns.
|
||||||
|
|
||||||
|
Query editor with example query:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
The query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
title as 'Title',
|
||||||
|
user.login as 'Created By' ,
|
||||||
|
dashboard.created as 'Created On'
|
||||||
|
FROM dashboard
|
||||||
|
INNER JOIN user on user.id = dashboard.created_by
|
||||||
|
WHERE $__timeFilter(dashboard.created)
|
||||||
|
```
|
||||||
|
|
||||||
|
You can control the name of the Table panel columns by using regular `as ` SQL column selection syntax.
|
||||||
|
|
||||||
|
The resulting table panel:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### Time series queries
|
||||||
|
|
||||||
|
If you set `Format as` to `Time series`, for use in Graph panel for example, then there are some requirements for
|
||||||
|
what your query returns.
|
||||||
|
|
||||||
|
- Must be a column named `time_sec` representing a unix epoch in seconds.
|
||||||
|
- Must be a column named `value` representing the time series value.
|
||||||
|
- Must be a column named `metric` representing the time series name.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
min(UNIX_TIMESTAMP(time_date_time)) as time_sec,
|
||||||
|
max(value_double) as value,
|
||||||
|
metric1 as metric
|
||||||
|
FROM test_data
|
||||||
|
WHERE $__timeFilter(time_date_time)
|
||||||
|
GROUP BY metric1, UNIX_TIMESTAMP(time_date_time) DIV 300
|
||||||
|
ORDER BY time_sec asc
|
||||||
|
```
|
||||||
|
|
||||||
|
Currently, there is no support for a dynamic group by time based on time range & panel width.
|
||||||
|
This is something we plan to add.
|
||||||
|
|
||||||
|
## Templating
|
||||||
|
|
||||||
|
You can use variables in your queries but there is currently no support for defining `Query` variables
|
||||||
|
that target a MySQL data source.
|
||||||
|
|
||||||
|
## Alerting
|
||||||
|
|
||||||
|
Time series queries should work in alerting conditions. Table formatted queries are not yet supported in alert rule
|
||||||
|
conditions.
|
@ -12,59 +12,79 @@ weight = 5
|
|||||||
|
|
||||||
# Using OpenTSDB in Grafana
|
# Using OpenTSDB in Grafana
|
||||||
|
|
||||||
{{< docs-imagebox img="/img/docs/v2/add_OpenTSDB.png" max-width="14rem" >}}
|
Grafana ships with advanced support for OpenTSDB.
|
||||||
|
|
||||||
The newest release of Grafana adds additional functionality when using an OpenTSDB Data source.
|
## Adding the data source
|
||||||
|
|
||||||
1. Open the side menu by clicking the the Grafana icon in the top header.
|
1. Open the side menu by clicking the Grafana icon in the top header.
|
||||||
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
||||||
|
3. Click the `+ Add data source` button in the top header.
|
||||||
|
4. Select *OpenTSDB* from the *Type* dropdown.
|
||||||
|
|
||||||
> NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
|
> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.
|
||||||
|
|
||||||
3. Click the `Add new` link in the top header.
|
|
||||||
4. Select `OpenTSDB` from the dropdown.
|
|
||||||
|
|
||||||
Name | Description
|
Name | Description
|
||||||
------------ | -------------
|
------------ | -------------
|
||||||
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
|
*Name* | The data source name. This is how you refer to the data source in panels & queries.
|
||||||
Default | Default data source means that it will be pre-selected for new panels.
|
*Default* | Default data source means that it will be pre-selected for new panels.
|
||||||
Url | The http protocol, ip and port of you opentsdb server (default port is usually 4242)
|
*Url* | The http protocol, ip and port of your OpenTSDB server (default port is usually 4242)
|
||||||
Access | Proxy = access via Grafana backend, Direct = access directly from browser.
|
*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
|
||||||
Version | Version = opentsdb version, either <=2.1 or 2.2
|
*Version* | Version = opentsdb version, either <=2.1 or 2.2
|
||||||
Resolution | Metrics from opentsdb may have datapoints with either second or millisecond resolution.
|
*Resolution* | Metrics from opentsdb may have datapoints with either second or millisecond resolution.
|
||||||
|
|
||||||
|
|
||||||
## Query editor
|
## Query editor
|
||||||
Open a graph in edit mode by click the title. Query editor will differ if the datasource has version <=2.1 or = 2.2. In the former version, only tags can be used to query opentsdb. But in the latter version, filters as well as tags can be used to query opentsdb. Fill Policy is also introduced in opentsdb 2.2.
|
|
||||||
|
|
||||||
> Note: While using Opentsdb 2.2 datasource, make sure you use either Filters or Tags as they are mutually exclusive. If used together, might give you weird results.
|
Open a graph in edit mode by clicking the title. Query editor will differ if the datasource has version <=2.1 or = 2.2.
|
||||||
|
In the former version, only tags can be used to query OpenTSDB. But in the latter version, filters as well as tags
|
||||||
|
can be used to query opentsdb. Fill Policy is also introduced in OpenTSDB 2.2.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
> Note: While using OpenTSDB 2.2 datasource, make sure you use either Filters or Tags as they are mutually exclusive. If used together, might give you weird results.
|
||||||
|
|
||||||
### Auto complete suggestions
|
### Auto complete suggestions
|
||||||
As soon as you start typing metric names, tag names and tag values , you should see highlighted auto complete suggestions for them.
|
|
||||||
|
|
||||||
> Note: This is required for the OpenTSDB `suggest` api to work.
|
As soon as you start typing metric names, tag names and tag values , you should see highlighted auto complete suggestions for them.
|
||||||
|
The autocomplete only works if the OpenTSDB suggest api is enabled.
|
||||||
|
|
||||||
## Templating queries
|
## Templating queries
|
||||||
Grafana's OpenTSDB data source now supports template variable values queries. This means you can create template variables that fetch the values from OpenTSDB (for example metric names, tag names, or tag values). The query editor is also enhanced to limiting tags by metric.
|
|
||||||
|
Instead of hard-coding things like server, application and sensor name in you metric queries you can use variables in their place.
|
||||||
|
Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns makes it easy to change the data
|
||||||
|
being displayed in your dashboard.
|
||||||
|
|
||||||
|
Checkout the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
|
||||||
|
types of template variables.
|
||||||
|
|
||||||
|
### Query variable
|
||||||
|
|
||||||
|
Grafana's OpenTSDB data source supports template variable queries. This means you can create template variables
|
||||||
|
that fetch the values from OpenTSDB. For example, metric names, tag names, or tag values.
|
||||||
|
|
||||||
When using OpenTSDB with a template variable of `query` type you can use following syntax for lookup.
|
When using OpenTSDB with a template variable of `query` type you can use following syntax for lookup.
|
||||||
|
|
||||||
metrics(prefix) // returns metric names with specific prefix (can be empty)
|
Query | Description
|
||||||
tag_names(cpu) // return tag names (i.e. keys) for a specific cpu metric
|
------------ | -------------
|
||||||
tag_values(cpu, hostname) // return tag values for metric cpu and tag key hostname
|
*metrics(prefix)* | Returns metric names with specific prefix (can be empty)
|
||||||
suggest_tagk(prefix) // return tag names (i.e. keys) for all metrics with specific prefix (can be empty)
|
*tag_names(cpu)* | Return tag names (i.e. keys) for a specific cpu metric
|
||||||
suggest_tagv(prefix) // return tag values for all metrics with specific prefix (can be empty)
|
*tag_values(cpu, hostname)* | Return tag values for metric cpu and tag key hostname
|
||||||
|
*suggest_tagk(prefix)* | Return tag names (i.e. keys) for all metrics with specific prefix (can be empty)
|
||||||
|
*suggest_tagv(prefix)* | Return tag values for all metrics with specific prefix (can be empty)
|
||||||
|
|
||||||
If you do not see template variables being populated in `Preview of values` section, you need to enable `tsd.core.meta.enable_realtime_ts` in the OpenTSDB server settings. Also, to populate metadata of the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` on the OpenTSDB server.
|
If you do not see template variables being populated in `Preview of values` section, you need to enable
|
||||||
|
`tsd.core.meta.enable_realtime_ts` in the OpenTSDB server settings. Also, to populate metadata of
|
||||||
|
the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` on the OpenTSDB server.
|
||||||
|
|
||||||
### Nested Templating
|
### Nested Templating
|
||||||
|
|
||||||
One template variable can be used to filter tag values for another template varible. Very importantly, the order of the parameters matter in tag_values function. First parameter is the metric name, second parameter is the tag key for which you need to find tag values, and after that all other dependent template variables. Some examples are mentioned below to make nested template queries work successfully.
|
One template variable can be used to filter tag values for another template variable. First parameter is the metric name,
|
||||||
|
second parameter is the tag key for which you need to find tag values, and after that all other dependent template variables.
|
||||||
|
Some examples are mentioned below to make nested template queries work successfully.
|
||||||
|
|
||||||
tag_values(cpu, hostname, env=$env) // return tag values for cpu metric, selected env tag value and tag key hostname
|
Query | Description
|
||||||
tag_values(cpu, hostanme, env=$env, region=$region) // return tag values for cpu metric, selected env tag value, selected region tag value and tag key hostname
|
------------ | -------------
|
||||||
|
*tag_values(cpu, hostname, env=$env)* | Return tag values for cpu metric, selected env tag value and tag key hostname
|
||||||
|
*tag_values(cpu, hostname, env=$env, region=$region)* | Return tag values for cpu metric, selected env tag value, selected region tag value and tag key hostname
|
||||||
|
|
||||||
> Note: This is required for the OpenTSDB `lookup` api to work.
|
For details on OpenTSDB metric queries checkout the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)
|
||||||
|
|
||||||
For details on opentsdb metric queries checkout the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)
|
|
||||||
|
@ -10,74 +10,86 @@ parent = "datasources"
|
|||||||
weight = 2
|
weight = 2
|
||||||
+++
|
+++
|
||||||
|
|
||||||
|
|
||||||
# Using Prometheus in Grafana
|
# Using Prometheus in Grafana
|
||||||
|
|
||||||
Grafana includes support for Prometheus Datasources. While the process of adding the datasource is similar to adding a Graphite or OpenTSDB datasource type, Prometheus does have a few different options for building queries.
|
Grafana includes built-in support for Prometheus.
|
||||||
|
|
||||||
## Adding the data source to Grafana
|
## Adding the data source to Grafana
|
||||||
|
|
||||||

|
1. Open the side menu by clicking the Grafana icon in the top header.
|
||||||
|
|
||||||
1. Open the side menu by clicking the the Grafana icon in the top header.
|
|
||||||
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
||||||
|
3. Click the `+ Add data source` button in the top header.
|
||||||
|
4. Select `Prometheus` from the *Type* dropdown.
|
||||||
|
|
||||||
> NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
|
> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.
|
||||||
|
|
||||||
3. Click the `Add new` link in the top header.
|
## Data source options
|
||||||
4. Select `Prometheus` from the dropdown.
|
|
||||||
|
|
||||||
Name | Description
|
Name | Description
|
||||||
------------ | -------------
|
------------ | -------------
|
||||||
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
|
*Name* | The data source name. This is how you refer to the data source in panels & queries.
|
||||||
Default | Default data source means that it will be pre-selected for new panels.
|
*Default* | Default data source means that it will be pre-selected for new panels.
|
||||||
Url | The http protocol, ip and port of you Prometheus server (default port is usually 9090)
|
*Url* | The http protocol, ip and port of your Prometheus server (default port is usually 9090)
|
||||||
Access | Proxy = access via Grafana backend, Direct = access directly from browser.
|
*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
|
||||||
Basic Auth | Enable basic authentication to the Prometheus datasource.
|
*Basic Auth* | Enable basic authentication to the Prometheus data source.
|
||||||
User | Name of your Prometheus user
|
*User* | Name of your Prometheus user
|
||||||
Password | Database user's password
|
*Password* | Database user's password
|
||||||
|
|
||||||
> Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser.
|
|
||||||
|
|
||||||
> Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source.
|
|
||||||
|
|
||||||
## Query editor
|
## Query editor
|
||||||
Open a graph in edit mode by click the title.
|
|
||||||
|
|
||||||

|
Open a graph in edit mode by click the title > Edit (or by pressing `e` key while hovering over panel).
|
||||||
|
|
||||||
For details on Prometheus metric queries check out the Prometheus documentation
|

|
||||||
- [Query Metrics - Prometheus documentation](http://prometheus.io/docs/querying/basics/).
|
|
||||||
|
|
||||||
## Templated queries
|
|
||||||
|
|
||||||
Prometheus Datasource Plugin provides the following functions in `Variables values query` field in Templating Editor to query `metric names` and `labels names` on the Prometheus server.
|
|
||||||
|
|
||||||
Name | Description
|
Name | Description
|
||||||
------- | --------
|
------- | --------
|
||||||
`label_values(label)` | Returns a list of label values for the `label` in every metric.
|
*Query expression* | Prometheus query expression, check out the [Prometheus documentation](http://prometheus.io/docs/querying/basics/).
|
||||||
`label_values(metric, label)` | Returns a list of label values for the `label` in the specified metric.
|
*Legend format* | Controls the name of the time series, using name or pattern. For example `{{hostname}}` will be replaced with label value for the label `hostname`.
|
||||||
`metrics(metric)` | Returns a list of metrics matching the specified `metric` regex.
|
*Min step* | Set a lower limit for the Prometheus step option. Step controls how big the jumps are when the Prometheus query engine performs range queries. Sadly there is no official prometheus documentation to link to for this very important option.
|
||||||
`query_result(query)` | Returns a list of Prometheus query result for the `query`.
|
*Resolution* | Controls the step option. Small steps create high-resolution graphs but can be slow over larger time ranges, lowering the resolution can speed things up. `1/2` will try to set step option to generate 1 data point for every other pixel. A value of `1/10` will try to set step option so there is a data point every 10 pixels.
*Metric lookup* | Search for metric names in this input field.
|
||||||
|
*Format as* | **(New in v4.3)** Switch between Table & Time series. Table format will only work in the Table panel.
|
||||||
|
|
||||||
For details of `metric names` & `label names`, and `label values`, please refer to the [Prometheus documentation](http://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
|
## Templating
|
||||||
|
|
||||||
> Note: The part of queries is incompatible with the version before 2.6, if you specify like `foo.*`, please change like `metrics(foo.*)`.
|
Instead of hard-coding things like server, application and sensor name in you metric queries you can use variables in their place.
|
||||||
|
Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns makes it easy to change the data
|
||||||
|
being displayed in your dashboard.
|
||||||
|
|
||||||
You can create a template variable in Grafana and have that variable filled with values from any Prometheus metric exploration query.
|
Checkout the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
|
||||||
You can then use this variable in your Prometheus metric queries.
|
types of template variables.
|
||||||
|
|
||||||
For example you can have a variable that contains all values for label `hostname` if you specify a query like this in the templating edit view.
|
### Query variable
|
||||||
|
|
||||||
```sql
|
Variable of the type *Query* allows you to query Prometheus for a list of metrics, labels or label values. The Prometheus data source plugin
|
||||||
label_values(hostname)
|
provides the following functions you can use in the `Query` input field.
|
||||||
```
|
|
||||||
|
|
||||||
You can also use raw queries & regular expressions to extract anything you might need.
|
Name | Description
|
||||||
|
---- | --------
|
||||||
|
*label_values(label)* | Returns a list of label values for the `label` in every metric.
|
||||||
|
*label_values(metric, label)* | Returns a list of label values for the `label` in the specified metric.
|
||||||
|
*metrics(metric)* | Returns a list of metrics matching the specified `metric` regex.
|
||||||
|
*query_result(query)* | Returns a list of Prometheus query result for the `query`.
|
||||||
|
|
||||||
### Using templated variables in queries
|
For details of *metric names*, *label names* and *label values*, please refer to the [Prometheus documentation](http://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
|
||||||
|
|
||||||
When the `Include All` option or `Multi-Value` option is enabled, Grafana converts the labels from plain text to a regex compatible string.
|
### Using variables in queries
|
||||||
Which means you have to use `=~` instead of `=` in your Prometheus queries. For example `ALERTS{instance=~$instance}` instead of `ALERTS{instance=$instance}`.
|
|
||||||
|
|
||||||

|
There are two syntaxes:
|
||||||
|
|
||||||
|
- `$<varname>` Example: rate(http_requests_total{job=~"$job"}[5m])
|
||||||
|
- `[[varname]]` Example: rate(http_requests_total{job="my[[job]]"}[5m])
|
||||||
|
|
||||||
|
Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. When the *Multi-value* or *Include all value*
|
||||||
|
options are enabled, Grafana converts the labels from plain text to a regex compatible string. Which means you have to use `=~` instead of `=`.
|
||||||
|
|
||||||
|
## Annotations
|
||||||
|
|
||||||
|
[Annotations]({{< relref "reference/annotations.md" >}}) allows you to overlay rich event information on top of graphs. You add annotation
|
||||||
|
queries via the Dashboard menu / Annotations view.
|
||||||
|
|
||||||
|
Prometheus supports two ways to query annotations.
|
||||||
|
|
||||||
|
- A regular metric query
|
||||||
|
- A Prometheus query for pending and firing alerts (for details see [Inspecting alerts during runtime](https://prometheus.io/docs/alerting/rules/#inspecting-alerts-during-runtime))
|
||||||
|
|
||||||
|
The step option is useful to limit the number of events returned from your query.
|
||||||
|
@ -11,26 +11,20 @@ weight = 20
|
|||||||
|
|
||||||
# Grafana TestData
|
# Grafana TestData
|
||||||
|
|
||||||
> NOTE: This plugin is disable by default.
|
|
||||||
|
|
||||||
The purpose of this data sources is to make it easier to create fake data for any panel.
|
The purpose of this data source is to make it easier to create fake data for any panel.
|
||||||
Using `Grafana TestData` you can build your own time series and have any panel render it.
|
Using `Grafana TestData` you can build your own time series and have any panel render it.
|
||||||
This make is much easier to verify functionally since the data can be shared very
|
This makes it much easier to verify functionality since the data can be shared very
|
||||||
|
|
||||||
## Enable
|
## Enable
|
||||||
|
|
||||||
`Grafana TestData` is not enabled by default. To enable it you have to go to `/plugins/testdata/edit` and click the enable button to enable it for each server.
|
`Grafana TestData` is not enabled by default. To enable it you have to go to `/plugins/testdata/edit` and click the enable button to enable.
|
||||||
|
|
||||||
## Create mock data.
|
## Create mock data.
|
||||||
|
|
||||||
Once `Grafana TestData` is enabled you use it as a datasource in the metric panel.
|
Once `Grafana TestData` is enabled you can use it as a data source in any metric panel.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
## Scenarios
|
|
||||||
|
|
||||||
You can now choose different scenario that you want rendered in the drop down menu. If you have scenarios that you think should be added, please add them to `` and submit a pull request.
|
|
||||||
|
|
||||||
## CSV
|
## CSV
|
||||||
|
|
||||||
The comma separated values scenario is the most powerful one since it lets you create any kind of graph you like.
|
The comma separated values scenario is the most powerful one since it lets you create any kind of graph you like.
|
||||||
@ -38,7 +32,6 @@ Once you provided the numbers `Grafana TestData` will distribute them evenly bas
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
## Dashboards
|
## Dashboards
|
||||||
|
|
||||||
`Grafana TestData` also contains some dashboards with example. `/plugins/testdata/edit`
|
`Grafana TestData` also contains some dashboards with example. `/plugins/testdata/edit`
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
title = "Dashboard List"
|
title = "Dashboard List"
|
||||||
keywords = ["grafana", "dashboard list", "documentation", "panel", "dashlist"]
|
keywords = ["grafana", "dashboard list", "documentation", "panel", "dashlist"]
|
||||||
type = "docs"
|
type = "docs"
|
||||||
|
aliases = ["/reference/dashlist/"]
|
||||||
[menu.docs]
|
[menu.docs]
|
||||||
name = "Dashboard list"
|
name = "Dashboard list"
|
||||||
parent = "panels"
|
parent = "panels"
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
title = "Graph Panel"
|
title = "Graph Panel"
|
||||||
keywords = ["grafana", "graph panel", "documentation", "guide", "graph"]
|
keywords = ["grafana", "graph panel", "documentation", "guide", "graph"]
|
||||||
type = "docs"
|
type = "docs"
|
||||||
|
aliases = ["/reference/graph/"]
|
||||||
[menu.docs]
|
[menu.docs]
|
||||||
name = "Graph"
|
name = "Graph"
|
||||||
parent = "panels"
|
parent = "panels"
|
||||||
@ -18,7 +19,8 @@ Clicking the title for a panel exposes a menu. The `edit` option opens addition
|
|||||||
options for the panel.
|
options for the panel.
|
||||||
|
|
||||||
## General
|
## General
|
||||||

|
|
||||||
|

|
||||||
|
|
||||||
The general tab allows customization of a panel's appearance and menu options.
|
The general tab allows customization of a panel's appearance and menu options.
|
||||||
|
|
||||||
@ -31,14 +33,14 @@ The general tab allows customization of a panel's appearance and menu options.
|
|||||||
### Drilldown / detail link
|
### Drilldown / detail link
|
||||||
|
|
||||||
The drilldown section allows adding dynamic links to the panel that can link to other dashboards
|
The drilldown section allows adding dynamic links to the panel that can link to other dashboards
|
||||||
or URLs
|
or URLs.
|
||||||
|
|
||||||
Each link has a title, a type and params. A link can be either a ``dashboard`` or ``absolute`` links.
|
Each link has a title, a type and params. A link can be either a ``dashboard`` or ``absolute`` links.
|
||||||
If it is a dashboard links, the `dashboard` value must be the name of a dashboard. If it's an
|
If it is a dashboard link, the `dashboard` value must be the name of a dashboard. If it is an
|
||||||
`absolute` link, the URL is the URL to link.
|
`absolute` link, the URL is the URL to the link.
|
||||||
|
|
||||||
``params`` allows adding additional URL params to the links. The format is the ``name=value`` with
|
``params`` allows adding additional URL params to the links. The format is the ``name=value`` with
|
||||||
multiple params separate by ``&``. Template variables can be added as values using ``$myvar``.
|
multiple params separated by ``&``. Template variables can be added as values using ``$myvar``.
|
||||||
|
|
||||||
When linking to another dashboard that uses template variables, you can use ``var-myvar=value`` to
|
When linking to another dashboard that uses template variables, you can use ``var-myvar=value`` to
|
||||||
populate the template variable to a desired value from the link.
|
populate the template variable to a desired value from the link.
|
||||||
@ -50,7 +52,7 @@ options.
|
|||||||
|
|
||||||
## Axes & Grid
|
## Axes & Grid
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
The Axes & Grid tab controls the display of axes, grids and legend.
|
The Axes & Grid tab controls the display of axes, grids and legend.
|
||||||
|
|
||||||
@ -74,7 +76,6 @@ values can be hidden from the legend using the ``Hide empty`` checkbox.
|
|||||||
### Legend Values
|
### Legend Values
|
||||||
|
|
||||||
Additional values can be shown along-side the legend names:
|
Additional values can be shown along-side the legend names:
|
||||||
|
|
||||||
- ``Total`` - Sum of all values returned from metric query
|
- ``Total`` - Sum of all values returned from metric query
|
||||||
- ``Current`` - Last value returned from the metric query
|
- ``Current`` - Last value returned from the metric query
|
||||||
- ``Min`` - Minimum of all values returned from metric query
|
- ``Min`` - Minimum of all values returned from metric query
|
||||||
@ -83,16 +84,16 @@ Additional values can be shown along-side the legend names:
|
|||||||
- ``Decimals`` - Controls how many decimals are displayed for legend values (and graph hover tooltips)
|
- ``Decimals`` - Controls how many decimals are displayed for legend values (and graph hover tooltips)
|
||||||
|
|
||||||
The legend values are calculated client side by Grafana and depend on what type of
|
The legend values are calculated client side by Grafana and depend on what type of
|
||||||
aggregation or point consolidation you metric query is using. All the above legend values cannot
|
aggregation or point consolidation your metric query is using. All the above legend values cannot
|
||||||
be correct at the same time. For example if you plot a rate like requests/second, this is probably
|
be correct at the same time. For example if you plot a rate like requests/second, this is probably
|
||||||
using average as aggregator, then the Total in the legend will not represent the total number of requests.
|
using average as aggregator, then the Total in the legend will not represent the total number of requests.
|
||||||
It is just the sum of all data points received by Grafana.
|
It is just the sum of all data points received by Grafana.
|
||||||
|
|
||||||
## Display styles
|
## Display styles
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
Display styles controls properties of the graph.
|
Display styles control visual properties of the graph.
|
||||||
|
|
||||||
### Thresholds
|
### Thresholds
|
||||||
|
|
||||||
@ -108,19 +109,19 @@ the graph crosses a particular threshold.
|
|||||||
|
|
||||||
### Line Options
|
### Line Options
|
||||||
|
|
||||||
- ``Line Fill`` - Amount of color fill for a series. 0 is none.
|
- ``Line Fill`` - Amount of color fill for a series. 0 is none.
|
||||||
- ``Line Width`` - The width of the line for a series.
|
- ``Line Width`` - The width of the line for a series.
|
||||||
- ``Null point mode`` - How null values are displayed
|
- ``Null point mode`` - How null values are displayed
|
||||||
- ``Staircase line`` - Draws adjacent points as staircase
|
- ``Staircase line`` - Draws adjacent points as staircase
|
||||||
|
|
||||||
### Multiple Series
|
### Multiple Series
|
||||||
|
|
||||||
If there are multiple series, they can be display as a group.
|
If there are multiple series, they can be displayed as a group.
|
||||||
|
|
||||||
- ``Stack`` - Each series is stacked on top of another
|
- ``Stack`` - Each series is stacked on top of another
|
||||||
- ``Percent`` - Each series is draw as a percent of the total of all series
|
- ``Percent`` - Each series is drawn as a percentage of the total of all series
|
||||||
|
|
||||||
If you have stack enabled you can select what the mouse hover feature should show.
|
If you have stack enabled, you can select what the mouse hover feature should show.
|
||||||
|
|
||||||
- Cumulative - Sum of series below plus the series you hover over
|
- Cumulative - Sum of series below plus the series you hover over
|
||||||
- Individual - Just the value for the series you hover over
|
- Individual - Just the value for the series you hover over
|
||||||
@ -134,11 +135,15 @@ If you have stack enabled you can select what the mouse hover feature should sho
|
|||||||
|
|
||||||
- ``All series`` - Show all series on the same tooltip and a x crosshairs to help follow all series
|
- ``All series`` - Show all series on the same tooltip and a x crosshairs to help follow all series
|
||||||
|
|
||||||
### Series specific overrides
|
### Series Specific Overrides
|
||||||
|
|
||||||
The section allows a series to be render different from the rest. For example, one series can be given
|
The section allows a series to be rendered differently from the others. For example, one series can be given
|
||||||
a thicker line width to make it standout.
|
a thicker line width to make it stand out.
|
||||||
|
|
||||||
## Time range
|
#### Dashes Drawing Style
|
||||||
|
|
||||||
|
There is an option under Series overrides to draw lines as dashes. Set Dashes to the value True to override the line draw setting for a specific series.
|
||||||
|
|
||||||
|
## Time Range
|
||||||
|
|
||||||

|

|
||||||
|
108
docs/sources/features/panels/heatmap.md
Normal file
108
docs/sources/features/panels/heatmap.md
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
+++
|
||||||
|
title = "Heatmap Panel"
|
||||||
|
description = "Heatmap panel documentation"
|
||||||
|
keywords = ["grafana", "heatmap", "panel", "documentation"]
|
||||||
|
type = "docs"
|
||||||
|
[menu.docs]
|
||||||
|
name = "Heatmap"
|
||||||
|
parent = "panels"
|
||||||
|
weight = 3
|
||||||
|
+++
|
||||||
|
|
||||||
|
# Heatmap Panel
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
> New panel only available in Grafana v4.3+
|
||||||
|
|
||||||
|
The Heatmap panel allows you to view histograms over time. To fully understand and use this panel you need to
|
||||||
|
understand what Histograms are and how they are created. Read on below for a quick introduction to the
|
||||||
|
term Histogram.
|
||||||
|
|
||||||
|
## Histograms and buckets
|
||||||
|
|
||||||
|
A histogram is a graphical representation of the distribution of numerical data. You group values into buckets
|
||||||
|
(some times also called bins) and then count how many values fall into each bucket. Instead
|
||||||
|
of graphing the actual values you then graph the buckets. Each bar represents a bucket
|
||||||
|
and the bar height represents the frequency (i.e. count) of values that fell into that bucket's interval.
|
||||||
|
|
||||||
|
Example Histogram:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
The above histogram shows us the value distribution of a couple of time series. We can easily see that
|
||||||
|
most values land between 240-300 with a peak between 260-280. Histograms just look at value distributions
|
||||||
|
over specific time range. So you cannot see any trend or changes in the distribution over time,
|
||||||
|
this is where heatmaps become useful.
|
||||||
|
|
||||||
|
## Heatmap
|
||||||
|
|
||||||
|
A Heatmap is like a histogram but over time where each time slice represents its own
|
||||||
|
histogram. Instead of using bar height as a representation of frequency you use cells and color
|
||||||
|
the cell proportional to the number of values in the bucket.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Here we can clearly see what values are more common and how they trend over time.
|
||||||
|
|
||||||
|
## Data Options
|
||||||
|
|
||||||
|
Data and bucket options can be found in the `Axes` tab.
|
||||||
|
|
||||||
|
### Data Formats
|
||||||
|
|
||||||
|
Data format | Description
|
||||||
|
------------ | -------------
|
||||||
|
*Time series* | Grafana does the bucketing by going through all time series values. The bucket sizes & intervals will be determined using the Buckets options.
|
||||||
|
*Time series buckets* | Each time series already represents a Y-Axis bucket. The time series name (alias) needs to be a numeric value representing the upper interval for the bucket. Grafana does no bucketing so the bucket size options are hidden.
|
||||||
|
|
||||||
|
### Bucket Size
|
||||||
|
|
||||||
|
The Bucket count & size options are used by Grafana to calculate how big each cell in the heatmap is. You can
|
||||||
|
define the bucket size either by count (the first input box) or by specifying a size interval. For the Y-Axis
|
||||||
|
the size interval is just a value but for the X-bucket you can specify a time range in the *Size* input, for example,
|
||||||
|
the time range `1h`. This will make the cells 1h wide on the X-axis.
|
||||||
|
|
||||||
|
### Pre-bucketed data
|
||||||
|
|
||||||
|
If you have data that is already organized into buckets you can use the `Time series buckets` data format. This format requires that your metric query return regular time series and that each time series has a numeric name
|
||||||
|
that represent the upper or lower bound of the interval.
|
||||||
|
|
||||||
|
The only data source that supports histograms over time is Elasticsearch. You do this by adding a *Histogram*
|
||||||
|
bucket aggregation before the *Date Histogram*.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
You control the size of the buckets using the Histogram interval (Y-Axis) and the Date Histogram interval (X-axis).
|
||||||
|
|
||||||
|
## Display Options
|
||||||
|
|
||||||
|
In the heatmap *Display* tab you define how the cells are rendered and what color they are assigned.
|
||||||
|
|
||||||
|
### Color Mode & Spectrum
|
||||||
|
|
||||||
|
{{< imgbox max-width="40%" img="/img/docs/v43/heatmap_scheme.png" caption="Color spectrum" >}}
|
||||||
|
|
||||||
|
The color spectrum controls the mapping between value count (in each bucket) and the color assigned to each bucket.
|
||||||
|
The left most color on the spectrum represents the minimum count and the color on the right most side represents the
|
||||||
|
maximum count. Some color schemes are automatically inverted when using the light theme.
|
||||||
|
|
||||||
|
You can also change the color mode to `Opacity`. In this case, the color will not change but the amount of opacity will
|
||||||
|
change with the bucket count.
|
||||||
|
|
||||||
|
## Raw data vs aggregated
|
||||||
|
|
||||||
|
If you use the heatmap with regular time series data (not pre-bucketed). Then it's important to keep in mind that your data
|
||||||
|
is often already aggregated by your time series backend. Most time series queries do not return raw sample data
|
||||||
|
but include a group by time interval or maxDataPoints limit coupled with an aggregation function (usually average).
|
||||||
|
|
||||||
|
This all depends on the time range of your query of course. But the important point is to know that the Histogram bucketing
|
||||||
|
that Grafana performs may be done on already aggregated and averaged data. To get more accurate heatmaps it is better
|
||||||
|
to do the bucketing during metric collection or store the data in Elasticsearch, which currently is the only data source
|
||||||
|
that supports doing Histogram bucketing on the raw data.
|
||||||
|
|
||||||
|
If you remove or lower the group by time (or raise maxDataPoints) in your query to return more data points your heatmap will be
|
||||||
|
more accurate but this can also be very CPU & Memory taxing for your browser and could cause hangs and crashes if the number of
|
||||||
|
data points becomes unreasonably large.
|
@ -2,6 +2,7 @@
|
|||||||
title = "Singlestat Panel"
|
title = "Singlestat Panel"
|
||||||
keywords = ["grafana", "dashboard", "documentation", "panels", "singlestat"]
|
keywords = ["grafana", "dashboard", "documentation", "panels", "singlestat"]
|
||||||
type = "docs"
|
type = "docs"
|
||||||
|
aliases = ["/reference/singlestat/"]
|
||||||
[menu.docs]
|
[menu.docs]
|
||||||
name = "Singlestat"
|
name = "Singlestat"
|
||||||
parent = "panels"
|
parent = "panels"
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
title = "Table Panel"
|
title = "Table Panel"
|
||||||
keywords = ["grafana", "dashboard", "documentation", "panels", "table panel"]
|
keywords = ["grafana", "dashboard", "documentation", "panels", "table panel"]
|
||||||
type = "docs"
|
type = "docs"
|
||||||
|
aliases = ["/reference/table/"]
|
||||||
[menu.docs]
|
[menu.docs]
|
||||||
name = "Table"
|
name = "Table"
|
||||||
parent = "panels"
|
parent = "panels"
|
||||||
@ -84,8 +85,9 @@ The column styles allow you control how dates and numbers are formatted.
|
|||||||
|
|
||||||
1. `Name or regex`: The Name or Regex field controls what columns the rule should be applied to. The regex or name filter will be matched against the column name not against column values.
|
1. `Name or regex`: The Name or Regex field controls what columns the rule should be applied to. The regex or name filter will be matched against the column name not against column values.
|
||||||
2. `Type`: The three supported types of types are `Number`, `String` and `Date`.
|
2. `Type`: The three supported types of types are `Number`, `String` and `Date`.
|
||||||
3. `Format`: Specify date format. Only available when `Type` is set to `Date`.
|
3. `Title`: Title for the column, when using a Regex the title can include replacement strings like `$1`.
|
||||||
4. `Coloring` and `Thresholds`: Specify color mode and thresholds limits.
|
4. `Format`: Specify date format. Only available when `Type` is set to `Date`.
|
||||||
5. `Unit` and `Decimals`: Specify unit and decimal precision for numbers.
|
5. `Coloring` and `Thresholds`: Specify color mode and thresholds limits.
|
||||||
6. `Add column style rule`: Add new column rule.
|
6. `Unit` and `Decimals`: Specify unit and decimal precision for numbers.
|
||||||
|
7. `Add column style rule`: Add new column rule.
|
||||||
|
|
||||||
|
@ -12,7 +12,7 @@ weight = -1
|
|||||||
|
|
||||||
## What's new in Grafana v4.2
|
## What's new in Grafana v4.2
|
||||||
|
|
||||||
Grafana v4.2 Beta is now [available for download](/download/4_2_0/).
|
Grafana v4.2 Beta is now [available for download](https://grafana.com/grafana/download/4.2.0).
|
||||||
Just like the last release this one contains lots of bug fixes and minor improvements.
|
Just like the last release this one contains lots of bug fixes and minor improvements.
|
||||||
We are very happy to say that 27 of 40 issues were closed by pull requests from the community.
|
We are very happy to say that 27 of 40 issues were closed by pull requests from the community.
|
||||||
Big thumbs up!
|
Big thumbs up!
|
||||||
|
105
docs/sources/guides/whats-new-in-v4-3.md
Normal file
105
docs/sources/guides/whats-new-in-v4-3.md
Normal file
@ -0,0 +1,105 @@
|
|||||||
|
+++
|
||||||
|
title = "What's New in Grafana v4.3"
|
||||||
|
description = "Feature & improvement highlights for Grafana v4.3"
|
||||||
|
keywords = ["grafana", "new", "documentation", "4.3.0"]
|
||||||
|
type = "docs"
|
||||||
|
[menu.docs]
|
||||||
|
name = "Version 4.3"
|
||||||
|
identifier = "v4.3"
|
||||||
|
parent = "whatsnew"
|
||||||
|
weight = -2
|
||||||
|
+++
|
||||||
|
|
||||||
|
## What's New in Grafana v4.3
|
||||||
|
|
||||||
|
Grafana v4.3 Beta is now [available for download](https://grafana.com/grafana/download/4.3.0-beta1).
|
||||||
|
|
||||||
|
## Release Highlights
|
||||||
|
|
||||||
|
- New [Heatmap Panel](http://docs.grafana.org/features/panels/heatmap/)
|
||||||
|
- Graph Panel Histogram Mode
|
||||||
|
- Elasticsearch Histogram Aggregation
|
||||||
|
- Prometheus Table data format
|
||||||
|
- New [MySQL Data Source](http://docs.grafana.org/features/datasources/mysql/) (alpha version to get some early feedback)
|
||||||
|
- 60+ small fixes and improvements, most of them contributed by our fantastic community!
|
||||||
|
|
||||||
|
Check out the [New Features in v4.3 Dashboard](http://play.grafana.org/dashboard/db/new-features-in-v4-3?orgId=1) on the Grafana Play site for a showcase of these new features.
|
||||||
|
|
||||||
|
## Histogram Support
|
||||||
|
|
||||||
|
A Histogram is a kind of bar chart that groups numbers into ranges, often called buckets or bins. Taller bars show that more data falls in that range.
|
||||||
|
|
||||||
|
The Graph Panel now supports Histograms.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Histogram Aggregation Support for Elasticsearch
|
||||||
|
|
||||||
|
Elasticsearch is the only supported data source that can return pre-bucketed data (data that is already grouped into ranges). With other data sources there is a risk of returning inaccurate data in a histogram due to using already aggregated data rather than raw data. This release adds support for Elasticsearch pre-bucketed data that can be visualized with the new [Heatmap Panel](http://docs.grafana.org/features/panels/heatmap/).
|
||||||
|
|
||||||
|
## Heatmap Panel
|
||||||
|
|
||||||
|
The Histogram support in the Graph Panel does not show changes over time - it aggregates all the data together for the chosen time range. To visualize a histogram over time, we have built a new [Heatmap Panel](http://docs.grafana.org/features/panels/heatmap/).
|
||||||
|
|
||||||
|
Every column in a Heatmap is a histogram snapshot. Instead of visualizing higher values with higher bars, a heatmap visualizes higher values with color. The histogram shown above is equivalent to one column in the heatmap shown below.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
The Heatmap panel also works with Elasticsearch Histogram Aggregations for more accurate server side bucketing.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## MySQL Data Source (alpha)
|
||||||
|
|
||||||
|
This release includes a [new core data source for MySQL](http://docs.grafana.org/features/datasources/mysql/). You can write any possible MySQL query and format it as either Time Series or Table Data allowing it be used with the Graph Panel, Table Panel and SingleStat Panel.
|
||||||
|
|
||||||
|
We are still working on the MySQL data source. As it's missing some important features, like templating and macros and future changes could be breaking, we are
|
||||||
|
labeling the state of the data source as Alpha. Instead of holding up the release of v4.3 we are including it in its current shape to get some early feedback. So please try it out and let us know what you think on [twitter](https://twitter.com/intent/tweet?text=.%40grafana&source=4_3_beta_blog&related=blog) or on our [community forum](https://community.grafana.com/c/releases). Is this a feature that you would use? How can we make it better?
|
||||||
|
|
||||||
|
**The query editor can show the generated and interpolated SQL that is sent to the MySQL server.**
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
**The query editor will also show any errors that resulted from running the query (very useful when you have a syntax error!).**
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Health Check Endpoint
|
||||||
|
|
||||||
|
Now you can monitor the monitoring with the Health Check Endpoint! The new `/api/health` endpoint returns HTTP 200 OK if everything is up and HTTP 503 Error if the Grafana database cannot be pinged.
|
||||||
|
|
||||||
|
## Lazy Load Panels
|
||||||
|
|
||||||
|
Grafana now delays loading panels until they become visible (scrolled into view). This means panels out of view are not sending requests thereby reducing the load on your time series database.
|
||||||
|
|
||||||
|
## Prometheus - Table Data (column per label)
|
||||||
|
|
||||||
|
The Prometheus data source now supports the Table Data format by automatically assigning a column to a label. This makes it really easy to browse data in the table panel.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Other Highlights From The Changelog
|
||||||
|
|
||||||
|
Changes:
|
||||||
|
|
||||||
|
- **Table**: Support to change column header text [#3551](https://github.com/grafana/grafana/issues/3551)
|
||||||
|
- **InfluxDB**: influxdb query builder support for ORDER BY and LIMIT (allows TOPN queries) [#6065](https://github.com/grafana/grafana/issues/6065) Support influxdb's SLIMIT Feature [#7232](https://github.com/grafana/grafana/issues/7232) thx [@thuck](https://github.com/thuck)
|
||||||
|
- **Graph**: Support auto grid min/max when using log scale [#3090](https://github.com/grafana/grafana/issues/3090), thx [@bigbenhur](https://github.com/bigbenhur)
|
||||||
|
- **Prometheus**: Make Prometheus query field a textarea [#7663](https://github.com/grafana/grafana/issues/7663), thx [@hagen1778](https://github.com/hagen1778)
|
||||||
|
- **Server**: Support listening on a UNIX socket [#4030](https://github.com/grafana/grafana/issues/4030), thx [@mitjaziv](https://github.com/mitjaziv)
|
||||||
|
|
||||||
|
Fixes:
|
||||||
|
|
||||||
|
- **MySQL**: 4-byte UTF8 not supported when using MySQL database (allows Emojis in Dashboard Names) [#7958](https://github.com/grafana/grafana/issues/7958)
|
||||||
|
- **Dashboard**: Description tooltip is not fully displayed [#7970](https://github.com/grafana/grafana/issues/7970)
|
||||||
|
|
||||||
|
Lots more enhancements and fixes can be found in the [Changelog](https://github.com/grafana/grafana/blob/master/CHANGELOG.md).
|
||||||
|
|
||||||
|
## Download
|
||||||
|
|
||||||
|
Head to the [v4.3 download page](https://grafana.com/grafana/download) for download links & instructions.
|
||||||
|
|
||||||
|
## Thanks
|
||||||
|
|
||||||
|
A big thanks to all the Grafana users who contribute by submitting PRs, bug reports, helping out on our [community site](https://community.grafana.com/) and providing feedback!
|
||||||
|
|
@ -237,12 +237,14 @@ Change password for specific user
|
|||||||
Accept: application/json
|
Accept: application/json
|
||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
|
|
||||||
|
{"password":"userpassword"}
|
||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200
|
HTTP/1.1 200
|
||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
|
|
||||||
{"password":"userpassword"}
|
{"message": "User password updated"}
|
||||||
|
|
||||||
## Permissions
|
## Permissions
|
||||||
|
|
||||||
@ -254,6 +256,8 @@ Change password for specific user
|
|||||||
Accept: application/json
|
Accept: application/json
|
||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
|
|
||||||
|
{"isGrafanaAdmin": true}
|
||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200
|
HTTP/1.1 200
|
||||||
@ -280,14 +284,22 @@ Change password for specific user
|
|||||||
|
|
||||||
## Pause all alerts
|
## Pause all alerts
|
||||||
|
|
||||||
`DELETE /api/admin/pause-all-alerts`
|
`POST /api/admin/pause-all-alerts`
|
||||||
|
|
||||||
**Example Request**:
|
**Example Request**:
|
||||||
|
|
||||||
DELETE /api/admin/pause-all-alerts HTTP/1.1
|
POST /api/admin/pause-all-alerts HTTP/1.1
|
||||||
Accept: application/json
|
Accept: application/json
|
||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
|
|
||||||
|
{
|
||||||
|
"paused": true
|
||||||
|
}
|
||||||
|
|
||||||
|
JSON Body schema:
|
||||||
|
|
||||||
|
- **paused** – If true then all alerts are to be paused, false unpauses all alerts.
|
||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200
|
HTTP/1.1 200
|
||||||
|
@ -12,8 +12,8 @@ parent = "http_api"
|
|||||||
|
|
||||||
# Alerting API
|
# Alerting API
|
||||||
|
|
||||||
You can use the Alerting API to get information about alerts and their states but this API cannot be used to modify the alert.
|
You can use the Alerting API to get information about alerts and their states but this API cannot be used to modify the alert.
|
||||||
To create new alerts or modify them you need to update the dashboard json that contains the alerts.
|
To create new alerts or modify them you need to update the dashboard json that contains the alerts.
|
||||||
|
|
||||||
This API can also be used to create, update and delete alert notifications.
|
This API can also be used to create, update and delete alert notifications.
|
||||||
|
|
||||||
@ -28,6 +28,17 @@ This API can also be used to create, update and delete alert notifications.
|
|||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
|
||||||
|
Querystring Parameters:
|
||||||
|
|
||||||
|
These parameters are used as querystring parameters. For example:
|
||||||
|
|
||||||
|
`/api/alerts?dashboardId=1`
|
||||||
|
|
||||||
|
- **dashboardId** – Return alerts for a specified dashboard.
|
||||||
|
- **panelId** – Return alerts for a specified panel on a dashboard.
|
||||||
|
- **limit** - Limit response to x number of alerts.
|
||||||
|
- **state** - Return alerts with one or more of the following alert states: `ALL`,`no_data`, `paused`, `alerting`, `ok`, `pending`. To specify multiple states use the following format: `?state=paused&state=alerting`
|
||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200
|
HTTP/1.1 200
|
||||||
@ -40,6 +51,13 @@ This API can also be used to create, update and delete alert notifications.
|
|||||||
"name": "fire place sensor",
|
"name": "fire place sensor",
|
||||||
"message": "Someone is trying to break in through the fire place",
|
"message": "Someone is trying to break in through the fire place",
|
||||||
"state": "alerting",
|
"state": "alerting",
|
||||||
|
"evalDate": "0001-01-01T00:00:00Z",
|
||||||
|
"evalData": [
|
||||||
|
{
|
||||||
|
"metric": "fire",
|
||||||
|
"tags": null,
|
||||||
|
"value": 5.349999999999999
|
||||||
|
}
|
||||||
"newStateDate": "2016-12-25",
|
"newStateDate": "2016-12-25",
|
||||||
"executionError": "",
|
"executionError": "",
|
||||||
"dashboardUri": "http://grafana.com/dashboard/db/sensors"
|
"dashboardUri": "http://grafana.com/dashboard/db/sensors"
|
||||||
@ -73,7 +91,6 @@ This API can also be used to create, update and delete alert notifications.
|
|||||||
"dashboardUri": "http://grafana.com/dashboard/db/sensors"
|
"dashboardUri": "http://grafana.com/dashboard/db/sensors"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
## Pause alert
|
## Pause alert
|
||||||
|
|
||||||
`POST /api/alerts/:id/pause`
|
`POST /api/alerts/:id/pause`
|
||||||
@ -86,10 +103,15 @@ This API can also be used to create, update and delete alert notifications.
|
|||||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
|
||||||
{
|
{
|
||||||
"alertId": 1,
|
|
||||||
"paused": true
|
"paused": true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
The `:id` path parameter is the id of the alert to be paused or unpaused.
|
||||||
|
|
||||||
|
JSON Body Schema:
|
||||||
|
|
||||||
|
- **paused** – Can be `true` or `false`. True to pause an alert. False to unpause an alert.
|
||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200
|
HTTP/1.1 200
|
||||||
@ -111,11 +133,13 @@ This API can also be used to create, update and delete alert notifications.
|
|||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200
|
HTTP/1.1 200
|
||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
|
|
||||||
{
|
{
|
||||||
"id": 1,
|
"id": 1,
|
||||||
"name": "Team A",
|
"name": "Team A",
|
||||||
@ -127,11 +151,11 @@ This API can also be used to create, update and delete alert notifications.
|
|||||||
|
|
||||||
## Create alert notification
|
## Create alert notification
|
||||||
|
|
||||||
`POST /api/alerts-notifications`
|
`POST /api/alert-notifications`
|
||||||
|
|
||||||
**Example Request**:
|
**Example Request**:
|
||||||
|
|
||||||
POST /api/alerts-notifications HTTP/1.1
|
POST /api/alert-notifications HTTP/1.1
|
||||||
Accept: application/json
|
Accept: application/json
|
||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
@ -144,29 +168,29 @@ This API can also be used to create, update and delete alert notifications.
|
|||||||
"addresses": "carl@grafana.com;dev@grafana.com"
|
"addresses": "carl@grafana.com;dev@grafana.com"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200
|
HTTP/1.1 200
|
||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
{
|
{
|
||||||
"id": 1,
|
"id": 1,
|
||||||
"name": "new alert notification",
|
"name": "new alert notification",
|
||||||
"type": "email",
|
"type": "email",
|
||||||
"isDefault": false,
|
"isDefault": false,
|
||||||
"settings": { addresses: "carl@grafana.com;dev@grafana.com"} }
|
"settings": { addresses: "carl@grafana.com;dev@grafana.com"} }
|
||||||
"created": "2017-01-01 12:34",
|
"created": "2017-01-01 12:34",
|
||||||
"updated": "2017-01-01 12:34"
|
"updated": "2017-01-01 12:34"
|
||||||
}
|
}
|
||||||
|
|
||||||
## Update alert notification
|
## Update alert notification
|
||||||
|
|
||||||
`PUT /api/alerts-notifications/1`
|
`PUT /api/alert-notifications/1`
|
||||||
|
|
||||||
**Example Request**:
|
**Example Request**:
|
||||||
|
|
||||||
PUT /api/alerts-notifications/1 HTTP/1.1
|
PUT /api/alert-notifications/1 HTTP/1.1
|
||||||
Accept: application/json
|
Accept: application/json
|
||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
@ -176,33 +200,33 @@ This API can also be used to create, update and delete alert notifications.
|
|||||||
"name": "new alert notification", //Required
|
"name": "new alert notification", //Required
|
||||||
"type": "email", //Required
|
"type": "email", //Required
|
||||||
"isDefault": false,
|
"isDefault": false,
|
||||||
"settings": {
|
"settings": {
|
||||||
"addresses: "carl@grafana.com;dev@grafana.com"
|
"addresses: "carl@grafana.com;dev@grafana.com"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200
|
HTTP/1.1 200
|
||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
{
|
{
|
||||||
"id": 1,
|
"id": 1,
|
||||||
"name": "new alert notification",
|
"name": "new alert notification",
|
||||||
"type": "email",
|
"type": "email",
|
||||||
"isDefault": false,
|
"isDefault": false,
|
||||||
"settings": { addresses: "carl@grafana.com;dev@grafana.com"} }
|
"settings": { addresses: "carl@grafana.com;dev@grafana.com"} }
|
||||||
"created": "2017-01-01 12:34",
|
"created": "2017-01-01 12:34",
|
||||||
"updated": "2017-01-01 12:34"
|
"updated": "2017-01-01 12:34"
|
||||||
}
|
}
|
||||||
|
|
||||||
## Delete alert notification
|
## Delete alert notification
|
||||||
|
|
||||||
`DELETE /api/alerts-notifications/:notificationId`
|
`DELETE /api/alert-notifications/:notificationId`
|
||||||
|
|
||||||
**Example Request**:
|
**Example Request**:
|
||||||
|
|
||||||
DELETE /api/alerts-notifications/1 HTTP/1.1
|
DELETE /api/alert-notifications/1 HTTP/1.1
|
||||||
Accept: application/json
|
Accept: application/json
|
||||||
Content-Type: application/json
|
Content-Type: application/json
|
||||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
@ -41,3 +41,80 @@ You use the token in all requests in the `Authorization` header, like this:
|
|||||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
|
||||||
The `Authorization` header value should be `Bearer <your api key>`.
|
The `Authorization` header value should be `Bearer <your api key>`.
|
||||||
|
|
||||||
|
# Auth HTTP resources / actions
|
||||||
|
|
||||||
|
## Api Keys
|
||||||
|
|
||||||
|
`GET /api/auth/keys`
|
||||||
|
|
||||||
|
**Example Request**:
|
||||||
|
|
||||||
|
GET /api/auth/keys HTTP/1.1
|
||||||
|
Accept: application/json
|
||||||
|
Content-Type: application/json
|
||||||
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
|
||||||
|
**Example Response**:
|
||||||
|
|
||||||
|
HTTP/1.1 200
|
||||||
|
Content-Type: application/json
|
||||||
|
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"name": "API",
|
||||||
|
"role": "Admin"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"name": "TestAdmin",
|
||||||
|
"role": "Admin"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
## Create API Key
|
||||||
|
|
||||||
|
`POST /api/auth/keys`
|
||||||
|
|
||||||
|
**Example Request**:
|
||||||
|
|
||||||
|
POST /api/auth/keys HTTP/1.1
|
||||||
|
Accept: application/json
|
||||||
|
Content-Type: application/json
|
||||||
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
|
||||||
|
{
|
||||||
|
"name": "mykey",
|
||||||
|
"role": "Admin"
|
||||||
|
}
|
||||||
|
|
||||||
|
JSON Body schema:
|
||||||
|
|
||||||
|
- **name** – The key name
|
||||||
|
- **role** – Sets the access level/Grafana Role for the key. Can be one of the following values: `Viewer`, `Editor`, `Read Only Editor` or `Admin`.
|
||||||
|
|
||||||
|
**Example Response**:
|
||||||
|
|
||||||
|
HTTP/1.1 200
|
||||||
|
Content-Type: application/json
|
||||||
|
|
||||||
|
{"name":"mykey","key":"eyJrIjoiWHZiSWd3NzdCYUZnNUtibE9obUpESmE3bzJYNDRIc0UiLCJuIjoibXlrZXkiLCJpZCI6MX1="}
|
||||||
|
|
||||||
|
## Delete API Key
|
||||||
|
|
||||||
|
`DELETE /api/auth/keys/:id`
|
||||||
|
|
||||||
|
**Example Request**:
|
||||||
|
|
||||||
|
DELETE /api/auth/keys/3 HTTP/1.1
|
||||||
|
Accept: application/json
|
||||||
|
Content-Type: application/json
|
||||||
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
|
||||||
|
**Example Response**:
|
||||||
|
|
||||||
|
HTTP/1.1 200
|
||||||
|
Content-Type: application/json
|
||||||
|
|
||||||
|
{"message":"API key deleted"}
|
||||||
|
@ -19,26 +19,28 @@ Creates a new dashboard or updates an existing dashboard.
|
|||||||
|
|
||||||
**Example Request for new dashboard**:
|
**Example Request for new dashboard**:
|
||||||
|
|
||||||
POST /api/dashboards/db HTTP/1.1
|
```http
|
||||||
Accept: application/json
|
POST /api/dashboards/db HTTP/1.1
|
||||||
Content-Type: application/json
|
Accept: application/json
|
||||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
Content-Type: application/json
|
||||||
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
|
||||||
{
|
{
|
||||||
"dashboard": {
|
"dashboard": {
|
||||||
"id": null,
|
"id": null,
|
||||||
"title": "Production Overview",
|
"title": "Production Overview",
|
||||||
"tags": [ "templated" ],
|
"tags": [ "templated" ],
|
||||||
"timezone": "browser",
|
"timezone": "browser",
|
||||||
"rows": [
|
"rows": [
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"schemaVersion": 6,
|
"schemaVersion": 6,
|
||||||
"version": 0
|
"version": 0
|
||||||
},
|
},
|
||||||
"overwrite": false
|
"overwrite": false
|
||||||
}
|
}
|
||||||
|
```
|
||||||
|
|
||||||
JSON Body schema:
|
JSON Body schema:
|
||||||
|
|
||||||
@ -47,15 +49,17 @@ JSON Body schema:
|
|||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200 OK
|
```http
|
||||||
Content-Type: application/json; charset=UTF-8
|
HTTP/1.1 200 OK
|
||||||
Content-Length: 78
|
Content-Type: application/json; charset=UTF-8
|
||||||
|
Content-Length: 78
|
||||||
|
|
||||||
{
|
{
|
||||||
"slug": "production-overview",
|
"slug": "production-overview",
|
||||||
"status": "success",
|
"status": "success",
|
||||||
"version": 1
|
"version": 1
|
||||||
}
|
}
|
||||||
|
```
|
||||||
|
|
||||||
Status Codes:
|
Status Codes:
|
||||||
|
|
||||||
@ -67,14 +71,16 @@ Status Codes:
|
|||||||
The **412** status code is used when a newer dashboard already exists (newer, its version is greater than the version that was sent). The
|
The **412** status code is used when a newer dashboard already exists (newer, its version is greater than the version that was sent). The
|
||||||
same status code is also used if another dashboard exists with the same title. The response body will look like this:
|
same status code is also used if another dashboard exists with the same title. The response body will look like this:
|
||||||
|
|
||||||
HTTP/1.1 412 Precondition Failed
|
```http
|
||||||
Content-Type: application/json; charset=UTF-8
|
HTTP/1.1 412 Precondition Failed
|
||||||
Content-Length: 97
|
Content-Type: application/json; charset=UTF-8
|
||||||
|
Content-Length: 97
|
||||||
|
|
||||||
{
|
{
|
||||||
"message": "The dashboard has been changed by someone else",
|
"message": "The dashboard has been changed by someone else",
|
||||||
"status": "version-mismatch"
|
"status": "version-mismatch"
|
||||||
}
|
}
|
||||||
|
```
|
||||||
|
|
||||||
In case the title already exists, the `status` property will be `name-exists`.
|
In case the title already exists, the `status` property will be `name-exists`.
|
||||||
|
|
||||||
@ -86,34 +92,38 @@ Will return the dashboard given the dashboard slug. Slug is the url friendly ver
|
|||||||
|
|
||||||
**Example Request**:
|
**Example Request**:
|
||||||
|
|
||||||
GET /api/dashboards/db/production-overview HTTP/1.1
|
```http
|
||||||
Accept: application/json
|
GET /api/dashboards/db/production-overview HTTP/1.1
|
||||||
Content-Type: application/json
|
Accept: application/json
|
||||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
Content-Type: application/json
|
||||||
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
```
|
||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200
|
```http
|
||||||
Content-Type: application/json
|
HTTP/1.1 200
|
||||||
|
Content-Type: application/json
|
||||||
|
|
||||||
{
|
{
|
||||||
"meta": {
|
"meta": {
|
||||||
"isStarred": false,
|
"isStarred": false,
|
||||||
"slug": "production-overview"
|
"slug": "production-overview"
|
||||||
},
|
},
|
||||||
"dashboard": {
|
"dashboard": {
|
||||||
"id": null,
|
"id": null,
|
||||||
"title": "Production Overview",
|
"title": "Production Overview",
|
||||||
"tags": [ "templated" ],
|
"tags": [ "templated" ],
|
||||||
"timezone": "browser",
|
"timezone": "browser",
|
||||||
"rows": [
|
"rows": [
|
||||||
{
|
{
|
||||||
}
|
|
||||||
],
|
|
||||||
"schemaVersion": 6,
|
|
||||||
"version": 0
|
|
||||||
}
|
}
|
||||||
}
|
],
|
||||||
|
"schemaVersion": 6,
|
||||||
|
"version": 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
## Delete dashboard
|
## Delete dashboard
|
||||||
|
|
||||||
@ -123,17 +133,21 @@ The above will delete the dashboard with the specified slug. The slug is the url
|
|||||||
|
|
||||||
**Example Request**:
|
**Example Request**:
|
||||||
|
|
||||||
DELETE /api/dashboards/db/test HTTP/1.1
|
```http
|
||||||
Accept: application/json
|
DELETE /api/dashboards/db/test HTTP/1.1
|
||||||
Content-Type: application/json
|
Accept: application/json
|
||||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
Content-Type: application/json
|
||||||
|
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||||
|
```
|
||||||
|
|
||||||
**Example Response**:
|
**Example Response**:
|
||||||
|
|
||||||
HTTP/1.1 200
|
```http
|
||||||
Content-Type: application/json
|
HTTP/1.1 200
|
||||||
|
Content-Type: application/json
|
||||||
|
|
||||||
{"title": "Test"}
|
{"title": "Test"}
|
||||||
|
```
|
||||||
|
|
||||||
## Gets the home dashboard
|
## Gets the home dashboard
|
||||||
|
|
||||||
@ -221,10 +235,6 @@ Get all tags of dashboards
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
## Dashboard from JSON file
|
|
||||||
|
|
||||||
`GET /file/:file`
|
|
||||||
|
|
||||||
## Search Dashboards
|
## Search Dashboards
|
||||||
|
|
||||||
`GET /api/search/`
|
`GET /api/search/`
|
||||||
|
@ -229,6 +229,10 @@ Used for signing keep me logged in / remember me cookies.
|
|||||||
Set to `true` to disable the use of Gravatar for user profile images.
|
Set to `true` to disable the use of Gravatar for user profile images.
|
||||||
Default is `false`.
|
Default is `false`.
|
||||||
|
|
||||||
|
### data_source_proxy_whitelist
|
||||||
|
|
||||||
|
Define a white list of allowed ips/domains to use in data sources. Format: `ip_or_domain:port` separated by spaces
|
||||||
|
|
||||||
<hr />
|
<hr />
|
||||||
|
|
||||||
## [users]
|
## [users]
|
||||||
@ -313,7 +317,6 @@ example:
|
|||||||
auth_url = https://github.com/login/oauth/authorize
|
auth_url = https://github.com/login/oauth/authorize
|
||||||
token_url = https://github.com/login/oauth/access_token
|
token_url = https://github.com/login/oauth/access_token
|
||||||
api_url = https://api.github.com/user
|
api_url = https://api.github.com/user
|
||||||
allow_sign_up = false
|
|
||||||
team_ids =
|
team_ids =
|
||||||
allowed_organizations =
|
allowed_organizations =
|
||||||
|
|
||||||
@ -441,20 +444,29 @@ false only pre-existing Grafana users will be able to login (if ldap authenticat
|
|||||||
<hr>
|
<hr>
|
||||||
|
|
||||||
## [auth.proxy]
|
## [auth.proxy]
|
||||||
|
|
||||||
This feature allows you to handle authentication in a http reverse proxy.
|
This feature allows you to handle authentication in a http reverse proxy.
|
||||||
|
|
||||||
### enabled
|
### enabled
|
||||||
|
|
||||||
Defaults to `false`
|
Defaults to `false`
|
||||||
|
|
||||||
### header_name
|
### header_name
|
||||||
|
|
||||||
Defaults to X-WEBAUTH-USER
|
Defaults to X-WEBAUTH-USER
|
||||||
|
|
||||||
#### header_property
|
#### header_property
|
||||||
|
|
||||||
Defaults to username but can also be set to email
|
Defaults to username but can also be set to email
|
||||||
|
|
||||||
### auto_sign_up
|
### auto_sign_up
|
||||||
|
|
||||||
Set to `true` to enable auto sign up of users who do not exist in Grafana DB. Defaults to `true`.
|
Set to `true` to enable auto sign up of users who do not exist in Grafana DB. Defaults to `true`.
|
||||||
|
|
||||||
|
### whitelist
|
||||||
|
|
||||||
|
Limit where auth proxy requests come from by configuring a list of IP addresses. This can be used to prevent users spoofing the X-WEBAUTH-USER header.
|
||||||
|
|
||||||
<hr>
|
<hr>
|
||||||
|
|
||||||
## [session]
|
## [session]
|
||||||
@ -644,6 +656,9 @@ Secret key. e.g. AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
|||||||
### url
|
### url
|
||||||
Url to where Grafana will send PUT request with images
|
Url to where Grafana will send PUT request with images
|
||||||
|
|
||||||
|
### public_url
|
||||||
|
Optional parameter. Url to send to users in notifications, directly appended with the resulting uploaded file name.
|
||||||
|
|
||||||
### username
|
### username
|
||||||
basic auth username
|
basic auth username
|
||||||
|
|
||||||
|
@ -15,15 +15,28 @@ weight = 1
|
|||||||
|
|
||||||
Description | Download
|
Description | Download
|
||||||
------------ | -------------
|
------------ | -------------
|
||||||
Stable for Debian-based Linux | [4.2.0 (x86-64 deb)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.2.0_amd64.deb)
|
Stable for Debian-based Linux | [grafana_4.3.1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.3.1_amd64.deb)
|
||||||
|
|
||||||
|
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
|
||||||
|
installation.
|
||||||
|
|
||||||
## Install Stable
|
## Install Stable
|
||||||
|
|
||||||
|
```bash
|
||||||
|
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.3.1_amd64.deb
|
||||||
|
sudo apt-get install -y adduser libfontconfig
|
||||||
|
sudo dpkg -i grafana_4.3.1_amd64.deb
|
||||||
```
|
```
|
||||||
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.2.0_amd64.deb
|
|
||||||
$ sudo apt-get install -y adduser libfontconfig
|
<!--
|
||||||
$ sudo dpkg -i grafana_4.2.0_amd64.deb
|
## Install Beta
|
||||||
|
|
||||||
|
```bash
|
||||||
|
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.3.0-beta1_amd64.deb
|
||||||
|
sudo apt-get install -y adduser libfontconfig
|
||||||
|
sudo dpkg -i grafana_4.3.0-beta1_amd64.deb
|
||||||
```
|
```
|
||||||
|
-->
|
||||||
|
|
||||||
## APT Repository
|
## APT Repository
|
||||||
|
|
||||||
@ -40,18 +53,24 @@ candidates.
|
|||||||
Then add the [Package Cloud](https://packagecloud.io/grafana) key. This
|
Then add the [Package Cloud](https://packagecloud.io/grafana) key. This
|
||||||
allows you to install signed packages.
|
allows you to install signed packages.
|
||||||
|
|
||||||
$ curl https://packagecloud.io/gpg.key | sudo apt-key add -
|
```bash
|
||||||
|
curl https://packagecloud.io/gpg.key | sudo apt-key add -
|
||||||
|
```
|
||||||
|
|
||||||
Update your Apt repositories and install Grafana
|
Update your Apt repositories and install Grafana
|
||||||
|
|
||||||
$ sudo apt-get update
|
```bash
|
||||||
$ sudo apt-get install grafana
|
sudo apt-get update
|
||||||
|
sudo apt-get install grafana
|
||||||
|
```
|
||||||
|
|
||||||
On some older versions of Ubuntu and Debian you may need to install the
|
On some older versions of Ubuntu and Debian you may need to install the
|
||||||
`apt-transport-https` package which is needed to fetch packages over
|
`apt-transport-https` package which is needed to fetch packages over
|
||||||
HTTPS.
|
HTTPS.
|
||||||
|
|
||||||
$ sudo apt-get install -y apt-transport-https
|
```bash
|
||||||
|
sudo apt-get install -y apt-transport-https
|
||||||
|
```
|
||||||
|
|
||||||
## Package details
|
## Package details
|
||||||
|
|
||||||
@ -67,7 +86,9 @@ HTTPS.
|
|||||||
|
|
||||||
Start Grafana by running:
|
Start Grafana by running:
|
||||||
|
|
||||||
$ sudo service grafana-server start
|
```bash
|
||||||
|
sudo service grafana-server start
|
||||||
|
```
|
||||||
|
|
||||||
This will start the `grafana-server` process as the `grafana` user,
|
This will start the `grafana-server` process as the `grafana` user,
|
||||||
which was created during the package installation. The default HTTP port
|
which was created during the package installation. The default HTTP port
|
||||||
@ -75,19 +96,25 @@ is `3000` and default user and group is `admin`.
|
|||||||
|
|
||||||
To configure the Grafana server to start at boot time:
|
To configure the Grafana server to start at boot time:
|
||||||
|
|
||||||
$ sudo update-rc.d grafana-server defaults
|
```bash
|
||||||
|
sudo update-rc.d grafana-server defaults
|
||||||
|
```
|
||||||
|
|
||||||
## Start the server (via systemd)
|
## Start the server (via systemd)
|
||||||
|
|
||||||
To start the service using systemd:
|
To start the service using systemd:
|
||||||
|
|
||||||
$ systemctl daemon-reload
|
```bash
|
||||||
$ systemctl start grafana-server
|
systemctl daemon-reload
|
||||||
$ systemctl status grafana-server
|
systemctl start grafana-server
|
||||||
|
systemctl status grafana-server
|
||||||
|
```
|
||||||
|
|
||||||
Enable the systemd service so that Grafana starts at boot.
|
Enable the systemd service so that Grafana starts at boot.
|
||||||
|
|
||||||
sudo systemctl enable grafana-server.service
|
```bash
|
||||||
|
sudo systemctl enable grafana-server.service
|
||||||
|
```
|
||||||
|
|
||||||
## Environment file
|
## Environment file
|
||||||
|
|
||||||
|
@ -73,7 +73,7 @@ email = "email"
|
|||||||
[[servers.group_mappings]]
|
[[servers.group_mappings]]
|
||||||
group_dn = "cn=admins,dc=grafana,dc=org"
|
group_dn = "cn=admins,dc=grafana,dc=org"
|
||||||
org_role = "Admin"
|
org_role = "Admin"
|
||||||
# The Grafana organization database id, optional, if left out the default org (id 1) will be used
|
# The Grafana organization database id, optional, if left out the default org (id 1) will be used. Setting this allows for multiple group_dn's to be assigned to the same org_role provided the org_id differs
|
||||||
# org_id = 1
|
# org_id = 1
|
||||||
|
|
||||||
[[servers.group_mappings]]
|
[[servers.group_mappings]]
|
||||||
|
@ -3,9 +3,6 @@ title = "Migrating from older versions"
|
|||||||
description = "Upgrading & Migrating Grafana from older versions"
|
description = "Upgrading & Migrating Grafana from older versions"
|
||||||
keywords = ["grafana", "configuration", "documentation", "migration"]
|
keywords = ["grafana", "configuration", "documentation", "migration"]
|
||||||
type = "docs"
|
type = "docs"
|
||||||
[menu.docs]
|
|
||||||
parent = "installation"
|
|
||||||
weight = 10
|
|
||||||
+++
|
+++
|
||||||
|
|
||||||
# Migrating from older versions
|
# Migrating from older versions
|
||||||
|
@ -1,34 +0,0 @@
|
|||||||
+++
|
|
||||||
title = "Installing via provisioning tools"
|
|
||||||
description = "Guide to install Grafana via provisioning tools like puppet & chef"
|
|
||||||
keywords = ["grafana", "provisioning", "documentation", "puppet", "chef", "ansible"]
|
|
||||||
type = "docs"
|
|
||||||
aliases = ["docs/provisioning"]
|
|
||||||
[menu.docs]
|
|
||||||
parent = "installation"
|
|
||||||
weight = 8
|
|
||||||
+++
|
|
||||||
|
|
||||||
|
|
||||||
# Installing via provisioning tools
|
|
||||||
|
|
||||||
Here are links for how to install Grafana (and some include Graphite or
|
|
||||||
InfluxDB as well) via a provisioning system. These are not maintained by
|
|
||||||
any core Grafana team member and might be out of date.
|
|
||||||
|
|
||||||
### Puppet
|
|
||||||
|
|
||||||
* [forge.puppetlabs.com/bfraser/grafana](https://forge.puppetlabs.com/bfraser/grafana)
|
|
||||||
|
|
||||||
### Ansible
|
|
||||||
|
|
||||||
* [github.com/picotrading/ansible-grafana](https://github.com/picotrading/ansible-grafana)
|
|
||||||
|
|
||||||
### Docker
|
|
||||||
* [github.com/grafana/grafana-docker](https://github.com/grafana/grafana-docker)
|
|
||||||
|
|
||||||
### Chef
|
|
||||||
|
|
||||||
* [github.com/JonathanTron/chef-grafana](https://github.com/JonathanTron/chef-grafana)
|
|
||||||
* [github.com/Nordstrom/grafana2-cookbook](https://github.com/Nordstrom/grafana2-cookbook)
|
|
||||||
|
|
@ -15,25 +15,28 @@ weight = 2
|
|||||||
|
|
||||||
Description | Download
|
Description | Download
|
||||||
------------ | -------------
|
------------ | -------------
|
||||||
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.2.0 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0-1.x86_64.rpm)
|
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.3.1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.1-1.x86_64.rpm)
|
||||||
|
|
||||||
|
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
|
||||||
|
installation.
|
||||||
|
|
||||||
## Install Stable
|
## Install Stable
|
||||||
|
|
||||||
You can install Grafana using Yum directly.
|
You can install Grafana using Yum directly.
|
||||||
|
|
||||||
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0-1.x86_64.rpm
|
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.1-1.x86_64.rpm
|
||||||
|
|
||||||
Or install manually using `rpm`.
|
Or install manually using `rpm`.
|
||||||
|
|
||||||
#### On CentOS / Fedora / Redhat:
|
#### On CentOS / Fedora / Redhat:
|
||||||
|
|
||||||
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0-1.x86_64.rpm
|
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.1-1.x86_64.rpm
|
||||||
$ sudo yum install initscripts fontconfig
|
$ sudo yum install initscripts fontconfig
|
||||||
$ sudo rpm -Uvh grafana-4.2.0-1.x86_64.rpm
|
$ sudo rpm -Uvh grafana-4.3.1-1.x86_64.rpm
|
||||||
|
|
||||||
#### On OpenSuse:
|
#### On OpenSuse:
|
||||||
|
|
||||||
$ sudo rpm -i --nodeps grafana-4.2.0-1.x86_64.rpm
|
$ sudo rpm -i --nodeps grafana-4.3.1-1.x86_64.rpm
|
||||||
|
|
||||||
## Install via YUM Repository
|
## Install via YUM Repository
|
||||||
|
|
||||||
|
@ -11,48 +11,24 @@ weight = 8
|
|||||||
|
|
||||||
# Troubleshooting
|
# Troubleshooting
|
||||||
|
|
||||||
This page is dedicated to helping you solve any problem you have getting
|
## Visualization & query issues
|
||||||
Grafana to work. Please review it before opening a new [GitHub
|
|
||||||
issue](https://github.com/grafana/grafana/issues/new) or asking a
|
|
||||||
question in the `#grafana` IRC channel on freenode.
|
|
||||||
|
|
||||||
## General connection issues
|
The most common problems are related to the query & response from your data source. Even if it looks
|
||||||
|
like a bug or visualization issue in Grafana, it is 99% of the time a problem with the data source query or
|
||||||
|
the data source response.
|
||||||
|
|
||||||
When setting up Grafana for the first time you might experience issues
|
So make sure to check the query sent and the raw response, learn how in this guide: [How to troubleshoot metric query issues](https://community.grafana.com/t/how-to-troubleshoot-metric-query-issues/50)
|
||||||
with Grafana being unable to query Graphite, OpenTSDB or InfluxDB. You
|
|
||||||
might not be able to get metric name completion or the graph might show
|
|
||||||
an error like this:
|
|
||||||
|
|
||||||

|
## Logging
|
||||||
|
|
||||||
For some types of errors, the `View details` link will show you error
|
If you encounter an error or problem it is a good idea to check the grafana server log. Usually
|
||||||
details. For many types of HTTP connection errors, however, there is very
|
located at `/var/log/grafana/grafana.log` on unix systems or in `<grafana_install_dir>/data/log` on
|
||||||
little information. The best way to troubleshoot these issues is use
|
other platforms & manual installs.
|
||||||
the [Chrome developer tools](https://developer.chrome.com/devtools/index).
|
|
||||||
By pressing `F12` you can bring up the chrome dev tools.
|
|
||||||
|
|
||||||

|
You can enable more logging by changing the log level in your grafana configuration file.
|
||||||
|
|
||||||
There are two important tabs in the Chrome developer tools: `Network`
|
## FAQ
|
||||||
and `Console`. The `Console` tab will show you Javascript errors and
|
|
||||||
HTTP request errors. In the Network tab you will be able to identify the
|
|
||||||
request that failed and review request and response parameters. This
|
|
||||||
information will be of great help in finding the cause of the error.
|
|
||||||
|
|
||||||
If you are unable to solve the issue, even after reading the remainder
|
Check out the [FAQ](https://community.grafana.com/c/howto/faq) section on our community page for frequently
|
||||||
of this troubleshooting guide, you should open a [GitHub support
|
asked questions.
|
||||||
issue](https://github.com/grafana/grafana/issues). Before you do that
|
|
||||||
please search the existing closed or open issues. Also if you need to
|
|
||||||
create a support issue, screen shots and or text information about the
|
|
||||||
chrome console error, request and response information from the
|
|
||||||
`Network` tab in Chrome developer tools are of great help.
|
|
||||||
|
|
||||||
### Inspecting Grafana metric requests
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
After opening the Chrome developer tools for the first time the
|
|
||||||
`Network` tab is empty. You will need to refresh the page to get
|
|
||||||
requests to show. For some type of errors, especially CORS-related,
|
|
||||||
there might not be a response at all.
|
|
||||||
|
|
||||||
|
103
docs/sources/installation/upgrading.md
Normal file
103
docs/sources/installation/upgrading.md
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
+++
|
||||||
|
title = "Upgrading"
|
||||||
|
description = "Upgrading Grafana guide"
|
||||||
|
keywords = ["grafana", "configuration", "documentation", "upgrade"]
|
||||||
|
type = "docs"
|
||||||
|
[menu.docs]
|
||||||
|
name = "Upgrading"
|
||||||
|
identifier = "upgrading"
|
||||||
|
parent = "installation"
|
||||||
|
weight = 10
|
||||||
|
+++
|
||||||
|
|
||||||
|
# Upgrading Grafana
|
||||||
|
|
||||||
|
We recommend everyone to upgrade Grafana often to stay up to date with the latest fixes and enhancements.
|
||||||
|
In order to make this a reality, Grafana upgrades are backward compatible and the upgrade process is simple & quick.
|
||||||
|
|
||||||
|
Upgrading is generally safe (between many minor and one major version) and dashboards and graphs will look the same. There can be minor breaking changes in some edge cases which are usually outlined in the [Release Notes](https://community.grafana.com/c/releases) and [Changelog](https://github.com/grafana/grafana/blob/master/CHANGELOG.md)
|
||||||
|
|
||||||
|
## Database Backup
|
||||||
|
|
||||||
|
Before upgrading it can be a good idea to backup your Grafana database. This will ensure that you can always rollback to your previous version. During startup, Grafana will automatically migrate the database schema (if there are changes or new tables). Sometimes this can cause issues if you later want to downgrade.
|
||||||
|
|
||||||
|
#### sqlite
|
||||||
|
|
||||||
|
If you use sqlite you only need to make a backup of your `grafana.db` file. This is usually located at `/var/lib/grafana/grafana.db` on unix systems.
|
||||||
|
If you are unsure what database you use and where it is stored, check your grafana configuration file. If you
|
||||||
|
installed grafana to a custom location using a binary tar/zip it is usually in `<grafana_install_dir>/data`.
|
||||||
|
|
||||||
|
#### mysql
|
||||||
|
|
||||||
|
```
|
||||||
|
backup:
|
||||||
|
> mysqldump -u root -p[root_password] [grafana] > grafana_backup.sql
|
||||||
|
|
||||||
|
restore:
|
||||||
|
> mysql -u root -p grafana < grafana_backup.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
#### postgres
|
||||||
|
|
||||||
|
```
|
||||||
|
backup:
|
||||||
|
> pg_dump grafana > grafana_backup
|
||||||
|
|
||||||
|
restore:
|
||||||
|
> psql grafana < grafana_backup
|
||||||
|
```
|
||||||
|
|
||||||
|
### Ubuntu / Debian
|
||||||
|
|
||||||
|
If you installed grafana by downloading a debian package (`.deb`) you can just follow the same installation guide
|
||||||
|
and execute the same `dpkg -i` command but with the new package. It will upgrade your Grafana install.
|
||||||
|
|
||||||
|
If you used our APT repository:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install grafana
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Upgrading from binary tar file
|
||||||
|
|
||||||
|
If you downloaded the binary tar package you can just download and extract a new package
|
||||||
|
and overwrite all your existing files. But this might overwrite your config changes. We
|
||||||
|
recommend you place your config changes in a file named `<grafana_install_dir>/conf/custom.ini`
|
||||||
|
as this will make upgrades easier without risking losing your config changes.
|
||||||
|
|
||||||
|
### Centos / RHEL
|
||||||
|
|
||||||
|
If you installed grafana by downloading a rpm package you can just follow the same installation guide
|
||||||
|
and execute the same `yum install` or `rpm -i` command but with the new package. It will upgrade your Grafana install.
|
||||||
|
|
||||||
|
If you used our YUM repository:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo yum update grafana
|
||||||
|
```
|
||||||
|
|
||||||
|
### Docker
|
||||||
|
|
||||||
|
This is just an example; details depend on how you configured your grafana container.
|
||||||
|
```
|
||||||
|
docker pull grafana
|
||||||
|
docker stop my-grafana-container
|
||||||
|
docker rm my-grafana-container
|
||||||
|
docker run --name=my-grafana-container --restart=always -v /var/lib/grafana:/var/lib/grafana
|
||||||
|
```
|
||||||
|
|
||||||
|
### Windows
|
||||||
|
|
||||||
|
If you downloaded the windows binary package you can just download a newer package and extract
|
||||||
|
to the same location (and overwrite the existing files). This might overwrite your config changes. We
|
||||||
|
recommend you place your config changes in a file named `<grafana_install_dir>/conf/custom.ini`
|
||||||
|
as this will make upgrades easier without risking losing your config changes.
|
||||||
|
|
||||||
|
## Upgrading from 1.x
|
||||||
|
|
||||||
|
[Migrating from 1.x to 2.x]({{< relref "installation/migrating_to2.md" >}})
|
||||||
|
|
||||||
|
## Upgrading from 2.x
|
||||||
|
|
||||||
|
We are not aware of any issues upgrading directly from 2.x to 4.x but to be on the safe side go via 3.x.
|
@ -13,7 +13,10 @@ weight = 3
|
|||||||
|
|
||||||
Description | Download
|
Description | Download
|
||||||
------------ | -------------
|
------------ | -------------
|
||||||
Latest stable package for Windows | [grafana.4.2.0.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0.windows-x64.zip)
|
Latest stable package for Windows | [grafana.4.3.1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.1.windows-x64.zip)
|
||||||
|
|
||||||
|
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
|
||||||
|
installation.
|
||||||
|
|
||||||
## Configure
|
## Configure
|
||||||
|
|
||||||
|
@ -99,6 +99,6 @@ To manually install a Plugin via the Grafana.com API:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
4. Download the plugin with `https://grafana.com/api/plugins/<plugin id from step 1>/versions/<current version>/download` (for example: https://grafana.com/api/plugins/jdbranham-diagram-panel/versions/1.4.0/download). Unzip the downloaded file into the Grafana Server's `data/plugins` directory.
|
4. Download the plugin with `https://grafana.com/api/plugins/<plugin id from step 1>/versions/<current version>/download` (for example: https://grafana.com/api/plugins/jdbranham-diagram-panel/versions/1.4.0/download). Unzip the downloaded file into the Grafana Server's `plugins` directory.
|
||||||
|
|
||||||
5. Restart the Grafana Server.
|
5. Restart the Grafana Server.
|
||||||
|
@ -13,8 +13,8 @@ dev environment. Grafana ships with its own required backend server; also comple
|
|||||||
|
|
||||||
## Dependencies
|
## Dependencies
|
||||||
|
|
||||||
- [Go 1.8](https://golang.org/dl/)
|
- [Go 1.8.1](https://golang.org/dl/)
|
||||||
- [NodeJS](https://nodejs.org/download/)
|
- [NodeJS LTS](https://nodejs.org/download/)
|
||||||
|
|
||||||
## Get Code
|
## Get Code
|
||||||
Create a directory for the project and set your path accordingly. Then download and install Grafana into your $GOPATH directory
|
Create a directory for the project and set your path accordingly. Then download and install Grafana into your $GOPATH directory
|
||||||
|
@ -13,42 +13,28 @@ weight = 2
|
|||||||
Annotations provide a way to mark points on the graph with rich events. When you hover over an annotation
|
Annotations provide a way to mark points on the graph with rich events. When you hover over an annotation
|
||||||
you can get title, tags, and text information for the event.
|
you can get title, tags, and text information for the event.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
To add an annotation query click dashboard settings icon in top menu and select `Annotations` from the
|
## Queries
|
||||||
dropdown. This will open the `Annotations` edit view. Click the `Add` tab to add a new annotation query.
|
|
||||||
|
|
||||||
> Note: Annotations apply to all graphs in a given dashboard, not on a per-panel basis.
|
Annotation events are fetched via annotation queries. To add a new annotation query to a dashboard
|
||||||
|
open the dashboard settings menu, then select `Annotations`. This will open the dashboard annotations
|
||||||
|
settings view. To create a new annotation query hit the `New` button.
|
||||||
|
|
||||||
## Graphite annotations
|

|
||||||
|
|
||||||
Graphite supports two ways to query annotations.
|
Specify a name for the annotation query. This name is given to the toggle (checkbox) that will allow
|
||||||
|
you to enable/disable showing annotation events from this query. For example you might have two
|
||||||
|
annotation queries named `Deploys` and `Outages`. The toggles will allow you to decide what annotations
|
||||||
|
to show.
|
||||||
|
|
||||||
- A regular metric query, use the `Graphite target expression` text input for this
|
### Annotation query details
|
||||||
- Graphite events query, use the `Graphite event tags` text input, specify an tag or wildcard (leave empty should also work)
|
|
||||||
|
|
||||||
## Elasticsearch annotations
|
The annotation query options are different for each data source.
|
||||||

|
|
||||||
|
|
||||||
Grafana can query any Elasticsearch index for annotation events. The index name can be the name of an alias or an index wildcard pattern.
|
- [Graphite annotation queries]({{< relref "features/datasources/graphite.md#annotations" >}})
|
||||||
You can leave the search query blank or specify a lucene query.
|
- [Elasticsearch annotation queries]({{< relref "features/datasources/elasticsearch.md#annotations" >}})
|
||||||
|
- [InfluxDB annotation queries]({{< relref "features/datasources/influxdb.md#annotations" >}})
|
||||||
|
- [Prometheus annotation queries]({{< relref "features/datasources/prometheus.md#annotations" >}})
|
||||||
|
|
||||||
If your elasticsearch document has a timestamp field other than `@timestamp` you will need to specify that. As well
|
|
||||||
as the name for the fields that should be used for the annotation title, tags and text. Tags and text are optional.
|
|
||||||
|
|
||||||
> **Note** The annotation timestamp field in elasticsearch need to be in UTC format.
|
|
||||||
|
|
||||||
## InfluxDB Annotations
|
|
||||||

|
|
||||||
|
|
||||||
For InfluxDB you need to enter a query like in the above screenshot. You need to have the ```where $timeFilter``` part.
|
|
||||||
If you only select one column you will not need to enter anything in the column mapping fields.
|
|
||||||
|
|
||||||
## Prometheus Annotations
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
Prometheus supports two ways to query annotations.
|
|
||||||
|
|
||||||
- A regular metric query
|
|
||||||
- A Prometheus query for pending and firing alerts (for details see [Inspecting alerts during runtime](https://prometheus.io/docs/alerting/rules/#inspecting-alerts-during-runtime))
|
|
||||||
|
@ -65,7 +65,7 @@ Each field in the dashboard JSON is explained below with its usage:
|
|||||||
| **timezone** | timezone of dashboard, i.e. `utc` or `browser` |
|
| **timezone** | timezone of dashboard, i.e. `utc` or `browser` |
|
||||||
| **editable** | whether a dashboard is editable or not |
|
| **editable** | whether a dashboard is editable or not |
|
||||||
| **hideControls** | whether row controls on the left in green are hidden or not |
|
| **hideControls** | whether row controls on the left in green are hidden or not |
|
||||||
| **graphTooltip** | TODO |
|
| **graphTooltip** | 0 for no shared crosshair or tooltip (default), 1 for shared crosshair, 2 for shared crosshair AND shared tooltip |
|
||||||
| **rows** | row metadata, see [rows section](#rows) for details |
|
| **rows** | row metadata, see [rows section](#rows) for details |
|
||||||
| **time** | time range for dashboard, i.e. last 6 hours, last 7 days, etc |
|
| **time** | time range for dashboard, i.e. last 6 hours, last 7 days, etc |
|
||||||
| **timepicker** | timepicker metadata, see [timepicker section](#timepicker) for details |
|
| **timepicker** | timepicker metadata, see [timepicker section](#timepicker) for details |
|
||||||
|
@ -22,7 +22,7 @@ A dashboard snapshot is an instant way to share an interactive dashboard publicl
|
|||||||
(metric, template and annotation) and panel links, leaving only the visible metric data and series names embedded into your dashboard. Dashboard
|
(metric, template and annotation) and panel links, leaving only the visible metric data and series names embedded into your dashboard. Dashboard
|
||||||
snapshots can be accessed by anyone who has the link and can reach the URL.
|
snapshots can be accessed by anyone who has the link and can reach the URL.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### Publish snapshots
|
### Publish snapshots
|
||||||
You can publish snapshots to your local instance or to [snapshot.raintank.io](http://snapshot.raintank.io). The latter is a free service
|
You can publish snapshots to your local instance or to [snapshot.raintank.io](http://snapshot.raintank.io). The latter is a free service
|
||||||
@ -42,8 +42,8 @@ You can embed a panel using an iframe on another web site. This tab will show yo
|
|||||||
Example:
|
Example:
|
||||||
|
|
||||||
```html
|
```html
|
||||||
<iframe src="http://snapshot.raintank.io/dashboard/solo/snapshot/UtvRYDv650fHOV2jV5QlAQhLnNOhB5ZN?panelId=4&fullscreen&from=1427385145990&to=1427388745990" width="650" height="300" frameborder="0"></iframe>
|
<iframe src="https://snapshot.raintank.io/dashboard-solo/snapshot/y7zwi2bZ7FcoTlB93WN7yWO4aMiz3pZb?from=1493369923321&to=1493377123321&panelId=4" width="650" height="300" frameborder="0"></iframe>
|
||||||
```
|
```
|
||||||
|
|
||||||
Below there should be an interactive Grafana graph embedded in an iframe:
|
Below there should be an interactive Grafana graph embedded in an iframe:
|
||||||
<iframe src="https://snapshot.raintank.io/dashboard-solo/snapshot/4IKyWYNEQll1B9FXcN3RIgx4M2VGgU8d?panelId=4&fullscreen" width="650" height="300" frameborder="0"></iframe>
|
<iframe src="https://snapshot.raintank.io/dashboard-solo/snapshot/y7zwi2bZ7FcoTlB93WN7yWO4aMiz3pZb?from=1493369923321&to=1493377123321&panelId=4" width="650" height="300" frameborder="0"></iframe>
|
||||||
|
@ -10,76 +10,162 @@ weight = 1
|
|||||||
|
|
||||||
# Templating
|
# Templating
|
||||||
|
|
||||||
<img class="no-shadow" src="/img/docs/v2/templating_var_list.png">
|
Templating allows for more interactive and dynamic dashboards. Instead of hard-coding things like server, application
|
||||||
|
and sensor name in you metric queries you can use variables in their place. Variables are shown as dropdown select boxes at the top of
|
||||||
|
the dashboard. These dropdowns make it easy to change the data being displayed in your dashboard.
|
||||||
|
|
||||||
Dashboard Templating allows you to make your Dashboards more interactive and dynamic.
|
<img class="no-shadow" src="/img/docs/v4/templated_dash.png">
|
||||||
|
|
||||||
They’re one of the most powerful and most used features of Grafana, and they’ve recently gotten even more attention in Grafana 2.0 and Grafana 2.1.
|
## What is a variable?
|
||||||
|
|
||||||
You can create Dashboard Template variables that can be used practically anywhere in a Dashboard: data queries on individual Panels (within the Query Editor), the names in your legends, or titles in Panels and Rows.
|
A variable is a placeholder for a value. You can use variables in metric queries and in panel titles. So when you change
|
||||||
|
the value, using the dropdown at the top of the dashboard, your panel's metric queries will change to reflect the new value.
|
||||||
|
|
||||||
You can configure Dashboard Templating by clicking the dropdown cog on the top of the Dashboard while viewing it.
|
### Interpolation
|
||||||
|
|
||||||
|
Panel titles and metric queries can refer to variables using two different syntaxes:
|
||||||
|
|
||||||
|
- `$<varname>` Example: apps.frontend.$server.requests.count
|
||||||
|
- `[[varname]]` Example: apps.frontend.[[server]].requests.count
|
||||||
|
|
||||||
|
Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of word. Use
|
||||||
|
the second syntax in expressions like `my.server[[serverNumber]].count`.
|
||||||
|
|
||||||
|
Before queries are sent to your data source the query is **interpolated**, meaning the variable is replaced with its current value. During
|
||||||
|
interpolation the variable value might be **escaped** in order to conform to the syntax of the query language and where it is used.
|
||||||
|
For example, a variable used in a regex expression in an InfluxDB or Prometheus query will be regex escaped. Read the data source specific
|
||||||
|
documentation article for details on value escaping during interpolation.
|
||||||
|
|
||||||
|
### Variable options
|
||||||
|
|
||||||
|
A variable is presented as a dropdown select box at the top of the dashboard. It has a current value and a set of **options**. The **options**
|
||||||
|
is the set of values you can choose from.
|
||||||
|
|
||||||
|
## Adding a variable
|
||||||
|
|
||||||
|
<img class="no-shadow" src="/img/docs/v4/templating_var_list.png">
|
||||||
|
|
||||||
|
You add variables via Dashboard cogs menu > Templating. This opens up a list of variables and a `New` button to create a new variable.
|
||||||
|
|
||||||
|
### Basic variable options
|
||||||
|
|
||||||
|
Option | Description
|
||||||
|
------- | --------
|
||||||
|
*Name* | The name of the variable, this is the name you use when you refer to your variable in your metric queries. Must be unique and contain no white-spaces.
|
||||||
|
*Label* | The name of the dropdown for this variable.
|
||||||
|
*Hide* | Options to hide the dropdown select box.
|
||||||
|
*Type* | Defines the variable type.
|
||||||
|
|
||||||
|
|
||||||
## Variable types
|
### Variable types
|
||||||
|
|
||||||
There are three different types of Template variables: query, custom, and interval.
|
Type | Description
|
||||||
|
------- | --------
|
||||||
|
*Query* | This variable type allows you to write a data source query that usually returns a list of metric names, tag values or keys. For example, a query that returns a list of server names, sensor ids or data centers.
|
||||||
|
*Interval* | This variable can represent time spans. Instead of hard-coding a group by time or date histogram interval, use a variable of this type.
|
||||||
|
*Datasource* | This type allows you to quickly change the data source for an entire Dashboard. Useful if you have multiple instances of a data source in for example different environments.
|
||||||
|
*Custom* | Define the variable options manually using a comma separated list.
|
||||||
|
*Constant* | Define a hidden constant. Useful for metric path prefixes for dashboards you want to share. During dashboard export, constant variables will be made into an import option.
|
||||||
|
*Ad hoc filters* | Very special kind of variable that only works with some data sources, InfluxDB & Elasticsearch currently. It allows you to add key/value filters that will automatically be added to all metric queries that use the specified data source.
|
||||||
|
|
||||||
They can all be used to create dynamic variables that you can use throughout the Dashboard, but they differ in how they get the data for their values.
|
### Query options
|
||||||
|
|
||||||
|
This variable type is the most powerful and complex as it can dynamically fetch its options using a data source query.
|
||||||
|
|
||||||
### Query
|
Option | Description
|
||||||
|
------- | --------
|
||||||
|
*Data source* | The data source target for the query.
|
||||||
|
*Refresh* | Controls when to update the variable option list (values in the dropdown). **On Dashboard Load** will slow down dashboard load as the variable query needs to be completed before dashboard can be initialized. Set this only to **On Time Range Change** if your variable options query contains a time range filter or is dependent on dashboard time range.
|
||||||
|
*Query* | The data source specific query expression.
|
||||||
|
*Regex* | Regex to filter or capture specific parts of the names return by your data source query. Optional.
|
||||||
|
*Sort* | Define sort order for options in dropdown. **Disabled** means that the order of options returned by your data source query will be used.
|
||||||
|
|
||||||
> Note: The Query type is Data Source specific. Please consult the appropriate documentation for your particular Data Source.
|
### Query expressions
|
||||||
|
|
||||||
Query is the most common type of Template variable. Use the `Query` template type to generate a dynamic list of variables, simply by allowing Grafana to explore your Data Source metric namespace when the Dashboard loads.
|
The query expressions are different for each data source.
|
||||||
|
|
||||||
For example a query like `prod.servers.*` will fill the variable with all possible values that exists in that wildcard position (in the case of the Graphite Data Source).
|
- [Graphite templating queries]({{< relref "features/datasources/graphite.md#templating" >}})
|
||||||
|
- [Elasticsearch templating queries]({{< relref "features/datasources/elasticsearch.md#templating" >}})
|
||||||
|
- [InfluxDB templating queries]({{< relref "features/datasources/influxdb.md#templating" >}})
|
||||||
|
- [Prometheus templating queries]({{< relref "features/datasources/prometheus.md#templating" >}})
|
||||||
|
- [OpenTSDB templating queries]({{< relref "features/datasources/prometheus.md#templating" >}})
|
||||||
|
|
||||||
You can even create nested variables that use other variables in their definition. For example `apps.$app.servers.*` uses the variable $app in its own query definition.
|
One thing to note is that query expressions can contain references to other variables and in effect create linked variables.
|
||||||
|
Grafana will detect this and automatically refresh a variable when one of it's containing variables change.
|
||||||
|
|
||||||
You can utilize the special ** All ** value to allow the Dashboard user to query for every single Query variable returned. Grafana will automatically translate ** All ** into the appropriate format for your Data Source.
|
## Selection Options
|
||||||
|
|
||||||
#### Multi-select
|
Option | Description
|
||||||
As of Grafana 2.1, it is now possible to select a subset of Query Template variables (previously it was possible to select an individual value or 'All', not multiple values that were less than All). This is accomplished via the Multi-Select option. If enabled, the Dashboard user will be able to enable and disable individual variables.
|
------- | --------
|
||||||
|
*Mulit-value* | If enabled, the variable will support the selection of multiple options at the same time.
|
||||||
|
*Include All option* | Add a special `All` option whose value includes all options.
|
||||||
|
*Custom all value* | By default the `All` value will include all options in combined expression. This can become very long and can have performance problems. Many times it can be better to specify a custom all value, like a wildcard regex. To make it possible to have custom regex, globs or lucene syntax in the **Custom all value** option it is never escaped so you will have to think avbout what is a valid value for your data source.
|
||||||
|
|
||||||
The Multi-Select functionality is taken a step further with the introduction of Multi-Select Tagging. This functionality allows you to group individual Template variables together under a Tag or Group name.
|
### Formating multiple values
|
||||||
|
|
||||||
For example, if you were using Templating to list all 20 of your applications, you could use Multi-Select Tagging to group your applications by function or region or criticality, etc.
|
Interpolating a variable with multiple values selected is tricky as it is not straight forward how to format the multiple values to into a string that
|
||||||
|
is valid in the given context where the variable is used. Grafana tries to solve this by allowing each data source plugin to
|
||||||
|
inform the templating interpolation engine what format to use for multiple values.
|
||||||
|
|
||||||
> Note: Multi-Select Tagging functionality is currently experimental but is part of Grafana 2.1. To enable this feature click the enable icon when editing Template options for a particular variable.
|
**Graphite**, for example, uses glob expressions. A variable with multiple values would, in this case, be interpolated as `{host1,host2,host3}` if
|
||||||
|
the current variable value was *host1*, *host2* and *host3*.
|
||||||
|
|
||||||
<img class="no-shadow" src="/img/docs/v2/template-tags-config.png">
|
**InfluxDB and Prometheus** uses regex expressions, so the same variable
|
||||||
|
would be interpolated as `(host1|host2|host3)`. Every value would also be regex escaped if not, a value with a regex control character would
|
||||||
|
break the regex expression.
|
||||||
|
|
||||||
Grafana gets the list of tags and the list of values in each tag by performing two queries on your metric namespace.
|
**Elasticsearch** uses lucene query syntax, so the same variable would, in this case, be formatted as `("host1" OR "host2" OR "host3")`. In this case every value
|
||||||
|
needs to be escaped so that the value can contain lucene control words and quotation marks.
|
||||||
|
|
||||||
The Tags query returns a list of Tags.
|
#### Formating troubles
|
||||||
|
|
||||||
The Tag values query returns the values for a given Tag.
|
Automatic escaping & formatting can cause problems and it can be tricky to grasp the logic is behind it.
|
||||||
|
Especially for InfluxDB and Prometheus where the use of regex syntax requires that the variable is used in regex operator context.
|
||||||
|
If you do not want Grafana to do this automatic regex escaping and formatting your only option is to disable the *Multi-value* or *Include All option*
|
||||||
|
options.
|
||||||
|
|
||||||
Note: a proof of concept shim that translates the metric query into a SQL call is provided. This allows you to maintain your tag:value mapping independently of your Data Source.
|
### Value groups/tags
|
||||||
|
|
||||||
Once configured, Multi-Select Tagging provides a convenient way to group and your template variables, and slice your data in the exact way you want. The Tags can be seen on the right side of the template pull-down.
|
If you have a lot of options in the dropdown for a multi-value variable. You can use this feature to group the values into selectable tags.
|
||||||
|
|
||||||

|
Option | Description
|
||||||
|
------- | --------
|
||||||
|
*Tags query* | Data source query that should return a list of tags
|
||||||
|
*Tag values query* | Data source query that should return a list of values for a specified tag key. Use `$tag` in the query to refer the currently selected tag.
|
||||||
|
|
||||||
### Interval
|

|
||||||
|
|
||||||
Use the `Interval` type to create Template variables around time ranges (eg. `1m`,`1h`, `1d`). There is also a special `auto` option that will change depending on the current time range, you can specify how many times the current time range should be divided to calculate the current `auto` range.
|
### Interval variables
|
||||||
|
|
||||||

|
Use the `Interval` type to create a variable that represents a time span (eg. `1m`,`1h`, `1d`). There is also a special `auto` option that will change depending on the current time range. You can specify how many times the current time range should be divided to calculate the current `auto` timespan.
|
||||||
|
|
||||||
### Custom
|
This variable type is useful as a parameter to group by time (for InfluxDB), Date histogram interval (for Elasticsearch) or as a *summarize* function parameter (for Graphite).
|
||||||
|
|
||||||
Use the `Custom` type to manually create Template variables around explicit values that are hard-coded into the Dashboard, and not dependent on any Data Source. You can specify multiple Custom Template values by separating them with a comma.
|
## Repeating Panels
|
||||||
|
|
||||||
## Repeating Panels and Repeating Rows
|
Template variables can be very useful to dynamically change your queries across a whole dashboard. If you want
|
||||||
|
Grafana to dynamically create new panels or rows based on what values you have selected you can use the *Repeat* feature.
|
||||||
|
|
||||||
Template Variables can be very useful to dynamically change what you're visualizing on a given panel. Sometimes, you might want to create entire new Panels (or Rows) based on what Template Variables have been selected. This is now possible in Grafana 2.1.
|
If you have a variable with `Multi-value` or `Include all value` options enabled you can choose one panel or one row and have Grafana repeat that row
|
||||||
|
for every selected value. You find this option under the General tab in panel edit mode. Select the variable to repeat by, and a `min span`.
|
||||||
|
The `min span` controls how small Grafana will make the panels (if you have many values selected). Grafana will automatically adjust the width of
|
||||||
|
each repeated panel so that the whole row is filled. Currently, you cannot mix other panels on a row with a repeated panel.
|
||||||
|
|
||||||
Once you've got your Template variables (of any type) configured the way you'd like, check out the Repeating Panels and Repeating Row documentation
|
Only make changes to the first panel (the original template). To have the changes take effect on all panels you need to trigger a dynamic dashboard re-build.
|
||||||
|
You can do this by either changing the variable value (that is the basis for the repeat) or reload the dashboard.
|
||||||
|
|
||||||
## Screencast - Templated Graphite Queries
|
## Repeating Rows
|
||||||
|
|
||||||
<iframe width="561" height="315" src="//www.youtube.com/embed/FhNUrueWwOk?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" frameborder="0" allowfullscreen></iframe>
|
This option requires you to open the row options view. Hover over the row left side to trigger the row menu, in this menu click `Row Options`. This
|
||||||
|
opens the row options view. Here you find a *Repeat* dropdown where you can select the variable to repeat by.
|
||||||
|
|
||||||
|
### URL state
|
||||||
|
|
||||||
|
Variable values are always synced to the URL using the syntax `var-<varname>=value`.
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
|
||||||
|
- [Graphite Templated Dashboard](http://play.grafana.org/dashboard/db/graphite-templated-nested)
|
||||||
|
- [Elasticsearch Templated Dashboard](http://play.grafana.org/dashboard/db/elasticsearch-templated)
|
||||||
|
- [InfluxDB Templated Dashboard](http://play.grafana.org/dashboard/db/influxdb-templated-queries)
|
||||||
|
|
||||||
|
@ -9,58 +9,116 @@ weight = 10
|
|||||||
|
|
||||||
# Screencasts
|
# Screencasts
|
||||||
|
|
||||||
|
<div class="video-card-container">
|
||||||
{{< screencast src="https://www.youtube.com/embed/sKNZMtoSHN4?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}}
|
<figure class="video-card">
|
||||||
|
<a href="https://youtu.be/iUj6DwfBh88?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
### Episode 7 - Beginners guide to building dashboards
|
<img src="/img/docs/v4/screencast_generic.png">
|
||||||
|
<i class="fa fa-play"></i>
|
||||||
For newer users of Grafana, this screencast will familiarize you with the general UI and teach you how to build your first Dashboard.
|
</a>
|
||||||
|
<figcaption>
|
||||||
<div class="clearfix"></div>
|
<a href="https://youtu.be/iUj6DwfBh88?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
#10 Dashboard Discovery & Sharing
|
||||||
{{< screencast src="https://www.youtube.com/embed/9ZCMVNxUf6s?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}}
|
</a>
|
||||||
|
</figcaption>
|
||||||
### Episode 6 - Adding data sources, users & organizations
|
</figure>
|
||||||
|
<figure class="video-card">
|
||||||
Now that Grafana has been installed, learn about adding data sources and get a closer look at adding and managing Users and Organizations.
|
<a href="https://youtu.be/d6KicssNzxM?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
<img src="/img/docs/v4/screencast_generic.png">
|
||||||
<div class="clearfix"></div>
|
<i class="fa fa-play"></i>
|
||||||
|
</a>
|
||||||
{{< screencast src="https://www.youtube.com/embed/E-gMFv85FE8?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}}
|
<figcaption>
|
||||||
|
<a href="https://youtu.be/d6KicssNzxM?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
### Episode 5 - Installation & Configuration on Red Hat / CentOS
|
#9 Using Elasticsearch in Grafana
|
||||||
|
</a>
|
||||||
This screencasts shows how to get Grafana 2.0 installed and configured quickly on RPM-based Linux operating systems.
|
</figcaption>
|
||||||
|
</figure>
|
||||||
<div class="clearfix"></div>
|
<figure class="video-card">
|
||||||
{{< screencast src="https://www.youtube.com/embed/JY22EBOR9hQ?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}}
|
<a href="https://youtu.be/1kJyQKgk_oY?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
<img src="/img/docs/v4/screencast_generic.png">
|
||||||
### Episode 4 - Installation & Configuration on Ubuntu / Debian
|
<i class="fa fa-play"></i>
|
||||||
|
</a>
|
||||||
Learn how to easily install the dependencies and packages to get Grafana 2.0 up and running on Ubuntu or Debian in just a few minutes.
|
<figcaption>
|
||||||
|
<a href="https://youtu.be/1kJyQKgk_oY?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
<div class="clearfix"></div>
|
#8 What's new in Grafana 3.0
|
||||||
|
</a>
|
||||||
{{< screencast src="https://www.youtube.com/embed/FC13uhFRsVw?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}}
|
</figcaption>
|
||||||
|
</figure>
|
||||||
### Episode 3 - Whats New In Grafana 2.0
|
<figure class="video-card">
|
||||||
|
<a href="https://youtu.be/sKNZMtoSHN4?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
This screencast highlights many of the great new features that were included in the Grafana 2.0 release.
|
<img src="/img/docs/v4/screencast_generic.png">
|
||||||
|
<i class="fa fa-play"></i>
|
||||||
<div class="clearfix"></div>
|
</a>
|
||||||
|
<figcaption>
|
||||||
{{< screencast src="//www.youtube.com/embed/FhNUrueWwOk?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}}
|
<a href="https://youtu.be/sKNZMtoSHN4?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
### Episode 2 - Templated Graphite Queries
|
#7 Beginners guide to building dashboards
|
||||||
|
</a>
|
||||||
The screencast focuses on Templating with the Graphite Data Source. Learn how to make dynamic and adaptable Dashboards for your Graphite metrics.
|
</figcaption>
|
||||||
|
</figure>
|
||||||
<div class="clearfix"></div>
|
<figure class="video-card">
|
||||||
|
<a href="https://youtu.be/9ZCMVNxUf6s?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
{{< screencast src="//www.youtube.com/embed/mgcJPREl3CU?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}}
|
<img src="/img/docs/v4/screencast_generic.png">
|
||||||
### Episode 1 - Building Graphite Queries
|
<i class="fa fa-play"></i>
|
||||||
|
</a>
|
||||||
Learn how the Graphite Query Editor works, and how to use different graphing functions. There's also an introduction to graph display settings.
|
<figcaption>
|
||||||
|
<a href="https://youtu.be/9ZCMVNxUf6s?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
<div class="clearfix"></div>
|
#6 Adding data sources, users & orgs.
|
||||||
|
</a>
|
||||||
|
</figcaption>
|
||||||
|
</figure>
|
||||||
|
<figure class="video-card">
|
||||||
|
<a href="https://youtu.be/E-gMFv84FE8?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
<img src="/img/docs/v4/screencast_generic.png">
|
||||||
|
<i class="fa fa-play"></i>
|
||||||
|
</a>
|
||||||
|
<figcaption>
|
||||||
|
<a href="https://youtu.be/E-gMFv84FE8?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
#5 Install & Setup on Redhat / Centos
|
||||||
|
</a>
|
||||||
|
</figcaption>
|
||||||
|
</figure>
|
||||||
|
<figure class="video-card">
|
||||||
|
<a href="https://youtu.be/JY22EBOR9hQ?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
<img src="/img/docs/v4/screencast_generic.png">
|
||||||
|
<i class="fa fa-play"></i>
|
||||||
|
</a>
|
||||||
|
<figcaption>
|
||||||
|
<a href="https://youtu.be/JY22EBOR9hQ?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
#4 Install & Setup on Ubuntu / Debian
|
||||||
|
</a>
|
||||||
|
</figcaption>
|
||||||
|
</figure>
|
||||||
|
<figure class="video-card">
|
||||||
|
<a href="https://youtu.be/FC13uhFRsVw?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
<img src="/img/docs/v4/screencast_generic.png">
|
||||||
|
<i class="fa fa-play"></i>
|
||||||
|
</a>
|
||||||
|
<figcaption>
|
||||||
|
<a href="https://youtu.be/FC13uhFRsVw?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
#3 Whats New In Grafana 2.0
|
||||||
|
</a>
|
||||||
|
</figcaption>
|
||||||
|
</figure>
|
||||||
|
<figure class="video-card">
|
||||||
|
<a href="https://youtu.be/FhNUrueWwOk?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
<img src="/img/docs/v4/screencast_generic.png">
|
||||||
|
<i class="fa fa-play"></i>
|
||||||
|
</a>
|
||||||
|
<figcaption>
|
||||||
|
<a href="https://youtu.be/FhNUrueWwOk?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
#2 Templated Graphite Queries
|
||||||
|
</a>
|
||||||
|
</figcaption>
|
||||||
|
</figure>
|
||||||
|
<figure class="video-card">
|
||||||
|
<a href="https://youtu.be/mgcJPREl3CU?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
<img src="/img/docs/v4/screencast_generic.png">
|
||||||
|
<i class="fa fa-play"></i>
|
||||||
|
</a>
|
||||||
|
<figcaption>
|
||||||
|
<a href="https://youtu.be/mgcJPREl3CU?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
|
||||||
|
#1 Building Graphite Queries
|
||||||
|
</a>
|
||||||
|
</figcaption>
|
||||||
|
</figure>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
@ -28,6 +28,29 @@
|
|||||||
</tr>
|
</tr>
|
||||||
</table>
|
</table>
|
||||||
|
|
||||||
|
[[if ne .Error "" ]]
|
||||||
|
<table class="row" >
|
||||||
|
<tr>
|
||||||
|
<td class="last">
|
||||||
|
<center>
|
||||||
|
<table class="twelve columns" >
|
||||||
|
<tr>
|
||||||
|
<td class="twelve last">
|
||||||
|
<h5 style="font-weight: bold;">Error message</h5>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td class="twelve last">
|
||||||
|
<p>[[.Error]]</p>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
</table>
|
||||||
|
</center>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
</table>
|
||||||
|
[[end]]
|
||||||
|
|
||||||
[[if ne .State "ok" ]]
|
[[if ne .State "ok" ]]
|
||||||
<table class="row" >
|
<table class="row" >
|
||||||
<tr>
|
<tr>
|
||||||
|
@ -94,8 +94,7 @@ td[class="stack-column-center"] {
|
|||||||
<table class="body" style="background: #2e2e2e;">
|
<table class="body" style="background: #2e2e2e;">
|
||||||
<tr>
|
<tr>
|
||||||
<td class="center" align="center" valign="top">
|
<td class="center" align="center" valign="top">
|
||||||
<center>
|
<center>
|
||||||
|
|
||||||
<table class="row header">
|
<table class="row header">
|
||||||
<tr>
|
<tr>
|
||||||
<td class="center" align="center">
|
<td class="center" align="center">
|
||||||
@ -107,11 +106,11 @@ td[class="stack-column-center"] {
|
|||||||
|
|
||||||
<table class="twelve columns">
|
<table class="twelve columns">
|
||||||
<tr>
|
<tr>
|
||||||
<td class="six sub-columns center">
|
<td class="twelve sub-columns center">
|
||||||
<img class="logo" src="http://grafana.org/assets/img/logo_new_transparent_200x48.png" style="width: 200px; float: none; display: inline">
|
<img class="logo" src="http://grafana.org/assets/img/logo_new_transparent_200x48.png" style="width: 200px; float: none; display: inline">
|
||||||
</td>
|
</td>
|
||||||
<td class="expander"></td>
|
<td class="expander"></td>
|
||||||
</tr>
|
</tr>
|
||||||
</table>
|
</table>
|
||||||
|
|
||||||
</td>
|
</td>
|
||||||
|
@ -4,7 +4,7 @@
|
|||||||
"company": "Coding Instinct AB"
|
"company": "Coding Instinct AB"
|
||||||
},
|
},
|
||||||
"name": "grafana",
|
"name": "grafana",
|
||||||
"version": "4.3.0-pre1",
|
"version": "4.4.0-pre1",
|
||||||
"repository": {
|
"repository": {
|
||||||
"type": "git",
|
"type": "git",
|
||||||
"url": "http://github.com/grafana/grafana.git"
|
"url": "http://github.com/grafana/grafana.git"
|
||||||
@ -76,8 +76,8 @@
|
|||||||
"systemjs-builder": "^0.15.34",
|
"systemjs-builder": "^0.15.34",
|
||||||
"tether": "^1.4.0",
|
"tether": "^1.4.0",
|
||||||
"tether-drop": "https://github.com/torkelo/drop",
|
"tether-drop": "https://github.com/torkelo/drop",
|
||||||
"tslint": "^4.0.2",
|
"tslint": "^5.1.0",
|
||||||
"typescript": "^2.1.4",
|
"typescript": "^2.2.2",
|
||||||
"virtual-scroll": "^1.1.1"
|
"virtual-scroll": "^1.1.1"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -37,14 +37,8 @@ MAX_OPEN_FILES=10000
|
|||||||
PID_FILE=/var/run/$NAME.pid
|
PID_FILE=/var/run/$NAME.pid
|
||||||
DAEMON=/usr/sbin/$NAME
|
DAEMON=/usr/sbin/$NAME
|
||||||
|
|
||||||
|
|
||||||
umask 0027
|
umask 0027
|
||||||
|
|
||||||
if [ `id -u` -ne 0 ]; then
|
|
||||||
echo "You need root privileges to run this script"
|
|
||||||
exit 4
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ ! -x $DAEMON ]; then
|
if [ ! -x $DAEMON ]; then
|
||||||
echo "Program not installed or not executable"
|
echo "Program not installed or not executable"
|
||||||
exit 5
|
exit 5
|
||||||
@ -63,9 +57,16 @@ fi
|
|||||||
|
|
||||||
DAEMON_OPTS="--pidfile=${PID_FILE} --config=${CONF_FILE} cfg:default.paths.data=${DATA_DIR} cfg:default.paths.logs=${LOG_DIR} cfg:default.paths.plugins=${PLUGINS_DIR}"
|
DAEMON_OPTS="--pidfile=${PID_FILE} --config=${CONF_FILE} cfg:default.paths.data=${DATA_DIR} cfg:default.paths.logs=${LOG_DIR} cfg:default.paths.plugins=${PLUGINS_DIR}"
|
||||||
|
|
||||||
|
function checkUser() {
|
||||||
|
if [ `id -u` -ne 0 ]; then
|
||||||
|
echo "You need root privileges to run this script"
|
||||||
|
exit 4
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
case "$1" in
|
case "$1" in
|
||||||
start)
|
start)
|
||||||
|
checkUser
|
||||||
log_daemon_msg "Starting $DESC"
|
log_daemon_msg "Starting $DESC"
|
||||||
|
|
||||||
pid=`pidofproc -p $PID_FILE grafana`
|
pid=`pidofproc -p $PID_FILE grafana`
|
||||||
@ -112,6 +113,7 @@ case "$1" in
|
|||||||
log_end_msg $return
|
log_end_msg $return
|
||||||
;;
|
;;
|
||||||
stop)
|
stop)
|
||||||
|
checkUser
|
||||||
log_daemon_msg "Stopping $DESC"
|
log_daemon_msg "Stopping $DESC"
|
||||||
|
|
||||||
if [ -f "$PID_FILE" ]; then
|
if [ -f "$PID_FILE" ]; then
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#! /usr/bin/env bash
|
#! /usr/bin/env bash
|
||||||
version=4.2.0
|
version=4.3.1
|
||||||
|
|
||||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb
|
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#! /usr/bin/env bash
|
#! /usr/bin/env bash
|
||||||
deb_ver=4.2.0-beta1
|
deb_ver=4.3.0-beta1
|
||||||
rpm_ver=4.2.0-beta1
|
rpm_ver=4.3.0-beta1
|
||||||
|
|
||||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${deb_ver}_amd64.deb
|
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${deb_ver}_amd64.deb
|
||||||
|
|
||||||
|
@ -36,11 +36,6 @@ MAX_OPEN_FILES=10000
|
|||||||
PID_FILE=/var/run/$NAME.pid
|
PID_FILE=/var/run/$NAME.pid
|
||||||
DAEMON=/usr/sbin/$NAME
|
DAEMON=/usr/sbin/$NAME
|
||||||
|
|
||||||
if [ `id -u` -ne 0 ]; then
|
|
||||||
echo "You need root privileges to run this script"
|
|
||||||
exit 4
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ ! -x $DAEMON ]; then
|
if [ ! -x $DAEMON ]; then
|
||||||
echo "Program not installed or not executable"
|
echo "Program not installed or not executable"
|
||||||
exit 5
|
exit 5
|
||||||
@ -70,8 +65,16 @@ function isRunning() {
|
|||||||
status -p $PID_FILE $NAME > /dev/null 2>&1
|
status -p $PID_FILE $NAME > /dev/null 2>&1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function checkUser() {
|
||||||
|
if [ `id -u` -ne 0 ]; then
|
||||||
|
echo "You need root privileges to run this script"
|
||||||
|
exit 4
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
case "$1" in
|
case "$1" in
|
||||||
start)
|
start)
|
||||||
|
checkUser
|
||||||
isRunning
|
isRunning
|
||||||
if [ $? -eq 0 ]; then
|
if [ $? -eq 0 ]; then
|
||||||
echo "Already running."
|
echo "Already running."
|
||||||
@ -115,6 +118,7 @@ case "$1" in
|
|||||||
exit $return
|
exit $return
|
||||||
;;
|
;;
|
||||||
stop)
|
stop)
|
||||||
|
checkUser
|
||||||
echo -n "Stopping $DESC: ..."
|
echo -n "Stopping $DESC: ..."
|
||||||
|
|
||||||
if [ -f "$PID_FILE" ]; then
|
if [ -f "$PID_FILE" ]; then
|
||||||
|
@ -255,6 +255,9 @@ func NotificationTest(c *middleware.Context, dto dtos.NotificationTestCommand) R
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := bus.Dispatch(cmd); err != nil {
|
if err := bus.Dispatch(cmd); err != nil {
|
||||||
|
if err == models.ErrSmtpNotEnabled {
|
||||||
|
return ApiError(412, err.Error(), err)
|
||||||
|
}
|
||||||
return ApiError(500, "Failed to send alert notifications", err)
|
return ApiError(500, "Failed to send alert notifications", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -39,12 +39,53 @@ func GetAnnotations(c *middleware.Context) Response {
|
|||||||
Text: item.Text,
|
Text: item.Text,
|
||||||
Metric: item.Metric,
|
Metric: item.Metric,
|
||||||
Title: item.Title,
|
Title: item.Title,
|
||||||
|
PanelId: item.PanelId,
|
||||||
|
RegionId: item.RegionId,
|
||||||
|
Type: string(item.Type),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
return Json(200, result)
|
return Json(200, result)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func PostAnnotation(c *middleware.Context, cmd dtos.PostAnnotationsCmd) Response {
|
||||||
|
repo := annotations.GetRepository()
|
||||||
|
|
||||||
|
item := annotations.Item{
|
||||||
|
OrgId: c.OrgId,
|
||||||
|
DashboardId: cmd.DashboardId,
|
||||||
|
PanelId: cmd.PanelId,
|
||||||
|
Epoch: cmd.Time / 1000,
|
||||||
|
Title: cmd.Title,
|
||||||
|
Text: cmd.Text,
|
||||||
|
CategoryId: cmd.CategoryId,
|
||||||
|
NewState: cmd.FillColor,
|
||||||
|
Type: annotations.EventType,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := repo.Save(&item); err != nil {
|
||||||
|
return ApiError(500, "Failed to save annotation", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handle regions
|
||||||
|
if cmd.IsRegion {
|
||||||
|
item.RegionId = item.Id
|
||||||
|
|
||||||
|
if err := repo.Update(&item); err != nil {
|
||||||
|
return ApiError(500, "Failed set regionId on annotation", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
item.Id = 0
|
||||||
|
item.Epoch = cmd.TimeEnd
|
||||||
|
|
||||||
|
if err := repo.Save(&item); err != nil {
|
||||||
|
return ApiError(500, "Failed save annotation for region end time", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ApiSuccess("Annotation added")
|
||||||
|
}
|
||||||
|
|
||||||
func DeleteAnnotations(c *middleware.Context, cmd dtos.DeleteAnnotationsCmd) Response {
|
func DeleteAnnotations(c *middleware.Context, cmd dtos.DeleteAnnotationsCmd) Response {
|
||||||
repo := annotations.GetRepository()
|
repo := annotations.GetRepository()
|
||||||
|
|
||||||
|
@ -223,6 +223,13 @@ func (hs *HttpServer) registerRoutes() {
|
|||||||
// Dashboard
|
// Dashboard
|
||||||
r.Group("/dashboards", func() {
|
r.Group("/dashboards", func() {
|
||||||
r.Combo("/db/:slug").Get(GetDashboard).Delete(DeleteDashboard)
|
r.Combo("/db/:slug").Get(GetDashboard).Delete(DeleteDashboard)
|
||||||
|
|
||||||
|
r.Get("/id/:dashboardId/versions", wrap(GetDashboardVersions))
|
||||||
|
r.Get("/id/:dashboardId/versions/:id", wrap(GetDashboardVersion))
|
||||||
|
r.Post("/id/:dashboardId/restore", reqEditorRole, bind(dtos.RestoreDashboardVersionCommand{}), wrap(RestoreDashboardVersion))
|
||||||
|
|
||||||
|
r.Post("/calculate-diff", bind(dtos.CalculateDiffOptions{}), wrap(CalculateDashboardDiff))
|
||||||
|
|
||||||
r.Post("/db", reqEditorRole, bind(m.SaveDashboardCommand{}), wrap(PostDashboard))
|
r.Post("/db", reqEditorRole, bind(m.SaveDashboardCommand{}), wrap(PostDashboard))
|
||||||
r.Get("/file/:file", GetDashboardFromJsonFile)
|
r.Get("/file/:file", GetDashboardFromJsonFile)
|
||||||
r.Get("/home", wrap(GetHomeDashboard))
|
r.Get("/home", wrap(GetHomeDashboard))
|
||||||
@ -253,6 +260,7 @@ func (hs *HttpServer) registerRoutes() {
|
|||||||
r.Post("/tsdb/query", bind(dtos.MetricRequest{}), wrap(QueryMetrics))
|
r.Post("/tsdb/query", bind(dtos.MetricRequest{}), wrap(QueryMetrics))
|
||||||
r.Get("/tsdb/testdata/scenarios", wrap(GetTestDataScenarios))
|
r.Get("/tsdb/testdata/scenarios", wrap(GetTestDataScenarios))
|
||||||
r.Get("/tsdb/testdata/gensql", reqGrafanaAdmin, wrap(GenerateSqlTestData))
|
r.Get("/tsdb/testdata/gensql", reqGrafanaAdmin, wrap(GenerateSqlTestData))
|
||||||
|
r.Get("/tsdb/testdata/random-walk", wrap(GetTestDataRandomWalk))
|
||||||
|
|
||||||
// metrics
|
// metrics
|
||||||
r.Get("/metrics", wrap(GetInternalMetrics))
|
r.Get("/metrics", wrap(GetInternalMetrics))
|
||||||
@ -277,7 +285,10 @@ func (hs *HttpServer) registerRoutes() {
|
|||||||
}, reqEditorRole)
|
}, reqEditorRole)
|
||||||
|
|
||||||
r.Get("/annotations", wrap(GetAnnotations))
|
r.Get("/annotations", wrap(GetAnnotations))
|
||||||
r.Post("/annotations/mass-delete", reqOrgAdmin, bind(dtos.DeleteAnnotationsCmd{}), wrap(DeleteAnnotations))
|
|
||||||
|
r.Group("/annotations", func() {
|
||||||
|
r.Post("/", bind(dtos.PostAnnotationsCmd{}), wrap(PostAnnotation))
|
||||||
|
}, reqEditorRole)
|
||||||
|
|
||||||
// error test
|
// error test
|
||||||
r.Get("/metrics/error", wrap(GenerateError))
|
r.Get("/metrics/error", wrap(GenerateError))
|
||||||
|
@ -16,7 +16,8 @@ func TestECSCredProvider(t *testing.T) {
|
|||||||
defer os.Clearenv()
|
defer os.Clearenv()
|
||||||
os.Setenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/abc/123")
|
os.Setenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/abc/123")
|
||||||
|
|
||||||
provider := remoteCredProvider(&session.Session{})
|
sess, _ := session.NewSession()
|
||||||
|
provider := remoteCredProvider(sess)
|
||||||
|
|
||||||
So(provider, ShouldNotBeNil)
|
So(provider, ShouldNotBeNil)
|
||||||
|
|
||||||
@ -30,7 +31,8 @@ func TestECSCredProvider(t *testing.T) {
|
|||||||
|
|
||||||
func TestDefaultEC2RoleProvider(t *testing.T) {
|
func TestDefaultEC2RoleProvider(t *testing.T) {
|
||||||
Convey("Running outside an ECS container task", t, func() {
|
Convey("Running outside an ECS container task", t, func() {
|
||||||
provider := remoteCredProvider(&session.Session{})
|
sess, _ := session.NewSession()
|
||||||
|
provider := remoteCredProvider(sess)
|
||||||
|
|
||||||
So(provider, ShouldNotBeNil)
|
So(provider, ShouldNotBeNil)
|
||||||
|
|
||||||
|
@ -30,12 +30,13 @@ var customMetricsDimensionsMap map[string]map[string]map[string]*CustomMetricsCa
|
|||||||
func init() {
|
func init() {
|
||||||
metricsMap = map[string][]string{
|
metricsMap = map[string][]string{
|
||||||
"AWS/ApiGateway": {"4XXError", "5XXError", "CacheHitCount", "CacheMissCount", "Count", "IntegrationLatency", "Latency"},
|
"AWS/ApiGateway": {"4XXError", "5XXError", "CacheHitCount", "CacheMissCount", "Count", "IntegrationLatency", "Latency"},
|
||||||
"AWS/ApplicationELB": {"ActiveConnectionCount", "ClientTLSNegotiationErrorCount", "HealthyHostCount", "HTTPCode_ELB_4XX_Count", "HTTPCode_ELB_5XX_Count", "HTTPCode_Target_2XX_Count", "HTTPCode_Target_3XX_Count", "HTTPCode_Target_4XX_Count", "HTTPCode_Target_5XX_Count", "NewConnectionCount", "ProcessedBytes", "RejectedConnectionCount", "RequestCount", "TargetConnectionErrorCount", "TargetResponseTime", "TargetTLSNegotiationErrorCount", "UnHealthyHostCount"},
|
"AWS/ApplicationELB": {"ActiveConnectionCount", "ClientTLSNegotiationErrorCount", "HealthyHostCount", "HTTPCode_ELB_4XX_Count", "HTTPCode_ELB_5XX_Count", "HTTPCode_Target_2XX_Count", "HTTPCode_Target_3XX_Count", "HTTPCode_Target_4XX_Count", "HTTPCode_Target_5XX_Count", "IPv6ProcessedBytes", "IPv6RequestCount", "NewConnectionCount", "ProcessedBytes", "RejectedConnectionCount", "RequestCount", "TargetConnectionErrorCount", "TargetResponseTime", "TargetTLSNegotiationErrorCount", "UnHealthyHostCount"},
|
||||||
"AWS/AutoScaling": {"GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity", "GroupInServiceInstances", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"},
|
"AWS/AutoScaling": {"GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity", "GroupInServiceInstances", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"},
|
||||||
"AWS/Billing": {"EstimatedCharges"},
|
"AWS/Billing": {"EstimatedCharges"},
|
||||||
"AWS/CloudFront": {"Requests", "BytesDownloaded", "BytesUploaded", "TotalErrorRate", "4xxErrorRate", "5xxErrorRate"},
|
"AWS/CloudFront": {"Requests", "BytesDownloaded", "BytesUploaded", "TotalErrorRate", "4xxErrorRate", "5xxErrorRate"},
|
||||||
"AWS/CloudSearch": {"SuccessfulRequests", "SearchableDocuments", "IndexUtilization", "Partitions"},
|
"AWS/CloudSearch": {"SuccessfulRequests", "SearchableDocuments", "IndexUtilization", "Partitions"},
|
||||||
"AWS/DynamoDB": {"ConditionalCheckFailedRequests", "ConsumedReadCapacityUnits", "ConsumedWriteCapacityUnits", "OnlineIndexConsumedWriteCapacity", "OnlineIndexPercentageProgress", "OnlineIndexThrottleEvents", "ProvisionedReadCapacityUnits", "ProvisionedWriteCapacityUnits", "ReadThrottleEvents", "ReturnedBytes", "ReturnedItemCount", "ReturnedRecordsCount", "SuccessfulRequestLatency", "SystemErrors", "ThrottledRequests", "UserErrors", "WriteThrottleEvents"},
|
"AWS/DMS": {"FreeableMemory", "WriteIOPS", "ReadIOPS", "WriteThroughput", "ReadThroughput", "WriteLatency", "ReadLatency", "SwapUsage", "NetworkTransmitThroughput", "NetworkReceiveThroughput", "FullLoadThroughputBandwidthSource", "FullLoadThroughputBandwidthTarget", "FullLoadThroughputRowsSource", "FullLoadThroughputRowsTarget", "CDCIncomingChanges", "CDCChangesMemorySource", "CDCChangesMemoryTarget", "CDCChangesDiskSource", "CDCChangesDiskTarget", "CDCThroughputBandwidthTarget", "CDCThroughputRowsSource", "CDCThroughputRowsTarget", "CDCLatencySource", "CDCLatencyTarget"},
|
||||||
|
"AWS/DynamoDB": {"ConditionalCheckFailedRequests", "ConsumedReadCapacityUnits", "ConsumedWriteCapacityUnits", "OnlineIndexConsumedWriteCapacity", "OnlineIndexPercentageProgress", "OnlineIndexThrottleEvents", "ProvisionedReadCapacityUnits", "ProvisionedWriteCapacityUnits", "ReadThrottleEvents", "ReturnedBytes", "ReturnedItemCount", "ReturnedRecordsCount", "SuccessfulRequestLatency", "SystemErrors", "TimeToLiveDeletedItemCount", "ThrottledRequests", "UserErrors", "WriteThrottleEvents"},
|
||||||
"AWS/EBS": {"VolumeReadBytes", "VolumeWriteBytes", "VolumeReadOps", "VolumeWriteOps", "VolumeTotalReadTime", "VolumeTotalWriteTime", "VolumeIdleTime", "VolumeQueueLength", "VolumeThroughputPercentage", "VolumeConsumedReadWriteOps", "BurstBalance"},
|
"AWS/EBS": {"VolumeReadBytes", "VolumeWriteBytes", "VolumeReadOps", "VolumeWriteOps", "VolumeTotalReadTime", "VolumeTotalWriteTime", "VolumeIdleTime", "VolumeQueueLength", "VolumeThroughputPercentage", "VolumeConsumedReadWriteOps", "BurstBalance"},
|
||||||
"AWS/EC2": {"CPUCreditUsage", "CPUCreditBalance", "CPUUtilization", "DiskReadOps", "DiskWriteOps", "DiskReadBytes", "DiskWriteBytes", "NetworkIn", "NetworkOut", "NetworkPacketsIn", "NetworkPacketsOut", "StatusCheckFailed", "StatusCheckFailed_Instance", "StatusCheckFailed_System"},
|
"AWS/EC2": {"CPUCreditUsage", "CPUCreditBalance", "CPUUtilization", "DiskReadOps", "DiskWriteOps", "DiskReadBytes", "DiskWriteBytes", "NetworkIn", "NetworkOut", "NetworkPacketsIn", "NetworkPacketsOut", "StatusCheckFailed", "StatusCheckFailed_Instance", "StatusCheckFailed_System"},
|
||||||
"AWS/EC2Spot": {"AvailableInstancePoolsCount", "BidsSubmittedForCapacity", "EligibleInstancePoolCount", "FulfilledCapacity", "MaxPercentCapacityAllocation", "PendingCapacity", "PercentCapacityAllocation", "TargetCapacity", "TerminatingCapacity"},
|
"AWS/EC2Spot": {"AvailableInstancePoolsCount", "BidsSubmittedForCapacity", "EligibleInstancePoolCount", "FulfilledCapacity", "MaxPercentCapacityAllocation", "PendingCapacity", "PercentCapacityAllocation", "TargetCapacity", "TerminatingCapacity"},
|
||||||
@ -68,27 +69,28 @@ func init() {
|
|||||||
"CoreNodesRunning", "CoreNodesPending", "LiveDataNodes", "MRTotalNodes", "MRActiveNodes", "MRLostNodes", "MRUnhealthyNodes", "MRDecommissionedNodes", "MRRebootedNodes",
|
"CoreNodesRunning", "CoreNodesPending", "LiveDataNodes", "MRTotalNodes", "MRActiveNodes", "MRLostNodes", "MRUnhealthyNodes", "MRDecommissionedNodes", "MRRebootedNodes",
|
||||||
"S3BytesWritten", "S3BytesRead", "HDFSUtilization", "HDFSBytesRead", "HDFSBytesWritten", "MissingBlocks", "CorruptBlocks", "TotalLoad", "MemoryTotalMB", "MemoryReservedMB", "MemoryAvailableMB", "MemoryAllocatedMB", "PendingDeletionBlocks", "UnderReplicatedBlocks", "DfsPendingReplicationBlocks", "CapacityRemainingGB",
|
"S3BytesWritten", "S3BytesRead", "HDFSUtilization", "HDFSBytesRead", "HDFSBytesWritten", "MissingBlocks", "CorruptBlocks", "TotalLoad", "MemoryTotalMB", "MemoryReservedMB", "MemoryAvailableMB", "MemoryAllocatedMB", "PendingDeletionBlocks", "UnderReplicatedBlocks", "DfsPendingReplicationBlocks", "CapacityRemainingGB",
|
||||||
"HbaseBackupFailed", "MostRecentBackupDuration", "TimeSinceLastSuccessfulBackup"},
|
"HbaseBackupFailed", "MostRecentBackupDuration", "TimeSinceLastSuccessfulBackup"},
|
||||||
"AWS/ES": {"ClusterStatus.green", "ClusterStatus.yellow", "ClusterStatus.red", "Nodes", "SearchableDocuments", "DeletedDocuments", "CPUUtilization", "FreeStorageSpace", "JVMMemoryPressure", "AutomatedSnapshotFailure", "MasterCPUUtilization", "MasterFreeStorageSpace", "MasterJVMMemoryPressure", "ReadLatency", "WriteLatency", "ReadThroughput", "WriteThroughput", "DiskQueueLength", "ReadIOPS", "WriteIOPS"},
|
"AWS/ES": {"ClusterStatus.green", "ClusterStatus.yellow", "ClusterStatus.red", "ClusterUsedSpace", "Nodes", "SearchableDocuments", "DeletedDocuments", "CPUCreditBalance", "CPUUtilization", "FreeStorageSpace", "JVMMemoryPressure", "AutomatedSnapshotFailure", "MasterCPUCreditBalance", "MasterCPUUtilization", "MasterFreeStorageSpace", "MasterJVMMemoryPressure", "ReadLatency", "WriteLatency", "ReadThroughput", "WriteThroughput", "DiskQueueDepth", "ReadIOPS", "WriteIOPS"},
|
||||||
"AWS/Events": {"Invocations", "FailedInvocations", "TriggeredRules", "MatchedEvents", "ThrottledRules"},
|
"AWS/Events": {"Invocations", "FailedInvocations", "TriggeredRules", "MatchedEvents", "ThrottledRules"},
|
||||||
"AWS/Firehose": {"DeliveryToElasticsearch.Bytes", "DeliveryToElasticsearch.Records", "DeliveryToElasticsearch.Success", "DeliveryToRedshift.Bytes", "DeliveryToRedshift.Records", "DeliveryToRedshift.Success", "DeliveryToS3.Bytes", "DeliveryToS3.DataFreshness", "DeliveryToS3.Records", "DeliveryToS3.Success", "IncomingBytes", "IncomingRecords", "DescribeDeliveryStream.Latency", "DescribeDeliveryStream.Requests", "ListDeliveryStreams.Latency", "ListDeliveryStreams.Requests", "PutRecord.Bytes", "PutRecord.Latency", "PutRecord.Requests", "PutRecordBatch.Bytes", "PutRecordBatch.Latency", "PutRecordBatch.Records", "PutRecordBatch.Requests", "UpdateDeliveryStream.Latency", "UpdateDeliveryStream.Requests"},
|
"AWS/Firehose": {"DeliveryToElasticsearch.Bytes", "DeliveryToElasticsearch.Records", "DeliveryToElasticsearch.Success", "DeliveryToRedshift.Bytes", "DeliveryToRedshift.Records", "DeliveryToRedshift.Success", "DeliveryToS3.Bytes", "DeliveryToS3.DataFreshness", "DeliveryToS3.Records", "DeliveryToS3.Success", "IncomingBytes", "IncomingRecords", "DescribeDeliveryStream.Latency", "DescribeDeliveryStream.Requests", "ListDeliveryStreams.Latency", "ListDeliveryStreams.Requests", "PutRecord.Bytes", "PutRecord.Latency", "PutRecord.Requests", "PutRecordBatch.Bytes", "PutRecordBatch.Latency", "PutRecordBatch.Records", "PutRecordBatch.Requests", "UpdateDeliveryStream.Latency", "UpdateDeliveryStream.Requests"},
|
||||||
"AWS/IoT": {"PublishIn.Success", "PublishOut.Success", "Subscribe.Success", "Ping.Success", "Connect.Success", "GetThingShadow.Accepted"},
|
"AWS/IoT": {"PublishIn.Success", "PublishOut.Success", "Subscribe.Success", "Ping.Success", "Connect.Success", "GetThingShadow.Accepted"},
|
||||||
"AWS/Kinesis": {"GetRecords.Bytes", "GetRecords.IteratorAge", "GetRecords.IteratorAgeMilliseconds", "GetRecords.Latency", "GetRecords.Records", "GetRecords.Success", "IncomingBytes", "IncomingRecords", "PutRecord.Bytes", "PutRecord.Latency", "PutRecord.Success", "PutRecords.Bytes", "PutRecords.Latency", "PutRecords.Records", "PutRecords.Success", "ReadProvisionedThroughputExceeded", "WriteProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "OutgoingBytes", "OutgoingRecords"},
|
"AWS/Kinesis": {"GetRecords.Bytes", "GetRecords.IteratorAge", "GetRecords.IteratorAgeMilliseconds", "GetRecords.Latency", "GetRecords.Records", "GetRecords.Success", "IncomingBytes", "IncomingRecords", "PutRecord.Bytes", "PutRecord.Latency", "PutRecord.Success", "PutRecords.Bytes", "PutRecords.Latency", "PutRecords.Records", "PutRecords.Success", "ReadProvisionedThroughputExceeded", "WriteProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "OutgoingBytes", "OutgoingRecords"},
|
||||||
"AWS/KinesisAnalytics": {"Bytes", "MillisBehindLatest", "Records", "Success"},
|
"AWS/KinesisAnalytics": {"Bytes", "MillisBehindLatest", "Records", "Success"},
|
||||||
"AWS/Lambda": {"Invocations", "Errors", "Duration", "Throttles"},
|
"AWS/Lambda": {"Invocations", "Errors", "Duration", "Throttles", "IteratorAge"},
|
||||||
"AWS/Logs": {"IncomingBytes", "IncomingLogEvents", "ForwardedBytes", "ForwardedLogEvents", "DeliveryErrors", "DeliveryThrottling"},
|
"AWS/Logs": {"IncomingBytes", "IncomingLogEvents", "ForwardedBytes", "ForwardedLogEvents", "DeliveryErrors", "DeliveryThrottling"},
|
||||||
"AWS/ML": {"PredictCount", "PredictFailureCount"},
|
"AWS/ML": {"PredictCount", "PredictFailureCount"},
|
||||||
"AWS/OpsWorks": {"cpu_idle", "cpu_nice", "cpu_system", "cpu_user", "cpu_waitio", "load_1", "load_5", "load_15", "memory_buffers", "memory_cached", "memory_free", "memory_swap", "memory_total", "memory_used", "procs"},
|
"AWS/OpsWorks": {"cpu_idle", "cpu_nice", "cpu_system", "cpu_user", "cpu_waitio", "load_1", "load_5", "load_15", "memory_buffers", "memory_cached", "memory_free", "memory_swap", "memory_total", "memory_used", "procs"},
|
||||||
"AWS/Redshift": {"CPUUtilization", "DatabaseConnections", "HealthStatus", "MaintenanceMode", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "PercentageDiskSpaceUsed", "ReadIOPS", "ReadLatency", "ReadThroughput", "WriteIOPS", "WriteLatency", "WriteThroughput"},
|
"AWS/Redshift": {"CPUUtilization", "DatabaseConnections", "HealthStatus", "MaintenanceMode", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "PercentageDiskSpaceUsed", "ReadIOPS", "ReadLatency", "ReadThroughput", "WriteIOPS", "WriteLatency", "WriteThroughput"},
|
||||||
"AWS/RDS": {"ActiveTransactions", "AuroraBinlogReplicaLag", "AuroraReplicaLag", "AuroraReplicaLagMaximum", "AuroraReplicaLagMinimum", "BinLogDiskUsage", "BlockedTransactions", "BufferCacheHitRatio", "CommitLatency", "CommitThroughput", "CPUCreditBalance", "CPUCreditUsage", "CPUUtilization", "DatabaseConnections", "DDLLatency", "DDLThroughput", "Deadlocks", "DiskQueueDepth", "DMLLatency", "DMLThroughput", "FailedSqlStatements", "FreeableMemory", "FreeStorageSpace", "LoginFailures", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "ReadIOPS", "ReadLatency", "ReadThroughput", "ReplicaLag", "ResultSetCacheHitRatio", "SelectLatency", "SelectThroughput", "SwapUsage", "TotalConnections", "VolumeReadIOPS", "VolumeWriteIOPS", "WriteIOPS", "WriteLatency", "WriteThroughput"},
|
"AWS/RDS": {"ActiveTransactions", "AuroraBinlogReplicaLag", "AuroraReplicaLag", "AuroraReplicaLagMaximum", "AuroraReplicaLagMinimum", "BinLogDiskUsage", "BlockedTransactions", "BufferCacheHitRatio", "CommitLatency", "CommitThroughput", "BinLogDiskUsage", "CPUCreditBalance", "CPUCreditUsage", "CPUUtilization", "DatabaseConnections", "DDLLatency", "DDLThroughput", "Deadlocks", "DeleteLatency", "DeleteThroughput", "DiskQueueDepth", "DMLLatency", "DMLThroughput", "EngineUptime", "FailedSqlStatements", "FreeableMemory", "FreeLocalStorage", "FreeStorageSpace", "InsertLatency", "InsertThroughput", "LoginFailures", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "NetworkThroughput", "Queries", "ReadIOPS", "ReadLatency", "ReadThroughput", "ReplicaLag", "ResultSetCacheHitRatio", "SelectLatency", "SelectThroughput", "SwapUsage", "TotalConnections", "UpdateLatency", "UpdateThroughput", "VolumeBytesUsed", "VolumeReadIOPS", "VolumeWriteIOPS", "WriteIOPS", "WriteLatency", "WriteThroughput"},
|
||||||
"AWS/Route53": {"HealthCheckStatus", "HealthCheckPercentageHealthy", "ConnectionTime", "SSLHandshakeTime", "TimeToFirstByte"},
|
"AWS/Route53": {"ChildHealthCheckHealthyCount", "HealthCheckStatus", "HealthCheckPercentageHealthy", "ConnectionTime", "SSLHandshakeTime", "TimeToFirstByte"},
|
||||||
"AWS/S3": {"BucketSizeBytes", "NumberOfObjects", "AllRequests", "GetRequests", "PutRequests", "DeleteRequests", "HeadRequests", "PostRequests", "ListRequests", "BytesDownloaded", "BytesUploaded", "4xxErrors", "5xxErrors", "FirstByteLatency", "TotalRequestLatency"},
|
"AWS/S3": {"BucketSizeBytes", "NumberOfObjects", "AllRequests", "GetRequests", "PutRequests", "DeleteRequests", "HeadRequests", "PostRequests", "ListRequests", "BytesDownloaded", "BytesUploaded", "4xxErrors", "5xxErrors", "FirstByteLatency", "TotalRequestLatency"},
|
||||||
"AWS/SES": {"Bounce", "Complaint", "Delivery", "Reject", "Send"},
|
"AWS/SES": {"Bounce", "Complaint", "Delivery", "Reject", "Send"},
|
||||||
"AWS/SNS": {"NumberOfMessagesPublished", "PublishSize", "NumberOfNotificationsDelivered", "NumberOfNotificationsFailed"},
|
"AWS/SNS": {"NumberOfMessagesPublished", "PublishSize", "NumberOfNotificationsDelivered", "NumberOfNotificationsFailed"},
|
||||||
"AWS/SQS": {"NumberOfMessagesSent", "SentMessageSize", "NumberOfMessagesReceived", "NumberOfEmptyReceives", "NumberOfMessagesDeleted", "ApproximateNumberOfMessagesDelayed", "ApproximateNumberOfMessagesVisible", "ApproximateNumberOfMessagesNotVisible"},
|
"AWS/SQS": {"NumberOfMessagesSent", "SentMessageSize", "NumberOfMessagesReceived", "NumberOfEmptyReceives", "NumberOfMessagesDeleted", "ApproximateAgeOfOldestMessage", "ApproximateNumberOfMessagesDelayed", "ApproximateNumberOfMessagesVisible", "ApproximateNumberOfMessagesNotVisible"},
|
||||||
"AWS/StorageGateway": {"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "CloudBytesDownloaded", "CloudDownloadLatency", "CloudBytesUploaded", "UploadBufferFree", "UploadBufferPercentUsed", "UploadBufferUsed", "QueuedWrites", "ReadBytes", "ReadTime", "TotalCacheSize", "WriteBytes", "WriteTime", "TimeSinceLastRecoveryPoint", "WorkingStorageFree", "WorkingStoragePercentUsed", "WorkingStorageUsed",
|
"AWS/StorageGateway": {"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "CloudBytesDownloaded", "CloudDownloadLatency", "CloudBytesUploaded", "UploadBufferFree", "UploadBufferPercentUsed", "UploadBufferUsed", "QueuedWrites", "ReadBytes", "ReadTime", "TotalCacheSize", "WriteBytes", "WriteTime", "TimeSinceLastRecoveryPoint", "WorkingStorageFree", "WorkingStoragePercentUsed", "WorkingStorageUsed",
|
||||||
"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "ReadBytes", "ReadTime", "WriteBytes", "WriteTime", "QueuedWrites"},
|
"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "ReadBytes", "ReadTime", "WriteBytes", "WriteTime", "QueuedWrites"},
|
||||||
"AWS/SWF": {"DecisionTaskScheduleToStartTime", "DecisionTaskStartToCloseTime", "DecisionTasksCompleted", "StartedDecisionTasksTimedOutOnClose", "WorkflowStartToCloseTime", "WorkflowsCanceled", "WorkflowsCompleted", "WorkflowsContinuedAsNew", "WorkflowsFailed", "WorkflowsTerminated", "WorkflowsTimedOut",
|
"AWS/SWF": {"DecisionTaskScheduleToStartTime", "DecisionTaskStartToCloseTime", "DecisionTasksCompleted", "StartedDecisionTasksTimedOutOnClose", "WorkflowStartToCloseTime", "WorkflowsCanceled", "WorkflowsCompleted", "WorkflowsContinuedAsNew", "WorkflowsFailed", "WorkflowsTerminated", "WorkflowsTimedOut",
|
||||||
"ActivityTaskScheduleToCloseTime", "ActivityTaskScheduleToStartTime", "ActivityTaskStartToCloseTime", "ActivityTasksCanceled", "ActivityTasksCompleted", "ActivityTasksFailed", "ScheduledActivityTasksTimedOutOnClose", "ScheduledActivityTasksTimedOutOnStart", "StartedActivityTasksTimedOutOnClose", "StartedActivityTasksTimedOutOnHeartbeat"},
|
"ActivityTaskScheduleToCloseTime", "ActivityTaskScheduleToStartTime", "ActivityTaskStartToCloseTime", "ActivityTasksCanceled", "ActivityTasksCompleted", "ActivityTasksFailed", "ScheduledActivityTasksTimedOutOnClose", "ScheduledActivityTasksTimedOutOnStart", "StartedActivityTasksTimedOutOnClose", "StartedActivityTasksTimedOutOnHeartbeat"},
|
||||||
|
"AWS/VPN": {"TunnelState", "TunnelDataIn", "TunnelDataOut"},
|
||||||
"AWS/WAF": {"AllowedRequests", "BlockedRequests", "CountedRequests"},
|
"AWS/WAF": {"AllowedRequests", "BlockedRequests", "CountedRequests"},
|
||||||
"AWS/WorkSpaces": {"Available", "Unhealthy", "ConnectionAttempt", "ConnectionSuccess", "ConnectionFailure", "SessionLaunchTime", "InSessionLatency", "SessionDisconnect"},
|
"AWS/WorkSpaces": {"Available", "Unhealthy", "ConnectionAttempt", "ConnectionSuccess", "ConnectionFailure", "SessionLaunchTime", "InSessionLatency", "SessionDisconnect"},
|
||||||
"KMS": {"SecondsUntilKeyMaterialExpiration"},
|
"KMS": {"SecondsUntilKeyMaterialExpiration"},
|
||||||
@ -100,6 +102,7 @@ func init() {
|
|||||||
"AWS/Billing": {"ServiceName", "LinkedAccount", "Currency"},
|
"AWS/Billing": {"ServiceName", "LinkedAccount", "Currency"},
|
||||||
"AWS/CloudFront": {"DistributionId", "Region"},
|
"AWS/CloudFront": {"DistributionId", "Region"},
|
||||||
"AWS/CloudSearch": {},
|
"AWS/CloudSearch": {},
|
||||||
|
"AWS/DMS": {"ReplicationInstanceIdentifier", "ReplicationTaskIdentifier"},
|
||||||
"AWS/DynamoDB": {"TableName", "GlobalSecondaryIndexName", "Operation", "StreamLabel"},
|
"AWS/DynamoDB": {"TableName", "GlobalSecondaryIndexName", "Operation", "StreamLabel"},
|
||||||
"AWS/EBS": {"VolumeId"},
|
"AWS/EBS": {"VolumeId"},
|
||||||
"AWS/EC2": {"AutoScalingGroupName", "ImageId", "InstanceId", "InstanceType"},
|
"AWS/EC2": {"AutoScalingGroupName", "ImageId", "InstanceId", "InstanceType"},
|
||||||
@ -121,14 +124,15 @@ func init() {
|
|||||||
"AWS/ML": {"MLModelId", "RequestMode"},
|
"AWS/ML": {"MLModelId", "RequestMode"},
|
||||||
"AWS/OpsWorks": {"StackId", "LayerId", "InstanceId"},
|
"AWS/OpsWorks": {"StackId", "LayerId", "InstanceId"},
|
||||||
"AWS/Redshift": {"NodeID", "ClusterIdentifier"},
|
"AWS/Redshift": {"NodeID", "ClusterIdentifier"},
|
||||||
"AWS/RDS": {"DBInstanceIdentifier", "DBClusterIdentifier", "DatabaseClass", "EngineName"},
|
"AWS/RDS": {"DBInstanceIdentifier", "DBClusterIdentifier", "DatabaseClass", "EngineName", "Role"},
|
||||||
"AWS/Route53": {"HealthCheckId"},
|
"AWS/Route53": {"HealthCheckId", "Region"},
|
||||||
"AWS/S3": {"BucketName", "StorageType", "FilterId"},
|
"AWS/S3": {"BucketName", "StorageType", "FilterId"},
|
||||||
"AWS/SES": {},
|
"AWS/SES": {},
|
||||||
"AWS/SNS": {"Application", "Platform", "TopicName"},
|
"AWS/SNS": {"Application", "Platform", "TopicName"},
|
||||||
"AWS/SQS": {"QueueName"},
|
"AWS/SQS": {"QueueName"},
|
||||||
"AWS/StorageGateway": {"GatewayId", "GatewayName", "VolumeId"},
|
"AWS/StorageGateway": {"GatewayId", "GatewayName", "VolumeId"},
|
||||||
"AWS/SWF": {"Domain", "WorkflowTypeName", "WorkflowTypeVersion", "ActivityTypeName", "ActivityTypeVersion"},
|
"AWS/SWF": {"Domain", "WorkflowTypeName", "WorkflowTypeVersion", "ActivityTypeName", "ActivityTypeVersion"},
|
||||||
|
"AWS/VPN": {"VpnId", "TunnelIpAddress"},
|
||||||
"AWS/WAF": {"Rule", "WebACL"},
|
"AWS/WAF": {"Rule", "WebACL"},
|
||||||
"AWS/WorkSpaces": {"DirectoryId", "WorkspaceId"},
|
"AWS/WorkSpaces": {"DirectoryId", "WorkspaceId"},
|
||||||
"KMS": {"KeyId"},
|
"KMS": {"KeyId"},
|
||||||
|
@ -2,12 +2,14 @@ package api
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/grafana/grafana/pkg/api/dtos"
|
"github.com/grafana/grafana/pkg/api/dtos"
|
||||||
"github.com/grafana/grafana/pkg/bus"
|
"github.com/grafana/grafana/pkg/bus"
|
||||||
|
"github.com/grafana/grafana/pkg/components/dashdiffs"
|
||||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||||
"github.com/grafana/grafana/pkg/log"
|
"github.com/grafana/grafana/pkg/log"
|
||||||
"github.com/grafana/grafana/pkg/metrics"
|
"github.com/grafana/grafana/pkg/metrics"
|
||||||
@ -60,6 +62,9 @@ func GetDashboard(c *middleware.Context) {
|
|||||||
creator = getUserLogin(dash.CreatedBy)
|
creator = getUserLogin(dash.CreatedBy)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// make sure db version is in sync with json model version
|
||||||
|
dash.Data.Set("version", dash.Version)
|
||||||
|
|
||||||
dto := dtos.DashboardFullWithMeta{
|
dto := dtos.DashboardFullWithMeta{
|
||||||
Dashboard: dash.Data,
|
Dashboard: dash.Data,
|
||||||
Meta: dtos.DashboardMeta{
|
Meta: dtos.DashboardMeta{
|
||||||
@ -77,6 +82,7 @@ func GetDashboard(c *middleware.Context) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO(ben): copy this performance metrics logic for the new API endpoints added
|
||||||
c.TimeRequest(metrics.M_Api_Dashboard_Get)
|
c.TimeRequest(metrics.M_Api_Dashboard_Get)
|
||||||
c.JSON(200, dto)
|
c.JSON(200, dto)
|
||||||
}
|
}
|
||||||
@ -114,18 +120,15 @@ func DeleteDashboard(c *middleware.Context) {
|
|||||||
|
|
||||||
func PostDashboard(c *middleware.Context, cmd m.SaveDashboardCommand) Response {
|
func PostDashboard(c *middleware.Context, cmd m.SaveDashboardCommand) Response {
|
||||||
cmd.OrgId = c.OrgId
|
cmd.OrgId = c.OrgId
|
||||||
|
cmd.UserId = c.UserId
|
||||||
if !c.IsSignedIn {
|
|
||||||
cmd.UserId = -1
|
|
||||||
} else {
|
|
||||||
cmd.UserId = c.UserId
|
|
||||||
}
|
|
||||||
|
|
||||||
dash := cmd.GetDashboardModel()
|
dash := cmd.GetDashboardModel()
|
||||||
|
|
||||||
// Check if Title is empty
|
// Check if Title is empty
|
||||||
if dash.Title == "" {
|
if dash.Title == "" {
|
||||||
return ApiError(400, m.ErrDashboardTitleEmpty.Error(), nil)
|
return ApiError(400, m.ErrDashboardTitleEmpty.Error(), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
if dash.Id == 0 {
|
if dash.Id == 0 {
|
||||||
limitReached, err := middleware.QuotaReached(c, "dashboard")
|
limitReached, err := middleware.QuotaReached(c, "dashboard")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -255,6 +258,135 @@ func GetDashboardFromJsonFile(c *middleware.Context) {
|
|||||||
c.JSON(200, &dash)
|
c.JSON(200, &dash)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetDashboardVersions returns all dashboard versions as JSON
|
||||||
|
func GetDashboardVersions(c *middleware.Context) Response {
|
||||||
|
dashboardId := c.ParamsInt64(":dashboardId")
|
||||||
|
limit := c.QueryInt("limit")
|
||||||
|
start := c.QueryInt("start")
|
||||||
|
|
||||||
|
if limit == 0 {
|
||||||
|
limit = 1000
|
||||||
|
}
|
||||||
|
|
||||||
|
query := m.GetDashboardVersionsQuery{
|
||||||
|
OrgId: c.OrgId,
|
||||||
|
DashboardId: dashboardId,
|
||||||
|
Limit: limit,
|
||||||
|
Start: start,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := bus.Dispatch(&query); err != nil {
|
||||||
|
return ApiError(404, fmt.Sprintf("No versions found for dashboardId %d", dashboardId), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, version := range query.Result {
|
||||||
|
if version.RestoredFrom == version.Version {
|
||||||
|
version.Message = "Initial save (created by migration)"
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if version.RestoredFrom > 0 {
|
||||||
|
version.Message = fmt.Sprintf("Restored from version %d", version.RestoredFrom)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if version.ParentVersion == 0 {
|
||||||
|
version.Message = "Initial save"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return Json(200, query.Result)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDashboardVersion returns the dashboard version with the given ID.
|
||||||
|
func GetDashboardVersion(c *middleware.Context) Response {
|
||||||
|
dashboardId := c.ParamsInt64(":dashboardId")
|
||||||
|
version := c.ParamsInt(":id")
|
||||||
|
|
||||||
|
query := m.GetDashboardVersionQuery{
|
||||||
|
OrgId: c.OrgId,
|
||||||
|
DashboardId: dashboardId,
|
||||||
|
Version: version,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := bus.Dispatch(&query); err != nil {
|
||||||
|
return ApiError(500, fmt.Sprintf("Dashboard version %d not found for dashboardId %d", version, dashboardId), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
creator := "Anonymous"
|
||||||
|
if query.Result.CreatedBy > 0 {
|
||||||
|
creator = getUserLogin(query.Result.CreatedBy)
|
||||||
|
}
|
||||||
|
|
||||||
|
dashVersionMeta := &m.DashboardVersionMeta{
|
||||||
|
DashboardVersion: *query.Result,
|
||||||
|
CreatedBy: creator,
|
||||||
|
}
|
||||||
|
|
||||||
|
return Json(200, dashVersionMeta)
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /api/dashboards/calculate-diff performs diffs on two dashboards
|
||||||
|
func CalculateDashboardDiff(c *middleware.Context, apiOptions dtos.CalculateDiffOptions) Response {
|
||||||
|
|
||||||
|
options := dashdiffs.Options{
|
||||||
|
OrgId: c.OrgId,
|
||||||
|
DiffType: dashdiffs.ParseDiffType(apiOptions.DiffType),
|
||||||
|
Base: dashdiffs.DiffTarget{
|
||||||
|
DashboardId: apiOptions.Base.DashboardId,
|
||||||
|
Version: apiOptions.Base.Version,
|
||||||
|
UnsavedDashboard: apiOptions.Base.UnsavedDashboard,
|
||||||
|
},
|
||||||
|
New: dashdiffs.DiffTarget{
|
||||||
|
DashboardId: apiOptions.New.DashboardId,
|
||||||
|
Version: apiOptions.New.Version,
|
||||||
|
UnsavedDashboard: apiOptions.New.UnsavedDashboard,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := dashdiffs.CalculateDiff(&options)
|
||||||
|
if err != nil {
|
||||||
|
if err == m.ErrDashboardVersionNotFound {
|
||||||
|
return ApiError(404, "Dashboard version not found", err)
|
||||||
|
}
|
||||||
|
return ApiError(500, "Unable to compute diff", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if options.DiffType == dashdiffs.DiffDelta {
|
||||||
|
return Respond(200, result.Delta).Header("Content-Type", "application/json")
|
||||||
|
} else {
|
||||||
|
return Respond(200, result.Delta).Header("Content-Type", "text/html")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RestoreDashboardVersion restores a dashboard to the given version.
|
||||||
|
func RestoreDashboardVersion(c *middleware.Context, apiCmd dtos.RestoreDashboardVersionCommand) Response {
|
||||||
|
dashboardId := c.ParamsInt64(":dashboardId")
|
||||||
|
|
||||||
|
dashQuery := m.GetDashboardQuery{Id: dashboardId, OrgId: c.OrgId}
|
||||||
|
if err := bus.Dispatch(&dashQuery); err != nil {
|
||||||
|
return ApiError(404, "Dashboard not found", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
versionQuery := m.GetDashboardVersionQuery{DashboardId: dashboardId, Version: apiCmd.Version, OrgId: c.OrgId}
|
||||||
|
if err := bus.Dispatch(&versionQuery); err != nil {
|
||||||
|
return ApiError(404, "Dashboard version not found", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
dashboard := dashQuery.Result
|
||||||
|
version := versionQuery.Result
|
||||||
|
|
||||||
|
saveCmd := m.SaveDashboardCommand{}
|
||||||
|
saveCmd.RestoredFrom = version.Version
|
||||||
|
saveCmd.OrgId = c.OrgId
|
||||||
|
saveCmd.UserId = c.UserId
|
||||||
|
saveCmd.Dashboard = version.Data
|
||||||
|
saveCmd.Dashboard.Set("version", dashboard.Version)
|
||||||
|
saveCmd.Message = fmt.Sprintf("Restored from version %d", version.Version)
|
||||||
|
|
||||||
|
return PostDashboard(c, saveCmd)
|
||||||
|
}
|
||||||
|
|
||||||
func GetDashboardTags(c *middleware.Context) {
|
func GetDashboardTags(c *middleware.Context) {
|
||||||
query := m.GetDashboardTagsQuery{OrgId: c.OrgId}
|
query := m.GetDashboardTagsQuery{OrgId: c.OrgId}
|
||||||
err := bus.Dispatch(&query)
|
err := bus.Dispatch(&query)
|
||||||
|
@ -3,6 +3,7 @@ package api
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httputil"
|
"net/http/httputil"
|
||||||
"net/url"
|
"net/url"
|
||||||
@ -62,6 +63,27 @@ func NewReverseProxy(ds *m.DataSource, proxyPath string, targetUrl *url.URL) *ht
|
|||||||
// clear cookie headers
|
// clear cookie headers
|
||||||
req.Header.Del("Cookie")
|
req.Header.Del("Cookie")
|
||||||
req.Header.Del("Set-Cookie")
|
req.Header.Del("Set-Cookie")
|
||||||
|
|
||||||
|
// clear X-Forwarded Host/Port/Proto headers
|
||||||
|
req.Header.Del("X-Forwarded-Host")
|
||||||
|
req.Header.Del("X-Forwarded-Port")
|
||||||
|
req.Header.Del("X-Forwarded-Proto")
|
||||||
|
|
||||||
|
// set X-Forwarded-For header
|
||||||
|
if req.RemoteAddr != "" {
|
||||||
|
remoteAddr, _, err := net.SplitHostPort(req.RemoteAddr)
|
||||||
|
if err != nil {
|
||||||
|
remoteAddr = req.RemoteAddr
|
||||||
|
}
|
||||||
|
if req.Header.Get("X-Forwarded-For") != "" {
|
||||||
|
req.Header.Set("X-Forwarded-For", req.Header.Get("X-Forwarded-For")+", "+remoteAddr)
|
||||||
|
} else {
|
||||||
|
req.Header.Set("X-Forwarded-For", remoteAddr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// reqBytes, _ := httputil.DumpRequestOut(req, true);
|
||||||
|
// log.Trace("Proxying datasource request: %s", string(reqBytes))
|
||||||
}
|
}
|
||||||
|
|
||||||
return &httputil.ReverseProxy{Director: director, FlushInterval: time.Millisecond * 200}
|
return &httputil.ReverseProxy{Director: director, FlushInterval: time.Millisecond * 200}
|
||||||
|
@ -20,7 +20,7 @@ func GetDataSources(c *middleware.Context) Response {
|
|||||||
|
|
||||||
result := make(dtos.DataSourceList, 0)
|
result := make(dtos.DataSourceList, 0)
|
||||||
for _, ds := range query.Result {
|
for _, ds := range query.Result {
|
||||||
dsItem := dtos.DataSource{
|
dsItem := dtos.DataSourceListItemDTO{
|
||||||
Id: ds.Id,
|
Id: ds.Id,
|
||||||
OrgId: ds.OrgId,
|
OrgId: ds.OrgId,
|
||||||
Name: ds.Name,
|
Name: ds.Name,
|
||||||
@ -149,8 +149,8 @@ func fillWithSecureJsonData(cmd *m.UpdateDataSourceCommand) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
secureJsonData := ds.SecureJsonData.Decrypt()
|
|
||||||
|
|
||||||
|
secureJsonData := ds.SecureJsonData.Decrypt()
|
||||||
for k, v := range secureJsonData {
|
for k, v := range secureJsonData {
|
||||||
|
|
||||||
if _, ok := cmd.SecureJsonData[k]; !ok {
|
if _, ok := cmd.SecureJsonData[k]; !ok {
|
||||||
@ -158,6 +158,8 @@ func fillWithSecureJsonData(cmd *m.UpdateDataSourceCommand) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// set version from db
|
||||||
|
cmd.Version = ds.Version
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -12,10 +12,25 @@ type Annotation struct {
|
|||||||
Title string `json:"title"`
|
Title string `json:"title"`
|
||||||
Text string `json:"text"`
|
Text string `json:"text"`
|
||||||
Metric string `json:"metric"`
|
Metric string `json:"metric"`
|
||||||
|
RegionId int64 `json:"regionId"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
|
||||||
Data *simplejson.Json `json:"data"`
|
Data *simplejson.Json `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type PostAnnotationsCmd struct {
|
||||||
|
DashboardId int64 `json:"dashboardId"`
|
||||||
|
PanelId int64 `json:"panelId"`
|
||||||
|
CategoryId int64 `json:"categoryId"`
|
||||||
|
Time int64 `json:"time"`
|
||||||
|
Title string `json:"title"`
|
||||||
|
Text string `json:"text"`
|
||||||
|
|
||||||
|
FillColor string `json:"fillColor"`
|
||||||
|
IsRegion bool `json:"isRegion"`
|
||||||
|
TimeEnd int64 `json:"timeEnd"`
|
||||||
|
}
|
||||||
|
|
||||||
type DeleteAnnotationsCmd struct {
|
type DeleteAnnotationsCmd struct {
|
||||||
AlertId int64 `json:"alertId"`
|
AlertId int64 `json:"alertId"`
|
||||||
DashboardId int64 `json:"dashboardId"`
|
DashboardId int64 `json:"dashboardId"`
|
||||||
|
49
pkg/api/dtos/dashboard.go
Normal file
49
pkg/api/dtos/dashboard.go
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
package dtos
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||||
|
)
|
||||||
|
|
||||||
|
type DashboardMeta struct {
|
||||||
|
IsStarred bool `json:"isStarred,omitempty"`
|
||||||
|
IsHome bool `json:"isHome,omitempty"`
|
||||||
|
IsSnapshot bool `json:"isSnapshot,omitempty"`
|
||||||
|
Type string `json:"type,omitempty"`
|
||||||
|
CanSave bool `json:"canSave"`
|
||||||
|
CanEdit bool `json:"canEdit"`
|
||||||
|
CanStar bool `json:"canStar"`
|
||||||
|
Slug string `json:"slug"`
|
||||||
|
Expires time.Time `json:"expires"`
|
||||||
|
Created time.Time `json:"created"`
|
||||||
|
Updated time.Time `json:"updated"`
|
||||||
|
UpdatedBy string `json:"updatedBy"`
|
||||||
|
CreatedBy string `json:"createdBy"`
|
||||||
|
Version int `json:"version"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DashboardFullWithMeta struct {
|
||||||
|
Meta DashboardMeta `json:"meta"`
|
||||||
|
Dashboard *simplejson.Json `json:"dashboard"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DashboardRedirect struct {
|
||||||
|
RedirectUri string `json:"redirectUri"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type CalculateDiffOptions struct {
|
||||||
|
Base CalculateDiffTarget `json:"base" binding:"Required"`
|
||||||
|
New CalculateDiffTarget `json:"new" binding:"Required"`
|
||||||
|
DiffType string `json:"diffType" binding:"Required"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type CalculateDiffTarget struct {
|
||||||
|
DashboardId int64 `json:"dashboardId"`
|
||||||
|
Version int `json:"version"`
|
||||||
|
UnsavedDashboard *simplejson.Json `json:"unsavedDashboard"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type RestoreDashboardVersionCommand struct {
|
||||||
|
Version int `json:"version" binding:"Required"`
|
||||||
|
}
|
@ -4,7 +4,6 @@ import (
|
|||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||||
m "github.com/grafana/grafana/pkg/models"
|
m "github.com/grafana/grafana/pkg/models"
|
||||||
@ -38,32 +37,6 @@ type CurrentUser struct {
|
|||||||
HelpFlags1 m.HelpFlags1 `json:"helpFlags1"`
|
HelpFlags1 m.HelpFlags1 `json:"helpFlags1"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type DashboardMeta struct {
|
|
||||||
IsStarred bool `json:"isStarred,omitempty"`
|
|
||||||
IsHome bool `json:"isHome,omitempty"`
|
|
||||||
IsSnapshot bool `json:"isSnapshot,omitempty"`
|
|
||||||
Type string `json:"type,omitempty"`
|
|
||||||
CanSave bool `json:"canSave"`
|
|
||||||
CanEdit bool `json:"canEdit"`
|
|
||||||
CanStar bool `json:"canStar"`
|
|
||||||
Slug string `json:"slug"`
|
|
||||||
Expires time.Time `json:"expires"`
|
|
||||||
Created time.Time `json:"created"`
|
|
||||||
Updated time.Time `json:"updated"`
|
|
||||||
UpdatedBy string `json:"updatedBy"`
|
|
||||||
CreatedBy string `json:"createdBy"`
|
|
||||||
Version int `json:"version"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DashboardFullWithMeta struct {
|
|
||||||
Meta DashboardMeta `json:"meta"`
|
|
||||||
Dashboard *simplejson.Json `json:"dashboard"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DashboardRedirect struct {
|
|
||||||
RedirectUri string `json:"redirectUri"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DataSource struct {
|
type DataSource struct {
|
||||||
Id int64 `json:"id"`
|
Id int64 `json:"id"`
|
||||||
OrgId int64 `json:"orgId"`
|
OrgId int64 `json:"orgId"`
|
||||||
@ -84,7 +57,23 @@ type DataSource struct {
|
|||||||
SecureJsonFields map[string]bool `json:"secureJsonFields"`
|
SecureJsonFields map[string]bool `json:"secureJsonFields"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type DataSourceList []DataSource
|
type DataSourceListItemDTO struct {
|
||||||
|
Id int64 `json:"id"`
|
||||||
|
OrgId int64 `json:"orgId"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
TypeLogoUrl string `json:"typeLogoUrl"`
|
||||||
|
Access m.DsAccess `json:"access"`
|
||||||
|
Url string `json:"url"`
|
||||||
|
Password string `json:"password"`
|
||||||
|
User string `json:"user"`
|
||||||
|
Database string `json:"database"`
|
||||||
|
BasicAuth bool `json:"basicAuth"`
|
||||||
|
IsDefault bool `json:"isDefault"`
|
||||||
|
JsonData *simplejson.Json `json:"jsonData,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DataSourceList []DataSourceListItemDTO
|
||||||
|
|
||||||
func (slice DataSourceList) Len() int {
|
func (slice DataSourceList) Len() int {
|
||||||
return len(slice)
|
return len(slice)
|
||||||
|
@ -102,18 +102,15 @@ func getFrontendSettingsMap(c *middleware.Context) (map[string]interface{}, erro
|
|||||||
datasources[ds.Name] = dsMap
|
datasources[ds.Name] = dsMap
|
||||||
}
|
}
|
||||||
|
|
||||||
// add grafana backend data source
|
// add datasources that are built in (meaning they are not added via data sources page, nor have any entry in datasource table)
|
||||||
grafanaDatasourceMeta, _ := plugins.DataSources["grafana"]
|
for _, ds := range plugins.DataSources {
|
||||||
datasources["-- Grafana --"] = map[string]interface{}{
|
if ds.BuiltIn {
|
||||||
"type": "grafana",
|
datasources[ds.Name] = map[string]interface{}{
|
||||||
"name": "-- Grafana --",
|
"type": ds.Type,
|
||||||
"meta": grafanaDatasourceMeta,
|
"name": ds.Name,
|
||||||
}
|
"meta": plugins.DataSources[ds.Id],
|
||||||
|
}
|
||||||
// add mixed backend data source
|
}
|
||||||
datasources["-- Mixed --"] = map[string]interface{}{
|
|
||||||
"type": "mixed",
|
|
||||||
"meta": plugins.DataSources["mixed"],
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if defaultDatasource == "" {
|
if defaultDatasource == "" {
|
||||||
@ -169,10 +166,12 @@ func getPanelSort(id string) int {
|
|||||||
sort = 3
|
sort = 3
|
||||||
case "text":
|
case "text":
|
||||||
sort = 4
|
sort = 4
|
||||||
case "alertlist":
|
case "heatmap":
|
||||||
sort = 5
|
sort = 5
|
||||||
case "dashlist":
|
case "alertlist":
|
||||||
sort = 6
|
sort = 6
|
||||||
|
case "dashlist":
|
||||||
|
sort = 7
|
||||||
}
|
}
|
||||||
return sort
|
return sort
|
||||||
}
|
}
|
||||||
|
@ -13,7 +13,7 @@ import (
|
|||||||
"github.com/grafana/grafana/pkg/util"
|
"github.com/grafana/grafana/pkg/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
var gNetProxyTransport = &http.Transport{
|
var grafanaComProxyTransport = &http.Transport{
|
||||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: false},
|
TLSClientConfig: &tls.Config{InsecureSkipVerify: false},
|
||||||
Proxy: http.ProxyFromEnvironment,
|
Proxy: http.ProxyFromEnvironment,
|
||||||
Dial: (&net.Dialer{
|
Dial: (&net.Dialer{
|
||||||
@ -24,7 +24,7 @@ var gNetProxyTransport = &http.Transport{
|
|||||||
}
|
}
|
||||||
|
|
||||||
func ReverseProxyGnetReq(proxyPath string) *httputil.ReverseProxy {
|
func ReverseProxyGnetReq(proxyPath string) *httputil.ReverseProxy {
|
||||||
url, _ := url.Parse(setting.GrafanaNetUrl)
|
url, _ := url.Parse(setting.GrafanaComUrl)
|
||||||
|
|
||||||
director := func(req *http.Request) {
|
director := func(req *http.Request) {
|
||||||
req.URL.Scheme = url.Scheme
|
req.URL.Scheme = url.Scheme
|
||||||
@ -45,7 +45,7 @@ func ReverseProxyGnetReq(proxyPath string) *httputil.ReverseProxy {
|
|||||||
func ProxyGnetRequest(c *middleware.Context) {
|
func ProxyGnetRequest(c *middleware.Context) {
|
||||||
proxyPath := c.Params("*")
|
proxyPath := c.Params("*")
|
||||||
proxy := ReverseProxyGnetReq(proxyPath)
|
proxy := ReverseProxyGnetReq(proxyPath)
|
||||||
proxy.Transport = gNetProxyTransport
|
proxy.Transport = grafanaComProxyTransport
|
||||||
proxy.ServeHTTP(c.Resp, c.Req.Request)
|
proxy.ServeHTTP(c.Resp, c.Req.Request)
|
||||||
c.Resp.Header().Del("Set-Cookie")
|
c.Resp.Header().Del("Set-Cookie")
|
||||||
}
|
}
|
@ -5,6 +5,7 @@ import (
|
|||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
@ -13,9 +14,12 @@ import (
|
|||||||
|
|
||||||
"github.com/grafana/grafana/pkg/api/live"
|
"github.com/grafana/grafana/pkg/api/live"
|
||||||
httpstatic "github.com/grafana/grafana/pkg/api/static"
|
httpstatic "github.com/grafana/grafana/pkg/api/static"
|
||||||
|
"github.com/grafana/grafana/pkg/bus"
|
||||||
"github.com/grafana/grafana/pkg/cmd/grafana-cli/logger"
|
"github.com/grafana/grafana/pkg/cmd/grafana-cli/logger"
|
||||||
|
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||||
"github.com/grafana/grafana/pkg/log"
|
"github.com/grafana/grafana/pkg/log"
|
||||||
"github.com/grafana/grafana/pkg/middleware"
|
"github.com/grafana/grafana/pkg/middleware"
|
||||||
|
"github.com/grafana/grafana/pkg/models"
|
||||||
"github.com/grafana/grafana/pkg/plugins"
|
"github.com/grafana/grafana/pkg/plugins"
|
||||||
"github.com/grafana/grafana/pkg/setting"
|
"github.com/grafana/grafana/pkg/setting"
|
||||||
)
|
)
|
||||||
@ -46,7 +50,7 @@ func (hs *HttpServer) Start(ctx context.Context) error {
|
|||||||
hs.streamManager.Run(ctx)
|
hs.streamManager.Run(ctx)
|
||||||
|
|
||||||
listenAddr := fmt.Sprintf("%s:%s", setting.HttpAddr, setting.HttpPort)
|
listenAddr := fmt.Sprintf("%s:%s", setting.HttpAddr, setting.HttpPort)
|
||||||
hs.log.Info("Initializing HTTP Server", "address", listenAddr, "protocol", setting.Protocol, "subUrl", setting.AppSubUrl)
|
hs.log.Info("Initializing HTTP Server", "address", listenAddr, "protocol", setting.Protocol, "subUrl", setting.AppSubUrl, "socket", setting.SocketPath)
|
||||||
|
|
||||||
hs.httpSrv = &http.Server{Addr: listenAddr, Handler: hs.macaron}
|
hs.httpSrv = &http.Server{Addr: listenAddr, Handler: hs.macaron}
|
||||||
switch setting.Protocol {
|
switch setting.Protocol {
|
||||||
@ -62,6 +66,18 @@ func (hs *HttpServer) Start(ctx context.Context) error {
|
|||||||
hs.log.Debug("server was shutdown gracefully")
|
hs.log.Debug("server was shutdown gracefully")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
case setting.SOCKET:
|
||||||
|
ln, err := net.Listen("unix", setting.SocketPath)
|
||||||
|
if err != nil {
|
||||||
|
hs.log.Debug("server was shutdown gracefully")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err = hs.httpSrv.Serve(ln)
|
||||||
|
if err != nil {
|
||||||
|
hs.log.Debug("server was shutdown gracefully")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
hs.log.Error("Invalid protocol", "protocol", setting.Protocol)
|
hs.log.Error("Invalid protocol", "protocol", setting.Protocol)
|
||||||
err = errors.New("Invalid Protocol")
|
err = errors.New("Invalid Protocol")
|
||||||
@ -147,6 +163,7 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron {
|
|||||||
Delims: macaron.Delims{Left: "[[", Right: "]]"},
|
Delims: macaron.Delims{Left: "[[", Right: "]]"},
|
||||||
}))
|
}))
|
||||||
|
|
||||||
|
m.Use(hs.healthHandler)
|
||||||
m.Use(middleware.GetContextHandler())
|
m.Use(middleware.GetContextHandler())
|
||||||
m.Use(middleware.Sessioner(&setting.SessionOptions))
|
m.Use(middleware.Sessioner(&setting.SessionOptions))
|
||||||
m.Use(middleware.RequestMetrics())
|
m.Use(middleware.RequestMetrics())
|
||||||
@ -160,6 +177,29 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron {
|
|||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (hs *HttpServer) healthHandler(ctx *macaron.Context) {
|
||||||
|
if ctx.Req.Method != "GET" || ctx.Req.URL.Path != "/api/health" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
data := simplejson.New()
|
||||||
|
data.Set("database", "ok")
|
||||||
|
data.Set("version", setting.BuildVersion)
|
||||||
|
data.Set("commit", setting.BuildCommit)
|
||||||
|
|
||||||
|
if err := bus.Dispatch(&models.GetDBHealthQuery{}); err != nil {
|
||||||
|
data.Set("database", "failing")
|
||||||
|
ctx.Resp.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||||
|
ctx.Resp.WriteHeader(503)
|
||||||
|
} else {
|
||||||
|
ctx.Resp.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||||
|
ctx.Resp.WriteHeader(200)
|
||||||
|
}
|
||||||
|
|
||||||
|
dataBytes, _ := data.EncodePretty()
|
||||||
|
ctx.Resp.Write(dataBytes)
|
||||||
|
}
|
||||||
|
|
||||||
func (hs *HttpServer) mapStatic(m *macaron.Macaron, rootDir string, dir string, prefix string) {
|
func (hs *HttpServer) mapStatic(m *macaron.Macaron, rootDir string, dir string, prefix string) {
|
||||||
headers := func(c *macaron.Context) {
|
headers := func(c *macaron.Context) {
|
||||||
c.Resp.Header().Set("Cache-Control", "public, max-age=3600")
|
c.Resp.Header().Set("Cache-Control", "public, max-age=3600")
|
||||||
|
@ -11,7 +11,6 @@ import (
|
|||||||
"github.com/grafana/grafana/pkg/middleware"
|
"github.com/grafana/grafana/pkg/middleware"
|
||||||
m "github.com/grafana/grafana/pkg/models"
|
m "github.com/grafana/grafana/pkg/models"
|
||||||
"github.com/grafana/grafana/pkg/setting"
|
"github.com/grafana/grafana/pkg/setting"
|
||||||
"github.com/grafana/grafana/pkg/util"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -79,8 +78,7 @@ func tryLoginUsingRememberCookie(c *middleware.Context) bool {
|
|||||||
user := userQuery.Result
|
user := userQuery.Result
|
||||||
|
|
||||||
// validate remember me cookie
|
// validate remember me cookie
|
||||||
if val, _ := c.GetSuperSecureCookie(
|
if val, _ := c.GetSuperSecureCookie(user.Rands+user.Password, setting.CookieRememberName); val != user.Login {
|
||||||
util.EncodeMd5(user.Rands+user.Password), setting.CookieRememberName); val != user.Login {
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -142,7 +140,7 @@ func loginUserWithUser(user *m.User, c *middleware.Context) {
|
|||||||
days := 86400 * setting.LogInRememberDays
|
days := 86400 * setting.LogInRememberDays
|
||||||
if days > 0 {
|
if days > 0 {
|
||||||
c.SetCookie(setting.CookieUserName, user.Login, days, setting.AppSubUrl+"/")
|
c.SetCookie(setting.CookieUserName, user.Login, days, setting.AppSubUrl+"/")
|
||||||
c.SetSuperSecureCookie(util.EncodeMd5(user.Rands+user.Password), setting.CookieRememberName, user.Login, days, setting.AppSubUrl+"/")
|
c.SetSuperSecureCookie(user.Rands+user.Password, setting.CookieRememberName, user.Login, days, setting.AppSubUrl+"/")
|
||||||
}
|
}
|
||||||
|
|
||||||
c.Session.Set(middleware.SESS_KEY_USERID, user.Id)
|
c.Session.Set(middleware.SESS_KEY_USERID, user.Id)
|
||||||
|
@ -28,6 +28,7 @@ var (
|
|||||||
ErrEmailNotAllowed = errors.New("Required email domain not fulfilled")
|
ErrEmailNotAllowed = errors.New("Required email domain not fulfilled")
|
||||||
ErrSignUpNotAllowed = errors.New("Signup is not allowed for this adapter")
|
ErrSignUpNotAllowed = errors.New("Signup is not allowed for this adapter")
|
||||||
ErrUsersQuotaReached = errors.New("Users quota reached")
|
ErrUsersQuotaReached = errors.New("Users quota reached")
|
||||||
|
ErrNoEmail = errors.New("Login provider didn't return an email address")
|
||||||
)
|
)
|
||||||
|
|
||||||
func GenStateString() string {
|
func GenStateString() string {
|
||||||
@ -63,7 +64,7 @@ func OAuthLogin(ctx *middleware.Context) {
|
|||||||
if setting.OAuthService.OAuthInfos[name].HostedDomain == "" {
|
if setting.OAuthService.OAuthInfos[name].HostedDomain == "" {
|
||||||
ctx.Redirect(connect.AuthCodeURL(state, oauth2.AccessTypeOnline))
|
ctx.Redirect(connect.AuthCodeURL(state, oauth2.AccessTypeOnline))
|
||||||
} else {
|
} else {
|
||||||
ctx.Redirect(connect.AuthCodeURL(state, oauth2.SetParam("hd", setting.OAuthService.OAuthInfos[name].HostedDomain), oauth2.AccessTypeOnline))
|
ctx.Redirect(connect.AuthCodeURL(state, oauth2.SetAuthURLParam("hd", setting.OAuthService.OAuthInfos[name].HostedDomain), oauth2.AccessTypeOnline))
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -134,6 +135,12 @@ func OAuthLogin(ctx *middleware.Context) {
|
|||||||
|
|
||||||
ctx.Logger.Debug("OAuthLogin got user info", "userInfo", userInfo)
|
ctx.Logger.Debug("OAuthLogin got user info", "userInfo", userInfo)
|
||||||
|
|
||||||
|
// validate that we got at least an email address
|
||||||
|
if userInfo.Email == "" {
|
||||||
|
redirectWithError(ctx, ErrNoEmail)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// validate that the email is allowed to login to grafana
|
// validate that the email is allowed to login to grafana
|
||||||
if !connect.IsEmailAllowed(userInfo.Email) {
|
if !connect.IsEmailAllowed(userInfo.Email) {
|
||||||
redirectWithError(ctx, ErrEmailNotAllowed)
|
redirectWithError(ctx, ErrEmailNotAllowed)
|
||||||
|
@ -7,6 +7,7 @@ import (
|
|||||||
|
|
||||||
"github.com/grafana/grafana/pkg/api/dtos"
|
"github.com/grafana/grafana/pkg/api/dtos"
|
||||||
"github.com/grafana/grafana/pkg/bus"
|
"github.com/grafana/grafana/pkg/bus"
|
||||||
|
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||||
"github.com/grafana/grafana/pkg/metrics"
|
"github.com/grafana/grafana/pkg/metrics"
|
||||||
"github.com/grafana/grafana/pkg/middleware"
|
"github.com/grafana/grafana/pkg/middleware"
|
||||||
"github.com/grafana/grafana/pkg/models"
|
"github.com/grafana/grafana/pkg/models"
|
||||||
@ -50,13 +51,16 @@ func QueryMetrics(c *middleware.Context, reqDto dtos.MetricRequest) Response {
|
|||||||
return ApiError(500, "Metric request error", err)
|
return ApiError(500, "Metric request error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
statusCode := 200
|
||||||
for _, res := range resp.Results {
|
for _, res := range resp.Results {
|
||||||
if res.Error != nil {
|
if res.Error != nil {
|
||||||
res.ErrorString = res.Error.Error()
|
res.ErrorString = res.Error.Error()
|
||||||
|
resp.Message = res.ErrorString
|
||||||
|
statusCode = 500
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return Json(200, &resp)
|
return Json(statusCode, &resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GET /api/tsdb/testdata/scenarios
|
// GET /api/tsdb/testdata/scenarios
|
||||||
@ -141,3 +145,29 @@ func GenerateSqlTestData(c *middleware.Context) Response {
|
|||||||
|
|
||||||
return Json(200, &util.DynMap{"message": "OK"})
|
return Json(200, &util.DynMap{"message": "OK"})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GET /api/tsdb/testdata/random-walk
|
||||||
|
func GetTestDataRandomWalk(c *middleware.Context) Response {
|
||||||
|
from := c.Query("from")
|
||||||
|
to := c.Query("to")
|
||||||
|
intervalMs := c.QueryInt64("intervalMs")
|
||||||
|
|
||||||
|
timeRange := tsdb.NewTimeRange(from, to)
|
||||||
|
request := &tsdb.Request{TimeRange: timeRange}
|
||||||
|
|
||||||
|
request.Queries = append(request.Queries, &tsdb.Query{
|
||||||
|
RefId: "A",
|
||||||
|
IntervalMs: intervalMs,
|
||||||
|
Model: simplejson.NewFromAny(&util.DynMap{
|
||||||
|
"scenario": "random_walk",
|
||||||
|
}),
|
||||||
|
DataSource: &models.DataSource{Type: "grafana-testdata-datasource"},
|
||||||
|
})
|
||||||
|
|
||||||
|
resp, err := tsdb.HandleRequest(context.Background(), request)
|
||||||
|
if err != nil {
|
||||||
|
return ApiError(500, "Metric request error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return Json(200, &resp)
|
||||||
|
}
|
||||||
|
@ -12,7 +12,8 @@ func SendResetPasswordEmail(c *middleware.Context, form dtos.SendResetPasswordEm
|
|||||||
userQuery := m.GetUserByLoginQuery{LoginOrEmail: form.UserOrEmail}
|
userQuery := m.GetUserByLoginQuery{LoginOrEmail: form.UserOrEmail}
|
||||||
|
|
||||||
if err := bus.Dispatch(&userQuery); err != nil {
|
if err := bus.Dispatch(&userQuery); err != nil {
|
||||||
return ApiError(404, "User does not exist", err)
|
c.Logger.Info("Requested password reset for user that was not found", "user", userQuery.LoginOrEmail)
|
||||||
|
return ApiError(200, "Email sent", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
emailCmd := m.SendResetPasswordEmailCommand{User: userQuery.Result}
|
emailCmd := m.SendResetPasswordEmailCommand{User: userQuery.Result}
|
||||||
|
@ -91,6 +91,6 @@ func LoadPlaylistDashboards(orgId, userId, playlistId int64) (dtos.PlaylistDashb
|
|||||||
result = append(result, k...)
|
result = append(result, k...)
|
||||||
result = append(result, populateDashboardsByTag(orgId, userId, dashboardByTag, dashboardTagOrder)...)
|
result = append(result, populateDashboardsByTag(orgId, userId, dashboardByTag, dashboardTagOrder)...)
|
||||||
|
|
||||||
sort.Sort(sort.Reverse(result))
|
sort.Sort(result)
|
||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
@ -5,6 +5,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httputil"
|
"net/http/httputil"
|
||||||
"net/url"
|
"net/url"
|
||||||
@ -71,7 +72,25 @@ func NewApiPluginProxy(ctx *middleware.Context, proxyPath string, route *plugins
|
|||||||
req.Header.Del("Cookie")
|
req.Header.Del("Cookie")
|
||||||
req.Header.Del("Set-Cookie")
|
req.Header.Del("Set-Cookie")
|
||||||
|
|
||||||
//Create a HTTP header with the context in it.
|
// clear X-Forwarded Host/Port/Proto headers
|
||||||
|
req.Header.Del("X-Forwarded-Host")
|
||||||
|
req.Header.Del("X-Forwarded-Port")
|
||||||
|
req.Header.Del("X-Forwarded-Proto")
|
||||||
|
|
||||||
|
// set X-Forwarded-For header
|
||||||
|
if req.RemoteAddr != "" {
|
||||||
|
remoteAddr, _, err := net.SplitHostPort(req.RemoteAddr)
|
||||||
|
if err != nil {
|
||||||
|
remoteAddr = req.RemoteAddr
|
||||||
|
}
|
||||||
|
if req.Header.Get("X-Forwarded-For") != "" {
|
||||||
|
req.Header.Set("X-Forwarded-For", req.Header.Get("X-Forwarded-For")+", "+remoteAddr)
|
||||||
|
} else {
|
||||||
|
req.Header.Set("X-Forwarded-For", remoteAddr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a HTTP header with the context in it.
|
||||||
ctxJson, err := json.Marshal(ctx.SignedInUser)
|
ctxJson, err := json.Marshal(ctx.SignedInUser)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ctx.JsonApiErr(500, "failed to marshal context to json.", err)
|
ctx.JsonApiErr(500, "failed to marshal context to json.", err)
|
||||||
@ -93,6 +112,8 @@ func NewApiPluginProxy(ctx *middleware.Context, proxyPath string, route *plugins
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// reqBytes, _ := httputil.DumpRequestOut(req, true);
|
||||||
|
// log.Trace("Proxying plugin request: %s", string(reqBytes))
|
||||||
}
|
}
|
||||||
|
|
||||||
return &httputil.ReverseProxy{Director: director}
|
return &httputil.ReverseProxy{Director: director}
|
||||||
|
@ -23,9 +23,14 @@ func TestPluginProxy(t *testing.T) {
|
|||||||
setting.SecretKey = "password"
|
setting.SecretKey = "password"
|
||||||
|
|
||||||
bus.AddHandler("test", func(query *m.GetPluginSettingByIdQuery) error {
|
bus.AddHandler("test", func(query *m.GetPluginSettingByIdQuery) error {
|
||||||
|
key, err := util.Encrypt([]byte("123"), "password")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
query.Result = &m.PluginSetting{
|
query.Result = &m.PluginSetting{
|
||||||
SecureJsonData: map[string][]byte{
|
SecureJsonData: map[string][]byte{
|
||||||
"key": util.Encrypt([]byte("123"), "password"),
|
"key": key,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -11,22 +11,18 @@ import (
|
|||||||
"github.com/grafana/grafana/pkg/setting"
|
"github.com/grafana/grafana/pkg/setting"
|
||||||
)
|
)
|
||||||
|
|
||||||
var configFile = flag.String("config", "", "path to config file")
|
|
||||||
var homePath = flag.String("homepath", "", "path to grafana install/home path, defaults to working directory")
|
|
||||||
|
|
||||||
func runDbCommand(command func(commandLine CommandLine) error) func(context *cli.Context) {
|
func runDbCommand(command func(commandLine CommandLine) error) func(context *cli.Context) {
|
||||||
return func(context *cli.Context) {
|
return func(context *cli.Context) {
|
||||||
|
cmd := &contextCommandLine{context}
|
||||||
|
|
||||||
flag.Parse()
|
|
||||||
setting.NewConfigContext(&setting.CommandLineArgs{
|
setting.NewConfigContext(&setting.CommandLineArgs{
|
||||||
Config: *configFile,
|
Config: cmd.String("config"),
|
||||||
HomePath: *homePath,
|
HomePath: cmd.String("homepath"),
|
||||||
Args: flag.Args(),
|
Args: flag.Args(),
|
||||||
})
|
})
|
||||||
|
|
||||||
sqlstore.NewEngine()
|
sqlstore.NewEngine()
|
||||||
|
|
||||||
cmd := &contextCommandLine{context}
|
|
||||||
if err := command(cmd); err != nil {
|
if err := command(cmd); err != nil {
|
||||||
logger.Errorf("\n%s: ", color.RedString("Error"))
|
logger.Errorf("\n%s: ", color.RedString("Error"))
|
||||||
logger.Errorf("%s\n\n", err)
|
logger.Errorf("%s\n\n", err)
|
||||||
@ -95,6 +91,16 @@ var adminCommands = []cli.Command{
|
|||||||
Name: "reset-admin-password",
|
Name: "reset-admin-password",
|
||||||
Usage: "reset-admin-password <new password>",
|
Usage: "reset-admin-password <new password>",
|
||||||
Action: runDbCommand(resetPasswordCommand),
|
Action: runDbCommand(resetPasswordCommand),
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "homepath",
|
||||||
|
Usage: "path to grafana install/home path, defaults to working directory",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "config",
|
||||||
|
Usage: "path to config file",
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8,10 +8,14 @@ import (
|
|||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"runtime/trace"
|
||||||
"strconv"
|
"strconv"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"net/http"
|
||||||
|
_ "net/http/pprof"
|
||||||
|
|
||||||
"github.com/grafana/grafana/pkg/log"
|
"github.com/grafana/grafana/pkg/log"
|
||||||
"github.com/grafana/grafana/pkg/models"
|
"github.com/grafana/grafana/pkg/models"
|
||||||
"github.com/grafana/grafana/pkg/services/sqlstore"
|
"github.com/grafana/grafana/pkg/services/sqlstore"
|
||||||
@ -44,12 +48,33 @@ func init() {
|
|||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
v := flag.Bool("v", false, "prints current version and exits")
|
v := flag.Bool("v", false, "prints current version and exits")
|
||||||
|
profile := flag.Bool("profile", false, "Turn on pprof profiling")
|
||||||
|
profilePort := flag.Int("profile-port", 6060, "Define custom port for profiling")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
if *v {
|
if *v {
|
||||||
fmt.Printf("Version %s (commit: %s)\n", version, commit)
|
fmt.Printf("Version %s (commit: %s)\n", version, commit)
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if *profile {
|
||||||
|
runtime.SetBlockProfileRate(1)
|
||||||
|
go func() {
|
||||||
|
http.ListenAndServe(fmt.Sprintf("localhost:%d", *profilePort), nil)
|
||||||
|
}()
|
||||||
|
|
||||||
|
f, err := os.Create("trace.out")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
err = trace.Start(f)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
defer trace.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64)
|
buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64)
|
||||||
if buildstampInt64 == 0 {
|
if buildstampInt64 == 0 {
|
||||||
buildstampInt64 = time.Now().Unix()
|
buildstampInt64 = time.Now().Unix()
|
||||||
@ -113,6 +138,8 @@ func listenToSystemSignals(server models.GrafanaServer) {
|
|||||||
|
|
||||||
select {
|
select {
|
||||||
case sig := <-signalChan:
|
case sig := <-signalChan:
|
||||||
|
// Stops trace if profiling has been enabled
|
||||||
|
trace.Stop()
|
||||||
server.Shutdown(0, fmt.Sprintf("system signal: %s", sig))
|
server.Shutdown(0, fmt.Sprintf("system signal: %s", sig))
|
||||||
case code = <-exitChan:
|
case code = <-exitChan:
|
||||||
server.Shutdown(code, "startup error")
|
server.Shutdown(code, "startup error")
|
||||||
|
149
pkg/components/dashdiffs/compare.go
Normal file
149
pkg/components/dashdiffs/compare.go
Normal file
@ -0,0 +1,149 @@
|
|||||||
|
package dashdiffs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/bus"
|
||||||
|
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||||
|
"github.com/grafana/grafana/pkg/log"
|
||||||
|
"github.com/grafana/grafana/pkg/models"
|
||||||
|
diff "github.com/yudai/gojsondiff"
|
||||||
|
deltaFormatter "github.com/yudai/gojsondiff/formatter"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrUnsupportedDiffType occurs when an invalid diff type is used.
|
||||||
|
ErrUnsupportedDiffType = errors.New("dashdiff: unsupported diff type")
|
||||||
|
|
||||||
|
// ErrNilDiff occurs when two compared interfaces are identical.
|
||||||
|
ErrNilDiff = errors.New("dashdiff: diff is nil")
|
||||||
|
|
||||||
|
diffLogger = log.New("dashdiffs")
|
||||||
|
)
|
||||||
|
|
||||||
|
type DiffType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
DiffJSON DiffType = iota
|
||||||
|
DiffBasic
|
||||||
|
DiffDelta
|
||||||
|
)
|
||||||
|
|
||||||
|
type Options struct {
|
||||||
|
OrgId int64
|
||||||
|
Base DiffTarget
|
||||||
|
New DiffTarget
|
||||||
|
DiffType DiffType
|
||||||
|
}
|
||||||
|
|
||||||
|
type DiffTarget struct {
|
||||||
|
DashboardId int64
|
||||||
|
Version int
|
||||||
|
UnsavedDashboard *simplejson.Json
|
||||||
|
}
|
||||||
|
|
||||||
|
type Result struct {
|
||||||
|
Delta []byte `json:"delta"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func ParseDiffType(diff string) DiffType {
|
||||||
|
switch diff {
|
||||||
|
case "json":
|
||||||
|
return DiffJSON
|
||||||
|
case "basic":
|
||||||
|
return DiffBasic
|
||||||
|
case "delta":
|
||||||
|
return DiffDelta
|
||||||
|
}
|
||||||
|
return DiffBasic
|
||||||
|
}
|
||||||
|
|
||||||
|
// CompareDashboardVersionsCommand computes the JSON diff of two versions,
|
||||||
|
// assigning the delta of the diff to the `Delta` field.
|
||||||
|
func CalculateDiff(options *Options) (*Result, error) {
|
||||||
|
baseVersionQuery := models.GetDashboardVersionQuery{
|
||||||
|
DashboardId: options.Base.DashboardId,
|
||||||
|
Version: options.Base.Version,
|
||||||
|
OrgId: options.OrgId,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := bus.Dispatch(&baseVersionQuery); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
newVersionQuery := models.GetDashboardVersionQuery{
|
||||||
|
DashboardId: options.New.DashboardId,
|
||||||
|
Version: options.New.Version,
|
||||||
|
OrgId: options.OrgId,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := bus.Dispatch(&newVersionQuery); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
baseData := baseVersionQuery.Result.Data
|
||||||
|
newData := newVersionQuery.Result.Data
|
||||||
|
|
||||||
|
left, jsonDiff, err := getDiff(baseData, newData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result := &Result{}
|
||||||
|
|
||||||
|
switch options.DiffType {
|
||||||
|
case DiffDelta:
|
||||||
|
|
||||||
|
deltaOutput, err := deltaFormatter.NewDeltaFormatter().Format(jsonDiff)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result.Delta = []byte(deltaOutput)
|
||||||
|
|
||||||
|
case DiffJSON:
|
||||||
|
jsonOutput, err := NewJSONFormatter(left).Format(jsonDiff)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result.Delta = []byte(jsonOutput)
|
||||||
|
|
||||||
|
case DiffBasic:
|
||||||
|
basicOutput, err := NewBasicFormatter(left).Format(jsonDiff)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result.Delta = basicOutput
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, ErrUnsupportedDiffType
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDiff computes the diff of two dashboard versions.
|
||||||
|
func getDiff(baseData, newData *simplejson.Json) (interface{}, diff.Diff, error) {
|
||||||
|
leftBytes, err := baseData.Encode()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
rightBytes, err := newData.Encode()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
jsonDiff, err := diff.New().Compare(leftBytes, rightBytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !jsonDiff.Modified() {
|
||||||
|
return nil, nil, ErrNilDiff
|
||||||
|
}
|
||||||
|
|
||||||
|
left := make(map[string]interface{})
|
||||||
|
err = json.Unmarshal(leftBytes, &left)
|
||||||
|
return left, jsonDiff, nil
|
||||||
|
}
|
339
pkg/components/dashdiffs/formatter_basic.go
Normal file
339
pkg/components/dashdiffs/formatter_basic.go
Normal file
@ -0,0 +1,339 @@
|
|||||||
|
package dashdiffs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"html/template"
|
||||||
|
|
||||||
|
diff "github.com/yudai/gojsondiff"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A BasicDiff holds the stateful values that are used when generating a basic
|
||||||
|
// diff from JSON tokens.
|
||||||
|
type BasicDiff struct {
|
||||||
|
narrow string
|
||||||
|
keysIdent int
|
||||||
|
writing bool
|
||||||
|
LastIndent int
|
||||||
|
Block *BasicBlock
|
||||||
|
Change *BasicChange
|
||||||
|
Summary *BasicSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
// A BasicBlock represents a top-level element in a basic diff.
|
||||||
|
type BasicBlock struct {
|
||||||
|
Title string
|
||||||
|
Old interface{}
|
||||||
|
New interface{}
|
||||||
|
Change ChangeType
|
||||||
|
Changes []*BasicChange
|
||||||
|
Summaries []*BasicSummary
|
||||||
|
LineStart int
|
||||||
|
LineEnd int
|
||||||
|
}
|
||||||
|
|
||||||
|
// A BasicChange represents the change from an old to new value. There are many
|
||||||
|
// BasicChanges in a BasicBlock.
|
||||||
|
type BasicChange struct {
|
||||||
|
Key string
|
||||||
|
Old interface{}
|
||||||
|
New interface{}
|
||||||
|
Change ChangeType
|
||||||
|
LineStart int
|
||||||
|
LineEnd int
|
||||||
|
}
|
||||||
|
|
||||||
|
// A BasicSummary represents the changes within a basic block that're too deep
|
||||||
|
// or verbose to be represented in the top-level BasicBlock element, or in the
|
||||||
|
// BasicChange. Instead of showing the values in this case, we simply print
|
||||||
|
// the key and count how many times the given change was applied to that
|
||||||
|
// element.
|
||||||
|
type BasicSummary struct {
|
||||||
|
Key string
|
||||||
|
Change ChangeType
|
||||||
|
Count int
|
||||||
|
LineStart int
|
||||||
|
LineEnd int
|
||||||
|
}
|
||||||
|
|
||||||
|
type BasicFormatter struct {
|
||||||
|
jsonDiff *JSONFormatter
|
||||||
|
tpl *template.Template
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBasicFormatter(left interface{}) *BasicFormatter {
|
||||||
|
tpl := template.Must(template.New("block").Funcs(tplFuncMap).Parse(tplBlock))
|
||||||
|
tpl = template.Must(tpl.New("change").Funcs(tplFuncMap).Parse(tplChange))
|
||||||
|
tpl = template.Must(tpl.New("summary").Funcs(tplFuncMap).Parse(tplSummary))
|
||||||
|
|
||||||
|
return &BasicFormatter{
|
||||||
|
jsonDiff: NewJSONFormatter(left),
|
||||||
|
tpl: tpl,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BasicFormatter) Format(d diff.Diff) ([]byte, error) {
|
||||||
|
// calling jsonDiff.Format(d) populates the JSON diff's "Lines" value,
|
||||||
|
// which we use to compute the basic dif
|
||||||
|
_, err := b.jsonDiff.Format(d)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bd := &BasicDiff{}
|
||||||
|
blocks := bd.Basic(b.jsonDiff.Lines)
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
|
||||||
|
err = b.tpl.ExecuteTemplate(buf, "block", blocks)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Basic is V2 of the basic diff
|
||||||
|
func (b *BasicDiff) Basic(lines []*JSONLine) []*BasicBlock {
|
||||||
|
// init an array you can append to for the basic "blocks"
|
||||||
|
blocks := make([]*BasicBlock, 0)
|
||||||
|
|
||||||
|
// iterate through each line
|
||||||
|
for _, line := range lines {
|
||||||
|
// TODO: this condition needs an explaination? what does it mean?
|
||||||
|
if b.LastIndent == 2 && line.Indent == 1 && line.Change == ChangeNil {
|
||||||
|
if b.Block != nil {
|
||||||
|
blocks = append(blocks, b.Block)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b.LastIndent = line.Indent
|
||||||
|
|
||||||
|
// TODO: why special handling for indent 2?
|
||||||
|
if line.Indent == 1 {
|
||||||
|
switch line.Change {
|
||||||
|
case ChangeNil:
|
||||||
|
if line.Change == ChangeNil {
|
||||||
|
if line.Key != "" {
|
||||||
|
b.Block = &BasicBlock{
|
||||||
|
Title: line.Key,
|
||||||
|
Change: line.Change,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case ChangeAdded, ChangeDeleted:
|
||||||
|
blocks = append(blocks, &BasicBlock{
|
||||||
|
Title: line.Key,
|
||||||
|
Change: line.Change,
|
||||||
|
New: line.Val,
|
||||||
|
LineStart: line.LineNum,
|
||||||
|
})
|
||||||
|
|
||||||
|
case ChangeOld:
|
||||||
|
b.Block = &BasicBlock{
|
||||||
|
Title: line.Key,
|
||||||
|
Old: line.Val,
|
||||||
|
Change: line.Change,
|
||||||
|
LineStart: line.LineNum,
|
||||||
|
}
|
||||||
|
|
||||||
|
case ChangeNew:
|
||||||
|
b.Block.New = line.Val
|
||||||
|
b.Block.LineEnd = line.LineNum
|
||||||
|
|
||||||
|
// then write out the change
|
||||||
|
blocks = append(blocks, b.Block)
|
||||||
|
default:
|
||||||
|
// ok
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: why special handling for indent > 2 ?
|
||||||
|
// Other Lines
|
||||||
|
if line.Indent > 1 {
|
||||||
|
// Ensure single line change
|
||||||
|
if line.Key != "" && line.Val != nil && !b.writing {
|
||||||
|
switch line.Change {
|
||||||
|
case ChangeAdded, ChangeDeleted:
|
||||||
|
|
||||||
|
b.Block.Changes = append(b.Block.Changes, &BasicChange{
|
||||||
|
Key: line.Key,
|
||||||
|
Change: line.Change,
|
||||||
|
New: line.Val,
|
||||||
|
LineStart: line.LineNum,
|
||||||
|
})
|
||||||
|
|
||||||
|
case ChangeOld:
|
||||||
|
b.Change = &BasicChange{
|
||||||
|
Key: line.Key,
|
||||||
|
Change: line.Change,
|
||||||
|
Old: line.Val,
|
||||||
|
LineStart: line.LineNum,
|
||||||
|
}
|
||||||
|
|
||||||
|
case ChangeNew:
|
||||||
|
b.Change.New = line.Val
|
||||||
|
b.Change.LineEnd = line.LineNum
|
||||||
|
b.Block.Changes = append(b.Block.Changes, b.Change)
|
||||||
|
|
||||||
|
default:
|
||||||
|
//ok
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
if line.Change != ChangeUnchanged {
|
||||||
|
if line.Key != "" {
|
||||||
|
b.narrow = line.Key
|
||||||
|
b.keysIdent = line.Indent
|
||||||
|
}
|
||||||
|
|
||||||
|
if line.Change != ChangeNil {
|
||||||
|
if !b.writing {
|
||||||
|
b.writing = true
|
||||||
|
key := b.Block.Title
|
||||||
|
|
||||||
|
if b.narrow != "" {
|
||||||
|
key = b.narrow
|
||||||
|
if b.keysIdent > line.Indent {
|
||||||
|
key = b.Block.Title
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b.Summary = &BasicSummary{
|
||||||
|
Key: key,
|
||||||
|
Change: line.Change,
|
||||||
|
LineStart: line.LineNum,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if b.writing {
|
||||||
|
b.writing = false
|
||||||
|
b.Summary.LineEnd = line.LineNum
|
||||||
|
b.Block.Summaries = append(b.Block.Summaries, b.Summary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return blocks
|
||||||
|
}
|
||||||
|
|
||||||
|
// encStateMap is used in the template helper
|
||||||
|
var (
|
||||||
|
encStateMap = map[ChangeType]string{
|
||||||
|
ChangeAdded: "added",
|
||||||
|
ChangeDeleted: "deleted",
|
||||||
|
ChangeOld: "changed",
|
||||||
|
ChangeNew: "changed",
|
||||||
|
}
|
||||||
|
|
||||||
|
// tplFuncMap is the function map for each template
|
||||||
|
tplFuncMap = template.FuncMap{
|
||||||
|
"getChange": func(c ChangeType) string {
|
||||||
|
state, ok := encStateMap[c]
|
||||||
|
if !ok {
|
||||||
|
return "changed"
|
||||||
|
}
|
||||||
|
return state
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// tplBlock is the whole thing
|
||||||
|
tplBlock = `{{ define "block" -}}
|
||||||
|
{{ range . }}
|
||||||
|
<div class="diff-group">
|
||||||
|
<div class="diff-block">
|
||||||
|
<h2 class="diff-block-title">
|
||||||
|
<i class="diff-circle diff-circle-{{ getChange .Change }} fa fa-circle"></i>
|
||||||
|
<strong class="diff-title">{{ .Title }}</strong> {{ getChange .Change }}
|
||||||
|
</h2>
|
||||||
|
|
||||||
|
|
||||||
|
<!-- Overview -->
|
||||||
|
{{ if .Old }}
|
||||||
|
<div class="diff-label">{{ .Old }}</div>
|
||||||
|
<i class="diff-arrow fa fa-long-arrow-right"></i>
|
||||||
|
{{ end }}
|
||||||
|
{{ if .New }}
|
||||||
|
<div class="diff-label">{{ .New }}</div>
|
||||||
|
{{ end }}
|
||||||
|
|
||||||
|
{{ if .LineStart }}
|
||||||
|
<diff-link-json
|
||||||
|
line-link="{{ .LineStart }}"
|
||||||
|
line-display="{{ .LineStart }}{{ if .LineEnd }} - {{ .LineEnd }}{{ end }}"
|
||||||
|
switch-view="ctrl.getDiff('html')"
|
||||||
|
/>
|
||||||
|
{{ end }}
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Basic Changes -->
|
||||||
|
{{ range .Changes }}
|
||||||
|
<ul class="diff-change-container">
|
||||||
|
{{ template "change" . }}
|
||||||
|
</ul>
|
||||||
|
{{ end }}
|
||||||
|
|
||||||
|
<!-- Basic Summary -->
|
||||||
|
{{ range .Summaries }}
|
||||||
|
{{ template "summary" . }}
|
||||||
|
{{ end }}
|
||||||
|
|
||||||
|
</div>
|
||||||
|
{{ end }}
|
||||||
|
{{ end }}`
|
||||||
|
|
||||||
|
// tplChange is the template for changes
|
||||||
|
tplChange = `{{ define "change" -}}
|
||||||
|
<li class="diff-change-group">
|
||||||
|
<span class="bullet-position-container">
|
||||||
|
<div class="diff-change-item diff-change-title">{{ getChange .Change }} {{ .Key }}</div>
|
||||||
|
|
||||||
|
<div class="diff-change-item">
|
||||||
|
{{ if .Old }}
|
||||||
|
<div class="diff-label">{{ .Old }}</div>
|
||||||
|
<i class="diff-arrow fa fa-long-arrow-right"></i>
|
||||||
|
{{ end }}
|
||||||
|
{{ if .New }}
|
||||||
|
<div class="diff-label">{{ .New }}</div>
|
||||||
|
{{ end }}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{{ if .LineStart }}
|
||||||
|
<diff-link-json
|
||||||
|
line-link="{{ .LineStart }}"
|
||||||
|
line-display="{{ .LineStart }}{{ if .LineEnd }} - {{ .LineEnd }}{{ end }}"
|
||||||
|
switch-view="ctrl.getDiff('json')"
|
||||||
|
/>
|
||||||
|
{{ end }}
|
||||||
|
</span>
|
||||||
|
</li>
|
||||||
|
{{ end }}`
|
||||||
|
|
||||||
|
// tplSummary is for basis summaries
|
||||||
|
tplSummary = `{{ define "summary" -}}
|
||||||
|
<div class="diff-group-name">
|
||||||
|
<i class="diff-circle diff-circle-{{ getChange .Change }} fa fa-circle-o diff-list-circle"></i>
|
||||||
|
|
||||||
|
{{ if .Count }}
|
||||||
|
<strong>{{ .Count }}</strong>
|
||||||
|
{{ end }}
|
||||||
|
|
||||||
|
{{ if .Key }}
|
||||||
|
<strong class="diff-summary-key">{{ .Key }}</strong>
|
||||||
|
{{ getChange .Change }}
|
||||||
|
{{ end }}
|
||||||
|
|
||||||
|
{{ if .LineStart }}
|
||||||
|
<diff-link-json
|
||||||
|
line-link="{{ .LineStart }}"
|
||||||
|
line-display="{{ .LineStart }}{{ if .LineEnd }} - {{ .LineEnd }}{{ end }}"
|
||||||
|
switch-view="ctrl.getDiff('json')"
|
||||||
|
/>
|
||||||
|
{{ end }}
|
||||||
|
</div>
|
||||||
|
{{ end }}`
|
||||||
|
)
|
477
pkg/components/dashdiffs/formatter_json.go
Normal file
477
pkg/components/dashdiffs/formatter_json.go
Normal file
@ -0,0 +1,477 @@
|
|||||||
|
package dashdiffs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"html/template"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
diff "github.com/yudai/gojsondiff"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ChangeType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
ChangeNil ChangeType = iota
|
||||||
|
ChangeAdded
|
||||||
|
ChangeDeleted
|
||||||
|
ChangeOld
|
||||||
|
ChangeNew
|
||||||
|
ChangeUnchanged
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// changeTypeToSymbol is used for populating the terminating characer in
|
||||||
|
// the diff
|
||||||
|
changeTypeToSymbol = map[ChangeType]string{
|
||||||
|
ChangeNil: "",
|
||||||
|
ChangeAdded: "+",
|
||||||
|
ChangeDeleted: "-",
|
||||||
|
ChangeOld: "-",
|
||||||
|
ChangeNew: "+",
|
||||||
|
}
|
||||||
|
|
||||||
|
// changeTypeToName is used for populating class names in the diff
|
||||||
|
changeTypeToName = map[ChangeType]string{
|
||||||
|
ChangeNil: "same",
|
||||||
|
ChangeAdded: "added",
|
||||||
|
ChangeDeleted: "deleted",
|
||||||
|
ChangeOld: "old",
|
||||||
|
ChangeNew: "new",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// tplJSONDiffWrapper is the template that wraps a diff
|
||||||
|
tplJSONDiffWrapper = `{{ define "JSONDiffWrapper" -}}
|
||||||
|
{{ range $index, $element := . }}
|
||||||
|
{{ template "JSONDiffLine" $element }}
|
||||||
|
{{ end }}
|
||||||
|
{{ end }}`
|
||||||
|
|
||||||
|
// tplJSONDiffLine is the template that prints each line in a diff
|
||||||
|
tplJSONDiffLine = `{{ define "JSONDiffLine" -}}
|
||||||
|
<p id="l{{ .LineNum }}" class="diff-line diff-json-{{ cton .Change }}">
|
||||||
|
<span class="diff-line-number">
|
||||||
|
{{if .LeftLine }}{{ .LeftLine }}{{ end }}
|
||||||
|
</span>
|
||||||
|
<span class="diff-line-number">
|
||||||
|
{{if .RightLine }}{{ .RightLine }}{{ end }}
|
||||||
|
</span>
|
||||||
|
<span class="diff-value diff-indent-{{ .Indent }}" title="{{ .Text }}">
|
||||||
|
{{ .Text }}
|
||||||
|
</span>
|
||||||
|
<span class="diff-line-icon">{{ ctos .Change }}</span>
|
||||||
|
</p>
|
||||||
|
{{ end }}`
|
||||||
|
)
|
||||||
|
|
||||||
|
var diffTplFuncs = template.FuncMap{
|
||||||
|
"ctos": func(c ChangeType) string {
|
||||||
|
if symbol, ok := changeTypeToSymbol[c]; ok {
|
||||||
|
return symbol
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
"cton": func(c ChangeType) string {
|
||||||
|
if name, ok := changeTypeToName[c]; ok {
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSONLine contains the data required to render each line of the JSON diff
|
||||||
|
// and contains the data required to produce the tokens output in the basic
|
||||||
|
// diff.
|
||||||
|
type JSONLine struct {
|
||||||
|
LineNum int `json:"line"`
|
||||||
|
LeftLine int `json:"leftLine"`
|
||||||
|
RightLine int `json:"rightLine"`
|
||||||
|
Indent int `json:"indent"`
|
||||||
|
Text string `json:"text"`
|
||||||
|
Change ChangeType `json:"changeType"`
|
||||||
|
Key string `json:"key"`
|
||||||
|
Val interface{} `json:"value"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewJSONFormatter(left interface{}) *JSONFormatter {
|
||||||
|
tpl := template.Must(template.New("JSONDiffWrapper").Funcs(diffTplFuncs).Parse(tplJSONDiffWrapper))
|
||||||
|
tpl = template.Must(tpl.New("JSONDiffLine").Funcs(diffTplFuncs).Parse(tplJSONDiffLine))
|
||||||
|
|
||||||
|
return &JSONFormatter{
|
||||||
|
left: left,
|
||||||
|
Lines: []*JSONLine{},
|
||||||
|
tpl: tpl,
|
||||||
|
path: []string{},
|
||||||
|
size: []int{},
|
||||||
|
lineCount: 0,
|
||||||
|
inArray: []bool{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type JSONFormatter struct {
|
||||||
|
left interface{}
|
||||||
|
path []string
|
||||||
|
size []int
|
||||||
|
inArray []bool
|
||||||
|
lineCount int
|
||||||
|
leftLine int
|
||||||
|
rightLine int
|
||||||
|
line *AsciiLine
|
||||||
|
Lines []*JSONLine
|
||||||
|
tpl *template.Template
|
||||||
|
}
|
||||||
|
|
||||||
|
type AsciiLine struct {
|
||||||
|
// the type of change
|
||||||
|
change ChangeType
|
||||||
|
|
||||||
|
// the actual changes - no formatting
|
||||||
|
key string
|
||||||
|
val interface{}
|
||||||
|
|
||||||
|
// level of indentation for the current line
|
||||||
|
indent int
|
||||||
|
|
||||||
|
// buffer containing the fully formatted line
|
||||||
|
buffer *bytes.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) Format(diff diff.Diff) (result string, err error) {
|
||||||
|
if v, ok := f.left.(map[string]interface{}); ok {
|
||||||
|
f.formatObject(v, diff)
|
||||||
|
} else if v, ok := f.left.([]interface{}); ok {
|
||||||
|
f.formatArray(v, diff)
|
||||||
|
} else {
|
||||||
|
return "", fmt.Errorf("expected map[string]interface{} or []interface{}, got %T",
|
||||||
|
f.left)
|
||||||
|
}
|
||||||
|
|
||||||
|
b := &bytes.Buffer{}
|
||||||
|
err = f.tpl.ExecuteTemplate(b, "JSONDiffWrapper", f.Lines)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("%v\n", err)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return b.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) formatObject(left map[string]interface{}, df diff.Diff) {
|
||||||
|
f.addLineWith(ChangeNil, "{")
|
||||||
|
f.push("ROOT", len(left), false)
|
||||||
|
f.processObject(left, df.Deltas())
|
||||||
|
f.pop()
|
||||||
|
f.addLineWith(ChangeNil, "}")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) formatArray(left []interface{}, df diff.Diff) {
|
||||||
|
f.addLineWith(ChangeNil, "[")
|
||||||
|
f.push("ROOT", len(left), true)
|
||||||
|
f.processArray(left, df.Deltas())
|
||||||
|
f.pop()
|
||||||
|
f.addLineWith(ChangeNil, "]")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) processArray(array []interface{}, deltas []diff.Delta) error {
|
||||||
|
patchedIndex := 0
|
||||||
|
for index, value := range array {
|
||||||
|
f.processItem(value, deltas, diff.Index(index))
|
||||||
|
patchedIndex++
|
||||||
|
}
|
||||||
|
|
||||||
|
// additional Added
|
||||||
|
for _, delta := range deltas {
|
||||||
|
switch delta.(type) {
|
||||||
|
case *diff.Added:
|
||||||
|
d := delta.(*diff.Added)
|
||||||
|
// skip items already processed
|
||||||
|
if int(d.Position.(diff.Index)) < len(array) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
f.printRecursive(d.Position.String(), d.Value, ChangeAdded)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) processObject(object map[string]interface{}, deltas []diff.Delta) error {
|
||||||
|
names := sortKeys(object)
|
||||||
|
for _, name := range names {
|
||||||
|
value := object[name]
|
||||||
|
f.processItem(value, deltas, diff.Name(name))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Added
|
||||||
|
for _, delta := range deltas {
|
||||||
|
switch delta.(type) {
|
||||||
|
case *diff.Added:
|
||||||
|
d := delta.(*diff.Added)
|
||||||
|
f.printRecursive(d.Position.String(), d.Value, ChangeAdded)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) processItem(value interface{}, deltas []diff.Delta, position diff.Position) error {
|
||||||
|
matchedDeltas := f.searchDeltas(deltas, position)
|
||||||
|
positionStr := position.String()
|
||||||
|
if len(matchedDeltas) > 0 {
|
||||||
|
for _, matchedDelta := range matchedDeltas {
|
||||||
|
|
||||||
|
switch matchedDelta.(type) {
|
||||||
|
case *diff.Object:
|
||||||
|
d := matchedDelta.(*diff.Object)
|
||||||
|
switch value.(type) {
|
||||||
|
case map[string]interface{}:
|
||||||
|
//ok
|
||||||
|
default:
|
||||||
|
return errors.New("Type mismatch")
|
||||||
|
}
|
||||||
|
o := value.(map[string]interface{})
|
||||||
|
|
||||||
|
f.newLine(ChangeNil)
|
||||||
|
f.printKey(positionStr)
|
||||||
|
f.print("{")
|
||||||
|
f.closeLine()
|
||||||
|
f.push(positionStr, len(o), false)
|
||||||
|
f.processObject(o, d.Deltas)
|
||||||
|
f.pop()
|
||||||
|
f.newLine(ChangeNil)
|
||||||
|
f.print("}")
|
||||||
|
f.printComma()
|
||||||
|
f.closeLine()
|
||||||
|
|
||||||
|
case *diff.Array:
|
||||||
|
d := matchedDelta.(*diff.Array)
|
||||||
|
switch value.(type) {
|
||||||
|
case []interface{}:
|
||||||
|
//ok
|
||||||
|
default:
|
||||||
|
return errors.New("Type mismatch")
|
||||||
|
}
|
||||||
|
a := value.([]interface{})
|
||||||
|
|
||||||
|
f.newLine(ChangeNil)
|
||||||
|
f.printKey(positionStr)
|
||||||
|
f.print("[")
|
||||||
|
f.closeLine()
|
||||||
|
f.push(positionStr, len(a), true)
|
||||||
|
f.processArray(a, d.Deltas)
|
||||||
|
f.pop()
|
||||||
|
f.newLine(ChangeNil)
|
||||||
|
f.print("]")
|
||||||
|
f.printComma()
|
||||||
|
f.closeLine()
|
||||||
|
|
||||||
|
case *diff.Added:
|
||||||
|
d := matchedDelta.(*diff.Added)
|
||||||
|
f.printRecursive(positionStr, d.Value, ChangeAdded)
|
||||||
|
f.size[len(f.size)-1]++
|
||||||
|
|
||||||
|
case *diff.Modified:
|
||||||
|
d := matchedDelta.(*diff.Modified)
|
||||||
|
savedSize := f.size[len(f.size)-1]
|
||||||
|
f.printRecursive(positionStr, d.OldValue, ChangeOld)
|
||||||
|
f.size[len(f.size)-1] = savedSize
|
||||||
|
f.printRecursive(positionStr, d.NewValue, ChangeNew)
|
||||||
|
|
||||||
|
case *diff.TextDiff:
|
||||||
|
savedSize := f.size[len(f.size)-1]
|
||||||
|
d := matchedDelta.(*diff.TextDiff)
|
||||||
|
f.printRecursive(positionStr, d.OldValue, ChangeOld)
|
||||||
|
f.size[len(f.size)-1] = savedSize
|
||||||
|
f.printRecursive(positionStr, d.NewValue, ChangeNew)
|
||||||
|
|
||||||
|
case *diff.Deleted:
|
||||||
|
d := matchedDelta.(*diff.Deleted)
|
||||||
|
f.printRecursive(positionStr, d.Value, ChangeDeleted)
|
||||||
|
|
||||||
|
default:
|
||||||
|
return errors.New("Unknown Delta type detected")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
f.printRecursive(positionStr, value, ChangeUnchanged)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) searchDeltas(deltas []diff.Delta, postion diff.Position) (results []diff.Delta) {
|
||||||
|
results = make([]diff.Delta, 0)
|
||||||
|
for _, delta := range deltas {
|
||||||
|
switch delta.(type) {
|
||||||
|
case diff.PostDelta:
|
||||||
|
if delta.(diff.PostDelta).PostPosition() == postion {
|
||||||
|
results = append(results, delta)
|
||||||
|
}
|
||||||
|
case diff.PreDelta:
|
||||||
|
if delta.(diff.PreDelta).PrePosition() == postion {
|
||||||
|
results = append(results, delta)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
panic("heh")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) push(name string, size int, array bool) {
|
||||||
|
f.path = append(f.path, name)
|
||||||
|
f.size = append(f.size, size)
|
||||||
|
f.inArray = append(f.inArray, array)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) pop() {
|
||||||
|
f.path = f.path[0 : len(f.path)-1]
|
||||||
|
f.size = f.size[0 : len(f.size)-1]
|
||||||
|
f.inArray = f.inArray[0 : len(f.inArray)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) addLineWith(change ChangeType, value string) {
|
||||||
|
f.line = &AsciiLine{
|
||||||
|
change: change,
|
||||||
|
indent: len(f.path),
|
||||||
|
buffer: bytes.NewBufferString(value),
|
||||||
|
}
|
||||||
|
f.closeLine()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) newLine(change ChangeType) {
|
||||||
|
f.line = &AsciiLine{
|
||||||
|
change: change,
|
||||||
|
indent: len(f.path),
|
||||||
|
buffer: bytes.NewBuffer([]byte{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) closeLine() {
|
||||||
|
leftLine := 0
|
||||||
|
rightLine := 0
|
||||||
|
f.lineCount++
|
||||||
|
|
||||||
|
switch f.line.change {
|
||||||
|
case ChangeAdded, ChangeNew:
|
||||||
|
f.rightLine++
|
||||||
|
rightLine = f.rightLine
|
||||||
|
|
||||||
|
case ChangeDeleted, ChangeOld:
|
||||||
|
f.leftLine++
|
||||||
|
leftLine = f.leftLine
|
||||||
|
|
||||||
|
case ChangeNil, ChangeUnchanged:
|
||||||
|
f.rightLine++
|
||||||
|
f.leftLine++
|
||||||
|
rightLine = f.rightLine
|
||||||
|
leftLine = f.leftLine
|
||||||
|
}
|
||||||
|
|
||||||
|
s := f.line.buffer.String()
|
||||||
|
f.Lines = append(f.Lines, &JSONLine{
|
||||||
|
LineNum: f.lineCount,
|
||||||
|
RightLine: rightLine,
|
||||||
|
LeftLine: leftLine,
|
||||||
|
Indent: f.line.indent,
|
||||||
|
Text: s,
|
||||||
|
Change: f.line.change,
|
||||||
|
Key: f.line.key,
|
||||||
|
Val: f.line.val,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) printKey(name string) {
|
||||||
|
if !f.inArray[len(f.inArray)-1] {
|
||||||
|
f.line.key = name
|
||||||
|
fmt.Fprintf(f.line.buffer, `"%s": `, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) printComma() {
|
||||||
|
f.size[len(f.size)-1]--
|
||||||
|
if f.size[len(f.size)-1] > 0 {
|
||||||
|
f.line.buffer.WriteRune(',')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) printValue(value interface{}) {
|
||||||
|
switch value.(type) {
|
||||||
|
case string:
|
||||||
|
f.line.val = value
|
||||||
|
fmt.Fprintf(f.line.buffer, `"%s"`, value)
|
||||||
|
case nil:
|
||||||
|
f.line.val = "null"
|
||||||
|
f.line.buffer.WriteString("null")
|
||||||
|
default:
|
||||||
|
f.line.val = value
|
||||||
|
fmt.Fprintf(f.line.buffer, `%#v`, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) print(a string) {
|
||||||
|
f.line.buffer.WriteString(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) printRecursive(name string, value interface{}, change ChangeType) {
|
||||||
|
switch value.(type) {
|
||||||
|
case map[string]interface{}:
|
||||||
|
f.newLine(change)
|
||||||
|
f.printKey(name)
|
||||||
|
f.print("{")
|
||||||
|
f.closeLine()
|
||||||
|
|
||||||
|
m := value.(map[string]interface{})
|
||||||
|
size := len(m)
|
||||||
|
f.push(name, size, false)
|
||||||
|
|
||||||
|
keys := sortKeys(m)
|
||||||
|
for _, key := range keys {
|
||||||
|
f.printRecursive(key, m[key], change)
|
||||||
|
}
|
||||||
|
f.pop()
|
||||||
|
|
||||||
|
f.newLine(change)
|
||||||
|
f.print("}")
|
||||||
|
f.printComma()
|
||||||
|
f.closeLine()
|
||||||
|
|
||||||
|
case []interface{}:
|
||||||
|
f.newLine(change)
|
||||||
|
f.printKey(name)
|
||||||
|
f.print("[")
|
||||||
|
f.closeLine()
|
||||||
|
|
||||||
|
s := value.([]interface{})
|
||||||
|
size := len(s)
|
||||||
|
f.push("", size, true)
|
||||||
|
for _, item := range s {
|
||||||
|
f.printRecursive("", item, change)
|
||||||
|
}
|
||||||
|
f.pop()
|
||||||
|
|
||||||
|
f.newLine(change)
|
||||||
|
f.print("]")
|
||||||
|
f.printComma()
|
||||||
|
f.closeLine()
|
||||||
|
|
||||||
|
default:
|
||||||
|
f.newLine(change)
|
||||||
|
f.printKey(name)
|
||||||
|
f.printValue(value)
|
||||||
|
f.printComma()
|
||||||
|
f.closeLine()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func sortKeys(m map[string]interface{}) (keys []string) {
|
||||||
|
keys = make([]string, 0, len(m))
|
||||||
|
for key := range m {
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
return
|
||||||
|
}
|
@ -47,10 +47,11 @@ func NewImageUploader() (ImageUploader, error) {
|
|||||||
return nil, fmt.Errorf("Could not find url key for image.uploader.webdav")
|
return nil, fmt.Errorf("Could not find url key for image.uploader.webdav")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public_url := webdavSec.Key("public_url").String()
|
||||||
username := webdavSec.Key("username").String()
|
username := webdavSec.Key("username").String()
|
||||||
password := webdavSec.Key("password").String()
|
password := webdavSec.Key("password").String()
|
||||||
|
|
||||||
return NewWebdavImageUploader(url, username, password)
|
return NewWebdavImageUploader(url, username, password, public_url)
|
||||||
}
|
}
|
||||||
|
|
||||||
return NopImageUploader{}, nil
|
return NopImageUploader{}, nil
|
||||||
|
@ -78,5 +78,9 @@ func (u *S3Uploader) Upload(imageDiskPath string) (string, error) {
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
return "https://" + u.bucket + ".s3.amazonaws.com/" + key, nil
|
if u.region == "us-east-1" {
|
||||||
|
return "https://" + u.bucket + ".s3.amazonaws.com/" + key, nil
|
||||||
|
} else {
|
||||||
|
return "https://" + u.bucket + ".s3-" + u.region + ".amazonaws.com/" + key, nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -8,7 +8,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestUploadToS3(t *testing.T) {
|
func TestUploadToS3(t *testing.T) {
|
||||||
SkipConvey("[Integration test] for external_image_store.webdav", t, func() {
|
SkipConvey("[Integration test] for external_image_store.s3", t, func() {
|
||||||
setting.NewConfigContext(&setting.CommandLineArgs{
|
setting.NewConfigContext(&setting.CommandLineArgs{
|
||||||
HomePath: "../../../",
|
HomePath: "../../../",
|
||||||
})
|
})
|
||||||
|
@ -14,12 +14,14 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type WebdavUploader struct {
|
type WebdavUploader struct {
|
||||||
url string
|
url string
|
||||||
username string
|
username string
|
||||||
password string
|
password string
|
||||||
|
public_url string
|
||||||
}
|
}
|
||||||
|
|
||||||
var netTransport = &http.Transport{
|
var netTransport = &http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
Dial: (&net.Dialer{
|
Dial: (&net.Dialer{
|
||||||
Timeout: 60 * time.Second,
|
Timeout: 60 * time.Second,
|
||||||
}).Dial,
|
}).Dial,
|
||||||
@ -33,7 +35,8 @@ var netClient = &http.Client{
|
|||||||
|
|
||||||
func (u *WebdavUploader) Upload(pa string) (string, error) {
|
func (u *WebdavUploader) Upload(pa string) (string, error) {
|
||||||
url, _ := url.Parse(u.url)
|
url, _ := url.Parse(u.url)
|
||||||
url.Path = path.Join(url.Path, util.GetRandomString(20)+".png")
|
filename := util.GetRandomString(20) + ".png"
|
||||||
|
url.Path = path.Join(url.Path, filename)
|
||||||
|
|
||||||
imgData, err := ioutil.ReadFile(pa)
|
imgData, err := ioutil.ReadFile(pa)
|
||||||
req, err := http.NewRequest("PUT", url.String(), bytes.NewReader(imgData))
|
req, err := http.NewRequest("PUT", url.String(), bytes.NewReader(imgData))
|
||||||
@ -53,13 +56,20 @@ func (u *WebdavUploader) Upload(pa string) (string, error) {
|
|||||||
return "", fmt.Errorf("Failed to upload image. Returned statuscode %v body %s", res.StatusCode, body)
|
return "", fmt.Errorf("Failed to upload image. Returned statuscode %v body %s", res.StatusCode, body)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if u.public_url != "" {
|
||||||
|
publicURL, _ := url.Parse(u.public_url)
|
||||||
|
publicURL.Path = path.Join(publicURL.Path, filename)
|
||||||
|
return publicURL.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
return url.String(), nil
|
return url.String(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewWebdavImageUploader(url, username, passwrod string) (*WebdavUploader, error) {
|
func NewWebdavImageUploader(url, username, password, public_url string) (*WebdavUploader, error) {
|
||||||
return &WebdavUploader{
|
return &WebdavUploader{
|
||||||
url: url,
|
url: url,
|
||||||
username: username,
|
username: username,
|
||||||
password: passwrod,
|
password: password,
|
||||||
|
public_url: public_url,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
@ -7,12 +7,21 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestUploadToWebdav(t *testing.T) {
|
func TestUploadToWebdav(t *testing.T) {
|
||||||
webdavUploader, _ := NewWebdavImageUploader("http://localhost:9998/dav/", "username", "password")
|
|
||||||
|
|
||||||
|
// Can be tested with this docker container: https://hub.docker.com/r/morrisjobke/webdav/
|
||||||
SkipConvey("[Integration test] for external_image_store.webdav", t, func() {
|
SkipConvey("[Integration test] for external_image_store.webdav", t, func() {
|
||||||
|
webdavUploader, _ := NewWebdavImageUploader("http://localhost:8888/webdav/", "test", "test", "")
|
||||||
path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png")
|
path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png")
|
||||||
|
|
||||||
So(err, ShouldBeNil)
|
So(err, ShouldBeNil)
|
||||||
So(path, ShouldNotEqual, "")
|
So(path, ShouldStartWith, "http://localhost:8888/webdav/")
|
||||||
|
})
|
||||||
|
|
||||||
|
SkipConvey("[Integration test] for external_image_store.webdav with public url", t, func() {
|
||||||
|
webdavUploader, _ := NewWebdavImageUploader("http://localhost:8888/webdav/", "test", "test", "http://publicurl:8888/webdav")
|
||||||
|
path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png")
|
||||||
|
|
||||||
|
So(err, ShouldBeNil)
|
||||||
|
So(path, ShouldStartWith, "http://publicurl:8888/webdav/")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
package securejsondata
|
package securejsondata
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"github.com/grafana/grafana/pkg/log"
|
||||||
"github.com/grafana/grafana/pkg/setting"
|
"github.com/grafana/grafana/pkg/setting"
|
||||||
"github.com/grafana/grafana/pkg/util"
|
"github.com/grafana/grafana/pkg/util"
|
||||||
)
|
)
|
||||||
@ -10,7 +11,12 @@ type SecureJsonData map[string][]byte
|
|||||||
func (s SecureJsonData) Decrypt() map[string]string {
|
func (s SecureJsonData) Decrypt() map[string]string {
|
||||||
decrypted := make(map[string]string)
|
decrypted := make(map[string]string)
|
||||||
for key, data := range s {
|
for key, data := range s {
|
||||||
decrypted[key] = string(util.Decrypt(data, setting.SecretKey))
|
decryptedData, err := util.Decrypt(data, setting.SecretKey)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(4, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
decrypted[key] = string(decryptedData)
|
||||||
}
|
}
|
||||||
return decrypted
|
return decrypted
|
||||||
}
|
}
|
||||||
@ -18,7 +24,12 @@ func (s SecureJsonData) Decrypt() map[string]string {
|
|||||||
func GetEncryptedJsonData(sjd map[string]string) SecureJsonData {
|
func GetEncryptedJsonData(sjd map[string]string) SecureJsonData {
|
||||||
encrypted := make(SecureJsonData)
|
encrypted := make(SecureJsonData)
|
||||||
for key, data := range sjd {
|
for key, data := range sjd {
|
||||||
encrypted[key] = util.Encrypt([]byte(data), setting.SecretKey)
|
encryptedData, err := util.Encrypt([]byte(data), setting.SecretKey)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(4, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
encrypted[key] = encryptedData
|
||||||
}
|
}
|
||||||
return encrypted
|
return encrypted
|
||||||
}
|
}
|
||||||
|
@ -15,6 +15,8 @@ import (
|
|||||||
"github.com/go-stack/stack"
|
"github.com/go-stack/stack"
|
||||||
"github.com/inconshreveable/log15"
|
"github.com/inconshreveable/log15"
|
||||||
"github.com/inconshreveable/log15/term"
|
"github.com/inconshreveable/log15/term"
|
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
var Root log15.Logger
|
var Root log15.Logger
|
||||||
@ -34,7 +36,7 @@ func New(logger string, ctx ...interface{}) Logger {
|
|||||||
func Trace(format string, v ...interface{}) {
|
func Trace(format string, v ...interface{}) {
|
||||||
var message string
|
var message string
|
||||||
if len(v) > 0 {
|
if len(v) > 0 {
|
||||||
message = fmt.Sprintf(format, v)
|
message = fmt.Sprintf(format, v...)
|
||||||
} else {
|
} else {
|
||||||
message = format
|
message = format
|
||||||
}
|
}
|
||||||
@ -45,7 +47,7 @@ func Trace(format string, v ...interface{}) {
|
|||||||
func Debug(format string, v ...interface{}) {
|
func Debug(format string, v ...interface{}) {
|
||||||
var message string
|
var message string
|
||||||
if len(v) > 0 {
|
if len(v) > 0 {
|
||||||
message = fmt.Sprintf(format, v)
|
message = fmt.Sprintf(format, v...)
|
||||||
} else {
|
} else {
|
||||||
message = format
|
message = format
|
||||||
}
|
}
|
||||||
@ -60,7 +62,7 @@ func Debug2(message string, v ...interface{}) {
|
|||||||
func Info(format string, v ...interface{}) {
|
func Info(format string, v ...interface{}) {
|
||||||
var message string
|
var message string
|
||||||
if len(v) > 0 {
|
if len(v) > 0 {
|
||||||
message = fmt.Sprintf(format, v)
|
message = fmt.Sprintf(format, v...)
|
||||||
} else {
|
} else {
|
||||||
message = format
|
message = format
|
||||||
}
|
}
|
||||||
@ -75,7 +77,7 @@ func Info2(message string, v ...interface{}) {
|
|||||||
func Warn(format string, v ...interface{}) {
|
func Warn(format string, v ...interface{}) {
|
||||||
var message string
|
var message string
|
||||||
if len(v) > 0 {
|
if len(v) > 0 {
|
||||||
message = fmt.Sprintf(format, v)
|
message = fmt.Sprintf(format, v...)
|
||||||
} else {
|
} else {
|
||||||
message = format
|
message = format
|
||||||
}
|
}
|
||||||
@ -88,7 +90,7 @@ func Warn2(message string, v ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func Error(skip int, format string, v ...interface{}) {
|
func Error(skip int, format string, v ...interface{}) {
|
||||||
Root.Error(fmt.Sprintf(format, v))
|
Root.Error(fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
|
|
||||||
func Error2(message string, v ...interface{}) {
|
func Error2(message string, v ...interface{}) {
|
||||||
@ -96,7 +98,7 @@ func Error2(message string, v ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func Critical(skip int, format string, v ...interface{}) {
|
func Critical(skip int, format string, v ...interface{}) {
|
||||||
Root.Crit(fmt.Sprintf(format, v))
|
Root.Crit(fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
|
|
||||||
func Fatal(skip int, format string, v ...interface{}) {
|
func Fatal(skip int, format string, v ...interface{}) {
|
||||||
@ -172,7 +174,7 @@ func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) {
|
|||||||
Close()
|
Close()
|
||||||
|
|
||||||
defaultLevelName, _ := getLogLevelFromConfig("log", "info", cfg)
|
defaultLevelName, _ := getLogLevelFromConfig("log", "info", cfg)
|
||||||
defaultFilters := getFilters(cfg.Section("log").Key("filters").Strings(" "))
|
defaultFilters := getFilters(util.SplitString(cfg.Section("log").Key("filters").String()))
|
||||||
|
|
||||||
handlers := make([]log15.Handler, 0)
|
handlers := make([]log15.Handler, 0)
|
||||||
|
|
||||||
@ -185,7 +187,7 @@ func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) {
|
|||||||
|
|
||||||
// Log level.
|
// Log level.
|
||||||
_, level := getLogLevelFromConfig("log."+mode, defaultLevelName, cfg)
|
_, level := getLogLevelFromConfig("log."+mode, defaultLevelName, cfg)
|
||||||
modeFilters := getFilters(sec.Key("filters").Strings(" "))
|
modeFilters := getFilters(util.SplitString(sec.Key("filters").String()))
|
||||||
format := getLogFormat(sec.Key("format").MustString(""))
|
format := getLogFormat(sec.Key("format").MustString(""))
|
||||||
|
|
||||||
var handler log15.Handler
|
var handler log15.Handler
|
||||||
|
@ -124,7 +124,7 @@ func (m *StandardMeter) Count() int64 {
|
|||||||
return count
|
return count
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mark records the occurance of n events.
|
// Mark records the occurrence of n events.
|
||||||
func (m *StandardMeter) Mark(n int64) {
|
func (m *StandardMeter) Mark(n int64) {
|
||||||
m.lock.Lock()
|
m.lock.Lock()
|
||||||
defer m.lock.Unlock()
|
defer m.lock.Unlock()
|
||||||
|
@ -13,7 +13,7 @@ import (
|
|||||||
"github.com/grafana/grafana/pkg/setting"
|
"github.com/grafana/grafana/pkg/setting"
|
||||||
)
|
)
|
||||||
|
|
||||||
func initContextWithAuthProxy(ctx *Context) bool {
|
func initContextWithAuthProxy(ctx *Context, orgId int64) bool {
|
||||||
if !setting.AuthProxyEnabled {
|
if !setting.AuthProxyEnabled {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@ -30,6 +30,7 @@ func initContextWithAuthProxy(ctx *Context) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
query := getSignedInUserQueryForProxyAuth(proxyHeaderValue)
|
query := getSignedInUserQueryForProxyAuth(proxyHeaderValue)
|
||||||
|
query.OrgId = orgId
|
||||||
if err := bus.Dispatch(query); err != nil {
|
if err := bus.Dispatch(query); err != nil {
|
||||||
if err != m.ErrUserNotFound {
|
if err != m.ErrUserNotFound {
|
||||||
ctx.Handle(500, "Failed to find user specified in auth proxy header", err)
|
ctx.Handle(500, "Failed to find user specified in auth proxy header", err)
|
||||||
@ -46,7 +47,7 @@ func initContextWithAuthProxy(ctx *Context) bool {
|
|||||||
ctx.Handle(500, "Failed to create user specified in auth proxy header", err)
|
ctx.Handle(500, "Failed to create user specified in auth proxy header", err)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
query = &m.GetSignedInUserQuery{UserId: cmd.Result.Id}
|
query = &m.GetSignedInUserQuery{UserId: cmd.Result.Id, OrgId: orgId}
|
||||||
if err := bus.Dispatch(query); err != nil {
|
if err := bus.Dispatch(query); err != nil {
|
||||||
ctx.Handle(500, "Failed find user after creation", err)
|
ctx.Handle(500, "Failed find user after creation", err)
|
||||||
return true
|
return true
|
||||||
|
@ -49,9 +49,9 @@ func Logger() macaron.Handler {
|
|||||||
if ctx, ok := c.Data["ctx"]; ok {
|
if ctx, ok := c.Data["ctx"]; ok {
|
||||||
ctxTyped := ctx.(*Context)
|
ctxTyped := ctx.(*Context)
|
||||||
if status == 500 {
|
if status == 500 {
|
||||||
ctxTyped.Logger.Error("Request Completed", "method", req.Method, "path", req.URL.Path, "status", status, "remote_addr", c.RemoteAddr(), "time_ms", timeTakenMs, "size", rw.Size())
|
ctxTyped.Logger.Error("Request Completed", "method", req.Method, "path", req.URL.Path, "status", status, "remote_addr", c.RemoteAddr(), "time_ms", int64(timeTakenMs), "size", rw.Size(), "referer", req.Referer())
|
||||||
} else {
|
} else {
|
||||||
ctxTyped.Logger.Info("Request Completed", "method", req.Method, "path", req.URL.Path, "status", status, "remote_addr", c.RemoteAddr(), "time_ms", timeTakenMs, "size", rw.Size())
|
ctxTyped.Logger.Info("Request Completed", "method", req.Method, "path", req.URL.Path, "status", status, "remote_addr", c.RemoteAddr(), "time_ms", int64(timeTakenMs), "size", rw.Size(), "referer", req.Referer())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user