Merge branch 'master' into master

Torkel Ödegaard 2018-05-08 21:23:34 +02:00 committed by GitHub
commit c79b1bef15
547 changed files with 19074 additions and 7634 deletions

View File

@ -1,6 +1,6 @@
[run]
init_cmds = [
["go", "build", "-o", "./bin/grafana-server", "./pkg/cmd/grafana-server"],
["go", "run", "build.go", "-dev", "build-server"],
["./bin/grafana-server", "cfg:app_mode=development"]
]
watch_all = true
@ -12,6 +12,6 @@ watch_dirs = [
watch_exts = [".go", ".ini", ".toml"]
build_delay = 1500
cmds = [
["go", "build", "-o", "./bin/grafana-server", "./pkg/cmd/grafana-server"],
["go", "run", "build.go", "-dev", "build-server"],
["./bin/grafana-server", "cfg:app_mode=development"]
]
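
For reference, the watcher's new command pair can also be run by hand; a minimal sketch using only the commands listed above:

```bash
# Build the backend via the new dev shortcut (skips clean/checksum steps)
go run build.go -dev build-server

# Start the freshly built binary in development mode
./bin/grafana-server cfg:app_mode=development
```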

View File

@ -1,6 +1,41 @@
version: 2
jobs:
codespell:
docker:
- image: circleci/python
steps:
- checkout
- run:
name: install codespell
command: 'sudo pip install codespell'
- run:
# Important: all words have to be in lowercase, and separated by "\n".
name: exclude known exceptions
command: 'echo -e "unknwon" > words_to_ignore.txt'
- run:
name: check documentation spelling errors
command: 'codespell -I ./words_to_ignore.txt docs/'
gometalinter:
docker:
- image: circleci/golang:1.10
environment:
# we need CGO because of go-sqlite3
CGO_ENABLED: 1
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
- run: 'go get -u gopkg.in/alecthomas/gometalinter.v2'
- run: 'go get -u github.com/tsenart/deadcode'
- run: 'go get -u github.com/gordonklaus/ineffassign'
- run: 'go get -u github.com/opennota/check/cmd/structcheck'
- run: 'go get -u github.com/mdempsky/unconvert'
- run: 'go get -u github.com/opennota/check/cmd/varcheck'
- run:
name: run linters
command: 'gometalinter.v2 --enable-gc --vendor --deadline 10m --disable-all --enable=deadcode --enable=ineffassign --enable=structcheck --enable=unconvert --enable=varcheck ./...'
test-frontend:
docker:
- image: circleci/node:6.11.4
@ -58,6 +93,22 @@ jobs:
- scripts/*.sh
- scripts/publish
build-enterprise:
docker:
- image: grafana/build-container:v0.1
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
- run:
name: build and package grafana
command: './scripts/build/build_enterprise.sh'
- run:
name: sign packages
command: './scripts/build/sign_packages.sh'
- run:
name: sha-sum packages
command: 'go run build.go sha-dist'
deploy-master:
docker:
- image: circleci/python:2.7-stretch
@ -85,7 +136,7 @@ jobs:
- image: circleci/python:2.7-stretch
steps:
- attach_workspace:
at: dist
at: .
- run:
name: install awscli
command: 'sudo pip install awscli'
@ -103,6 +154,14 @@ workflows:
version: 2
test-and-build:
jobs:
- codespell:
filters:
tags:
only: /.*/
- gometalinter:
filters:
tags:
only: /.*/
- build:
filters:
tags:
@ -133,3 +192,7 @@ workflows:
ignore: /.*/
tags:
only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
# - build-enterprise:
# filters:
# tags:
# only: /.*/
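
Both new jobs are easy to reproduce locally before pushing; a sketch using the same commands as the config above (pip may need sudo or a virtualenv, and CGO_ENABLED=1 matches the job's environment because of go-sqlite3):

```bash
# Spell-check the docs exactly as the codespell job does
pip install codespell
echo -e "unknwon" > words_to_ignore.txt   # lowercase, one word per line
codespell -I ./words_to_ignore.txt docs/

# Install and run the same linters as the gometalinter job
go get -u gopkg.in/alecthomas/gometalinter.v2
go get -u github.com/tsenart/deadcode
go get -u github.com/gordonklaus/ineffassign
go get -u github.com/opennota/check/cmd/structcheck
go get -u github.com/mdempsky/unconvert
go get -u github.com/opennota/check/cmd/varcheck
CGO_ENABLED=1 gometalinter.v2 --enable-gc --vendor --deadline 10m --disable-all \
  --enable=deadcode --enable=ineffassign --enable=structcheck \
  --enable=unconvert --enable=varcheck ./...
```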

18
.dockerignore Normal file
View File

@ -0,0 +1,18 @@
.awcache
.dockerignore
.git
.gitignore
.github
data*
dist
docker
docs
dump.rdb
node_modules
/local
/tmp
/vendor
*.yml
*.md
/vendor
/tmp
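
This new ignore file keeps build output, docs and node_modules out of the Docker build context; a quick way to see its effect locally, assuming a Dockerfile at the repository root (the image tag is illustrative):

```bash
# Paths listed in .dockerignore (dist, docs, node_modules, ...) are not sent
# to the Docker daemon, which keeps the build context small.
docker build -t grafana:dev .
```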

5
.gitignore vendored
View File

@ -1,8 +1,10 @@
node_modules
npm-debug.log
yarn-error.log
coverage/
.aws-config.json
awsconfig
/.awcache
/dist
/public/build
/public/views/index.html
@ -42,10 +44,13 @@ docker-compose.yaml
/conf/provisioning/**/custom.yaml
profile.cov
/grafana
/local
.notouch
/Makefile.local
/pkg/cmd/grafana-cli/grafana-cli
/pkg/cmd/grafana-server/grafana-server
/pkg/cmd/grafana-server/debug
/pkg/extensions
debug.test
/examples/*/dist
/packaging/**/*.rpm

View File

@ -1,4 +1,34 @@
# 5.1.0 (unreleased)
# 5.2.0 (unreleased)
### Minor
* **Graph**: Show invisible highest value bucket in histogram [#11498](https://github.com/grafana/grafana/issues/11498)
* **Dashboard**: Enable "Save As..." if user has edit permission [#11625](https://github.com/grafana/grafana/issues/11625)
* **Prometheus**: Table column order now changes when rearranging queries [#11690](https://github.com/grafana/grafana/issues/11690), thx [@mtanda](https://github.com/mtanda)
* **Variables**: Fix variable interpolation when using multiple formatting types [#11800](https://github.com/grafana/grafana/issues/11800), thx [@svenklemm](https://github.com/svenklemm)
* **Dashboard**: Fix date selector styling for dark/light theme in time picker control [#11616](https://github.com/grafana/grafana/issues/11616)
* **Discord**: Alert notification channel type for Discord, [#7964](https://github.com/grafana/grafana/issues/7964) thx [@jereksel](https://github.com/jereksel),
* **InfluxDB**: Support SELECT queries in templating query, [#5013](https://github.com/grafana/grafana/issues/5013)
* **Dashboard**: JSON Model under dashboard settings can now be updated & changes saved, [#1429](https://github.com/grafana/grafana/issues/1429), thx [@jereksel](https://github.com/jereksel)
* **Security**: Fix XSS vulnerabilities in dashboard links [#11813](https://github.com/grafana/grafana/pull/11813)
* **Singlestat**: Fix "time of last point" shows local time when dashboard timezone set to UTC [#10338](https://github.com/grafana/grafana/issues/10338)
# 5.1.1 (2018-05-07)
* **LDAP**: LDAP login with MariaDB/MySQL database and dn>100 chars not possible [#11754](https://github.com/grafana/grafana/issues/11754)
* **Build**: AppVeyor Windows build missing version and commit info [#11758](https://github.com/grafana/grafana/issues/11758)
* **Scroll**: Scroll can't start in graphs on Chrome mobile [#11710](https://github.com/grafana/grafana/issues/11710)
* **Units**: Revert renaming of unit key ppm [#11743](https://github.com/grafana/grafana/issues/11743)
# 5.1.0 (2018-04-26)
* **Folders**: Default permissions on folder are not shown as inherited in its dashboards [#11668](https://github.com/grafana/grafana/issues/11668)
* **Templating**: Allow more than 20 previews when creating a variable [#11508](https://github.com/grafana/grafana/issues/11508)
* **Dashboard**: Row edit icon not shown [#11466](https://github.com/grafana/grafana/issues/11466)
* **SQL**: Unsupported data types for value column using time series query [#11703](https://github.com/grafana/grafana/issues/11703)
* **Prometheus**: Prometheus query inspector expands to be very large on autocomplete queries [#11673](https://github.com/grafana/grafana/issues/11673)
# 5.1.0-beta1 (2018-04-20)
* **MSSQL**: New Microsoft SQL Server data source [#10093](https://github.com/grafana/grafana/pull/10093), [#11298](https://github.com/grafana/grafana/pull/11298), thx [@linuxchips](https://github.com/linuxchips)
* **Prometheus**: The heatmap panel now support Prometheus histograms [#10009](https://github.com/grafana/grafana/issues/10009)
@ -13,8 +43,15 @@
* **Prometheus**: Show template variable candidate in query editor [#9210](https://github.com/grafana/grafana/issues/9210), thx [@mtanda](https://github.com/mtanda)
* **Prometheus**: Support POST for query and query_range [#9859](https://github.com/grafana/grafana/pull/9859), thx [@mtanda](https://github.com/mtanda)
* **Alerting**: Add support for retries on alert queries [#5855](https://github.com/grafana/grafana/issues/5855), thx [@Thib17](https://github.com/Thib17)
* **Table**: Table plugin value mappings [#7119](https://github.com/grafana/grafana/issues/7119), thx [infernix](https://github.com/infernix)
* **IE11**: IE 11 compatibility [#11165](https://github.com/grafana/grafana/issues/11165)
* **Scrolling**: Better scrolling experience [#11053](https://github.com/grafana/grafana/issues/11053), [#11252](https://github.com/grafana/grafana/issues/11252), [#10836](https://github.com/grafana/grafana/issues/10836), [#11185](https://github.com/grafana/grafana/issues/11185), [#11168](https://github.com/grafana/grafana/issues/11168)
* **Docker**: Improved docker image (breaking changes regarding file ownership) [grafana-docker #141](https://github.com/grafana/grafana-docker/issues/141), thx [@Spindel](https://github.com/Spindel), [@ChristianKniep](https://github.com/ChristianKniep), [@brancz](https://github.com/brancz) and [@jangaraj](https://github.com/jangaraj)
* **Folders**: A folder admin cannot add user/team permissions for folder/its dashboards [#11173](https://github.com/grafana/grafana/issues/11173)
* **Provisioning**: Improved workflow for provisioned dashboards [#10883](https://github.com/grafana/grafana/issues/10883)
### Minor
* **OpsGenie**: Add triggered alerts as description [#11046](https://github.com/grafana/grafana/pull/11046), thx [@llamashoes](https://github.com/llamashoes)
* **Cloudwatch**: Support high resolution metrics [#10925](https://github.com/grafana/grafana/pull/10925), thx [@mtanda](https://github.com/mtanda)
* **Cloudwatch**: Add dimension filtering to CloudWatch `dimension_values()` [#10029](https://github.com/grafana/grafana/issues/10029), thx [@willyhutw](https://github.com/willyhutw)
@ -23,9 +60,47 @@
* **Dashboards**: Version cleanup fails on old databases with many entries [#11278](https://github.com/grafana/grafana/issues/11278)
* **Server**: Adjust permissions of unix socket [#11343](https://github.com/grafana/grafana/pull/11343), thx [@corny](https://github.com/corny)
* **Shortcuts**: Add shortcut for duplicate panel [#11102](https://github.com/grafana/grafana/issues/11102)
* **AuthProxy**: Support IPv6 in Auth proxy white list [#11330](https://github.com/grafana/grafana/pull/11330), thx [@corny](https://github.com/corny)
* **SMTP**: Don't connect to SMTP server using TLS unless configured. [#7189](https://github.com/grafana/grafana/issues/7189)
* **Prometheus**: Escape backslash in labels correctly. [#10555](https://github.com/grafana/grafana/issues/10555), thx [@roidelapluie](https://github.com/roidelapluie)
* **Variables**: Case-insensitive sorting for template values [#11128](https://github.com/grafana/grafana/issues/11128) thx [@cross](https://github.com/cross)
* **Annotations (native)**: Change default limit from 10 to 100 when querying api [#11569](https://github.com/grafana/grafana/issues/11569), thx [@flopp999](https://github.com/flopp999)
* **MySQL/Postgres/MSSQL**: PostgreSQL datasource generates invalid query with dates before 1970 [#11530](https://github.com/grafana/grafana/issues/11530) thx [@ryantxu](https://github.com/ryantxu)
* **Kiosk**: Adds url parameter for starting a dashboard in inactive mode [#11228](https://github.com/grafana/grafana/issues/11228), thx [@towolf](https://github.com/towolf)
* **Dashboard**: Enable closing timepicker using escape key [#11332](https://github.com/grafana/grafana/issues/11332)
* **Datasources**: Rename direct access mode in the data source settings [#11391](https://github.com/grafana/grafana/issues/11391)
* **Search**: Display dashboards in folder indented [#11073](https://github.com/grafana/grafana/issues/11073)
* **Units**: Use B/s instead of Bps for Bytes per second [#9342](https://github.com/grafana/grafana/pull/9342), thx [@mayli](https://github.com/mayli)
* **Units**: Radiation units [#11001](https://github.com/grafana/grafana/issues/11001), thx [@victorclaessen](https://github.com/victorclaessen)
* **Units**: Timeticks unit [#11183](https://github.com/grafana/grafana/pull/11183), thx [@jtyr](https://github.com/jtyr)
* **Units**: Concentration units and "Normal cubic metre" [#11211](https://github.com/grafana/grafana/issues/11211), thx [@flopp999](https://github.com/flopp999)
* **Units**: New currency - Czech koruna [#11384](https://github.com/grafana/grafana/pull/11384), thx [@Rohlik](https://github.com/Rohlik)
* **Avatar**: Fix DISABLE_GRAVATAR option [#11095](https://github.com/grafana/grafana/issues/11095)
* **Heatmap**: Disable log scale when using time series buckets [#10792](https://github.com/grafana/grafana/issues/10792)
* **Provisioning**: Remove `id` from json when provisioning dashboards, [#11138](https://github.com/grafana/grafana/issues/11138)
* **Prometheus**: tooltip for legend format not showing properly [#11516](https://github.com/grafana/grafana/issues/11516), thx [@svenklemm](https://github.com/svenklemm)
* **Playlist**: Empty playlists cannot be deleted [#11133](https://github.com/grafana/grafana/issues/11133), thx [@kichristensen](https://github.com/kichristensen)
* **Switch Orgs**: Alphabetic order in Switch Organization modal [#11556](https://github.com/grafana/grafana/issues/11556)
* **Postgres**: improve `$__timeFilter` macro [#11578](https://github.com/grafana/grafana/issues/11578), thx [@svenklemm](https://github.com/svenklemm)
* **Permission list**: Improved ux [#10747](https://github.com/grafana/grafana/issues/10747)
* **Dashboard**: Sizing and positioning of settings menu icons [#11572](https://github.com/grafana/grafana/pull/11572)
* **Dashboard**: Add search filter/tabs to new panel control [#10427](https://github.com/grafana/grafana/issues/10427)
* **Folders**: User with org viewer role should not be able to save/move dashboards in/to general folder [#11553](https://github.com/grafana/grafana/issues/11553)
* **Influxdb**: Don't assume the first column in table response is time. [#11476](https://github.com/grafana/grafana/issues/11476), thx [@hahnjo](https://github.com/hahnjo)
# 5.0.4 (unreleased)
* **Dashboard** Fixed bug where collapsed panels could not be directly linked to/renderer [#11114](https://github.com/grafana/grafana/issues/11114) & [#11086](https://github.com/grafana/grafana/issues/11086)
### Tech
* Backend code simplification [#11613](https://github.com/grafana/grafana/pull/11613), thx [@knweiss](https://github.com/knweiss)
* Add codespell to CI [#11602](https://github.com/grafana/grafana/pull/11602), thx [@mjtrangoni](https://github.com/mjtrangoni)
* Migrated JavaScript files to TypeScript
# 5.0.4 (2018-03-28)
* **Docker** Can't start Grafana on Kubernetes 1.7.14, 1.8.9, or 1.9.4 [#140 in grafana-docker repo](https://github.com/grafana/grafana-docker/issues/140) thx [@suquant](https://github.com/suquant)
* **Dashboard** Fixed bug where collapsed panels could not be directly linked to/renderer [#11114](https://github.com/grafana/grafana/issues/11114) & [#11086](https://github.com/grafana/grafana/issues/11086) & [#11296](https://github.com/grafana/grafana/issues/11296)
* **Dashboard** Provisioning dashboard with alert rules should create alerts [#11247](https://github.com/grafana/grafana/issues/11247)
* **Snapshots** For snapshots, the Graph panel renders the legend incorrectly on right hand side [#11318](https://github.com/grafana/grafana/issues/11318)
* **Alerting** Link back to Grafana returns wrong URL if root_path contains sub-path components [#11403](https://github.com/grafana/grafana/issues/11403)
* **Alerting** Incorrect default value for upload images setting for alert notifiers [#11413](https://github.com/grafana/grafana/pull/11413)
# 5.0.3 (2018-03-16)
* **Mysql**: Mysql panic occurring occasionally upon Grafana dashboard access (a bigger patch than the one in 5.0.2) [#11155](https://github.com/grafana/grafana/issues/11155)
@ -44,7 +119,7 @@
* **Dashboards**: Changing templated value from dropdown is causing unsaved changes [#11063](https://github.com/grafana/grafana/issues/11063)
* **Prometheus**: Fixes bundled Prometheus 2.0 dashboard [#11016](https://github.com/grafana/grafana/issues/11016), thx [@roidelapluie](https://github.com/roidelapluie)
* **Sidemenu**: Profile menu "invisible" when gravatar is disabled [#11097](https://github.com/grafana/grafana/issues/11097)
* **Dashboard**: Fixes a bug with resizeable handles for panels [#11103](https://github.com/grafana/grafana/issues/11103)
* **Dashboard**: Fixes a bug with resizable handles for panels [#11103](https://github.com/grafana/grafana/issues/11103)
* **Alerting**: Telegram inline image mode fails when caption too long [#10975](https://github.com/grafana/grafana/issues/10975)
* **Alerting**: Fixes silent failing validation [#11145](https://github.com/grafana/grafana/pull/11145)
* **OAuth**: Only use jwt token if it contains an email address [#11127](https://github.com/grafana/grafana/pull/11127)
@ -108,7 +183,7 @@ Grafana v5.0 is going to be the biggest and most foundational release Grafana ha
### New Major Features
- **Dashboards** Dashboard folders, [#1611](https://github.com/grafana/grafana/issues/1611)
- **Teams** User groups (teams) implemented. Can be used in folder & dashboard permission list.
- **Dashboard grid**: Panels are now layed out in a two dimensional grid (with x, y, w, h). [#9093](https://github.com/grafana/grafana/issues/9093).
- **Dashboard grid**: Panels are now laid out in a two dimensional grid (with x, y, w, h). [#9093](https://github.com/grafana/grafana/issues/9093).
- **Templating**: Vertical repeat direction for panel repeats.
- **UX**: Major update to page header and navigation
- **Dashboard settings**: Combine dashboard settings views into one with side menu, [#9750](https://github.com/grafana/grafana/issues/9750)
@ -142,7 +217,7 @@ Dashboard panels and rows are positioned using a gridPos object `{x: 0, y: 0, w:
* **Dashboard history**: New config file option versions_to_keep sets how many versions per dashboard to store, [#9671](https://github.com/grafana/grafana/issues/9671)
* **Dashboard as cfg**: Load dashboards from file into Grafana on startup/change [#9654](https://github.com/grafana/grafana/issues/9654) [#5269](https://github.com/grafana/grafana/issues/5269)
* **Prometheus**: Grafana can now send alerts to Prometheus Alertmanager while firing [#7481](https://github.com/grafana/grafana/issues/7481), thx [@Thib17](https://github.com/Thib17) and [@mtanda](https://github.com/mtanda)
* **Table**: Support multiple table formated queries in table panel [#9170](https://github.com/grafana/grafana/issues/9170), thx [@davkal](https://github.com/davkal)
* **Table**: Support multiple table formatted queries in table panel [#9170](https://github.com/grafana/grafana/issues/9170), thx [@davkal](https://github.com/davkal)
* **Security**: Protect against brute force (frequent) login attempts [#7616](https://github.com/grafana/grafana/issues/7616)
## Minor
@ -164,7 +239,7 @@ Dashboard panels and rows are positioned using a gridPos object `{x: 0, y: 0, w:
* **Sensu**: Send alert message to sensu output [#9551](https://github.com/grafana/grafana/issues/9551), thx [@cjchand](https://github.com/cjchand)
* **Singlestat**: suppress error when result contains no datapoints [#9636](https://github.com/grafana/grafana/issues/9636), thx [@utkarshcmu](https://github.com/utkarshcmu)
* **Postgres/MySQL**: Control quoting in SQL-queries when using template variables [#9030](https://github.com/grafana/grafana/issues/9030), thanks [@svenklemm](https://github.com/svenklemm)
* **Pagerduty**: Pagerduty dont auto resolve incidents by default anymore. [#10222](https://github.com/grafana/grafana/issues/10222)
* **Pagerduty**: Pagerduty don't auto resolve incidents by default anymore. [#10222](https://github.com/grafana/grafana/issues/10222)
* **Cloudwatch**: Fix for multi-valued templated queries. [#9903](https://github.com/grafana/grafana/issues/9903)
## Tech
@ -242,7 +317,7 @@ The following properties have been deprecated and will be removed in a future re
* **Annotations**: Add support for creating annotations from graph panel [#8197](https://github.com/grafana/grafana/pull/8197)
* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
* **Graph**: Add support for local formating in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Jaeger**: Add support for open tracing using jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
* **CLI**: Make it possible to install plugins from any url [#5873](https://github.com/grafana/grafana/issues/5873)
@ -279,7 +354,7 @@ The following properties have been deprecated and will be removed in a future re
* **Graphite**: Fix for Grafana internal metrics to Graphite sending NaN values [#9279](https://github.com/grafana/grafana/issues/9279)
* **HTTP API**: Fix for HEAD method requests [#9307](https://github.com/grafana/grafana/issues/9307)
* **Templating**: Fix for duplicate template variable queries when refresh is set to time range change [#9185](https://github.com/grafana/grafana/issues/9185)
* **Metrics**: dont write NaN values to graphite [#9279](https://github.com/grafana/grafana/issues/9279)
* **Metrics**: don't write NaN values to graphite [#9279](https://github.com/grafana/grafana/issues/9279)
# 4.5.1 (2017-09-15)
@ -316,12 +391,12 @@ The following properties have been deprecated and will be removed in a future re
### Breaking change
* **InfluxDB/Elasticsearch**: The panel & data source option named "Group by time interval" is now named "Min time interval" and does now always define a lower limit for the auto group by time. Without having to use `>` prefix (that prefix still works). This should in theory have close to zero actual impact on existing dashboards. It does mean that if you used this setting to define a hard group by time interval of, say "1d", if you zoomed to a time range wide enough the time range could increase above the "1d" range as the setting is now always considered a lower limit.
* **Elasticsearch**: Elasticsearch metric queries without date histogram now return table formated data making table panel much easier to use for this use case. Should not break/change existing dashboards with stock panels but external panel plugins can be affected.
* **Elasticsearch**: Elasticsearch metric queries without date histogram now return table formatted data making table panel much easier to use for this use case. Should not break/change existing dashboards with stock panels but external panel plugins can be affected.
## Changes
* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
* **InfluxDB**: Added paranthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
* **InfluxDB**: Added parenthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
## Bug Fixes
@ -333,7 +408,7 @@ The following properties have been deprecated and will be removed in a future re
## Bug Fixes
* **Search**: Fix for issue that casued search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
* **Search**: Fix for issue that caused search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
* **Modals**: ESC key now closes modal again, fixes [#8981](https://github.com/grafana/grafana/issues/8988), thx [@j-white](https://github.com/j-white)
# 4.4.2 (2017-08-01)
@ -672,12 +747,12 @@ due to too many connections/file handles on the data source backend. This proble
### Enhancements
* **Login**: Adds option to disable username/password logins, closes [#4674](https://github.com/grafana/grafana/issues/4674)
* **SingleStat**: Add seriename as option in singlestat panel, closes [#4740](https://github.com/grafana/grafana/issues/4740)
* **Localization**: Week start day now dependant on browser locale setting, closes [#3003](https://github.com/grafana/grafana/issues/3003)
* **Localization**: Week start day now dependent on browser locale setting, closes [#3003](https://github.com/grafana/grafana/issues/3003)
* **Templating**: Update panel repeats for variables that change on time refresh, closes [#5021](https://github.com/grafana/grafana/issues/5021)
* **Templating**: Add support for numeric and alphabetical sorting of variable values, closes [#2839](https://github.com/grafana/grafana/issues/2839)
* **Elasticsearch**: Support to set Precision Threshold for Unique Count metric, closes [#4689](https://github.com/grafana/grafana/issues/4689)
* **Navigation**: Add search to org swithcer, closes [#2609](https://github.com/grafana/grafana/issues/2609)
* **Database**: Allow database config using one propertie, closes [#5456](https://github.com/grafana/grafana/pull/5456)
* **Database**: Allow database config using one property, closes [#5456](https://github.com/grafana/grafana/pull/5456)
* **Graphite**: Add support for groupByNodes, closes [#5613](https://github.com/grafana/grafana/pull/5613)
* **Influxdb**: Add support for elapsed(), closes [#5827](https://github.com/grafana/grafana/pull/5827)
* **OpenTSDB**: Add support for explicitTags for OpenTSDB>=2.3, closes [#6360](https://github.com/grafana/grafana/pull/6361)
@ -744,7 +819,7 @@ due to too many connections/file handles on the data source backend. This proble
* **Datasource**: Pending data source requests are cancelled before new ones are issues (Graphite & Prometheus), closes [#5321](https://github.com/grafana/grafana/issues/5321)
### Breaking changes
* **Logging** : Changed default logging output format (now structured into message, and key value pairs, with logger key acting as component). You can also no change in config to json log ouput.
* **Logging** : Changed default logging output format (now structured into message, and key value pairs, with logger key acting as component). You can also no change in config to json log output.
* **Graphite** : The Graph panel no longer have a Graphite PNG option. closes [#5367](https://github.com/grafana/grafana/issues/5367)
### Bug fixes
@ -762,7 +837,7 @@ due to too many connections/file handles on the data source backend. This proble
* **Annotations**: Annotations can now use a template variable as data source, closes [#5054](https://github.com/grafana/grafana/issues/5054)
* **Time picker**: Fixed issue timepicker and UTC when reading time from URL, fixes [#5078](https://github.com/grafana/grafana/issues/5078)
* **CloudWatch**: Support for Multiple Account by AssumeRole, closes [#3522](https://github.com/grafana/grafana/issues/3522)
* **Singlestat**: Fixed alignment and minium height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
* **Singlestat**: Fixed alignment and minimum height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
* **Share modal**: Fixed link when using grafana under dashboard sub url, fixes [#5109](https://github.com/grafana/grafana/issues/5109)
* **Prometheus**: Fixed bug in query editor that caused it not to load when reloading page, fixes [#5107](https://github.com/grafana/grafana/issues/5107)
* **Elasticsearch**: Fixed bug when template variable query returns numeric values, fixes [#5097](https://github.com/grafana/grafana/issues/5097), fixes [#5088](https://github.com/grafana/grafana/issues/5088)
@ -779,7 +854,7 @@ due to too many connections/file handles on the data source backend. This proble
* **Graph**: Fixed broken PNG rendering in graph panel, fixes [#5025](https://github.com/grafana/grafana/issues/5025)
* **Graph**: Fixed broken xaxis on graph panel, fixes [#5024](https://github.com/grafana/grafana/issues/5024)
* **Influxdb**: Fixes crash when hiding middle serie, fixes [#5005](https://github.com/grafana/grafana/issues/5005)
* **Influxdb**: Fixes crash when hiding middle series, fixes [#5005](https://github.com/grafana/grafana/issues/5005)
# 3.0.1 Stable (2016-05-11)
@ -791,7 +866,7 @@ due to too many connections/file handles on the data source backend. This proble
### Bug fixes
* **Dashboard title**: Fixed max dashboard title width (media query) for large screens, fixes [#4859](https://github.com/grafana/grafana/issues/4859)
* **Annotations**: Fixed issue with entering annotation edit view, fixes [#4857](https://github.com/grafana/grafana/issues/4857)
* **Remove query**: Fixed issue with removing query for data sources without collapsable query editors, fixes [#4856](https://github.com/grafana/grafana/issues/4856)
* **Remove query**: Fixed issue with removing query for data sources without collapsible query editors, fixes [#4856](https://github.com/grafana/grafana/issues/4856)
* **Graphite PNG**: Fixed issue graphite png rendering option, fixes [#4864](https://github.com/grafana/grafana/issues/4864)
* **InfluxDB**: Fixed issue missing plus group by iconn, fixes [#4862](https://github.com/grafana/grafana/issues/4862)
* **Graph**: Fixes missing line mode for thresholds, fixes [#4902](https://github.com/grafana/grafana/pull/4902)
@ -807,11 +882,11 @@ due to too many connections/file handles on the data source backend. This proble
### Bug fixes
* **InfluxDB 0.12**: Fixed issue templating and `show tag values` query only returning tags for first measurement, fixes [#4726](https://github.com/grafana/grafana/issues/4726)
* **Templating**: Fixed issue with regex formating when matching multiple values, fixes [#4755](https://github.com/grafana/grafana/issues/4755)
* **Templating**: Fixed issue with regex formatting when matching multiple values, fixes [#4755](https://github.com/grafana/grafana/issues/4755)
* **Templating**: Fixed issue with custom all value and escaping, fixes [#4736](https://github.com/grafana/grafana/issues/4736)
* **Dashlist**: Fixed issue dashboard list panel and caching tags, fixes [#4768](https://github.com/grafana/grafana/issues/4768)
* **Graph**: Fixed issue with unneeded scrollbar in legend for Firefox, fixes [#4760](https://github.com/grafana/grafana/issues/4760)
* **Table panel**: Fixed issue table panel formating string array properties, fixes [#4791](https://github.com/grafana/grafana/issues/4791)
* **Table panel**: Fixed issue table panel formatting string array properties, fixes [#4791](https://github.com/grafana/grafana/issues/4791)
* **grafana-cli**: Improve error message when failing to install plugins due to corrupt response, fixes [#4651](https://github.com/grafana/grafana/issues/4651)
* **Singlestat**: Fixes prefix an postfix for gauges, fixes [#4812](https://github.com/grafana/grafana/issues/4812)
* **Singlestat**: Fixes auto-refresh on change for some options, fixes [#4809](https://github.com/grafana/grafana/issues/4809)
@ -903,7 +978,7 @@ slack channel (link to slack channel in readme).
### Bug fixes
* **Playlist**: Fix for memory leak when running a playlist, closes [#3794](https://github.com/grafana/grafana/pull/3794)
* **InfluxDB**: Fix for InfluxDB and table panel when using Format As Table and having group by time, fixes [#3928](https://github.com/grafana/grafana/issues/3928)
* **Panel Time shift**: Fix for panel time range and using dashboard times liek `Today` and `This Week`, fixes [#3941](https://github.com/grafana/grafana/issues/3941)
* **Panel Time shift**: Fix for panel time range and using dashboard times like `Today` and `This Week`, fixes [#3941](https://github.com/grafana/grafana/issues/3941)
* **Row repeat**: Repeated rows will now appear next to each other and not by the bottom of the dashboard, fixes [#3942](https://github.com/grafana/grafana/issues/3942)
* **Png renderer**: Fix for phantomjs path on windows, fixes [#3657](https://github.com/grafana/grafana/issues/3657)
@ -927,7 +1002,7 @@ slack channel (link to slack channel in readme).
### Bug Fixes
* **metric editors**: Fix for clicking typeahead auto dropdown option, fixes [#3428](https://github.com/grafana/grafana/issues/3428)
* **influxdb**: Fixed issue showing Group By label only on first query, fixes [#3453](https://github.com/grafana/grafana/issues/3453)
* **logging**: Add more verbose info logging for http reqeusts, closes [#3405](https://github.com/grafana/grafana/pull/3405)
* **logging**: Add more verbose info logging for http requests, closes [#3405](https://github.com/grafana/grafana/pull/3405)
# 2.6.0-Beta1 (2015-12-04)
@ -954,7 +1029,7 @@ slack channel (link to slack channel in readme).
**New Feature: Mix data sources**
- A built in data source is now available named `-- Mixed --`, When picked in the metrics tab,
it allows you to add queries of differnet data source types & instances to the same graph/panel!
it allows you to add queries of different data source types & instances to the same graph/panel!
[Issue #436](https://github.com/grafana/grafana/issues/436)
**New Feature: Elasticsearch Metrics Query Editor and Viz Support**
@ -993,7 +1068,7 @@ it allows you to add queries of differnet data source types & instances to the s
- [Issue #2564](https://github.com/grafana/grafana/issues/2564). Templating: Another atempt at fixing #2534 (Init multi value template var used in repeat panel from url)
- [Issue #2620](https://github.com/grafana/grafana/issues/2620). Graph: multi series tooltip did no highlight correct point when stacking was enabled and series were of different resolution
- [Issue #2636](https://github.com/grafana/grafana/issues/2636). InfluxDB: Do no show template vars in dropdown for tag keys and group by keys
- [Issue #2604](https://github.com/grafana/grafana/issues/2604). InfluxDB: More alias options, can now use `$[0-9]` syntax to reference part of a measurement name (seperated by dots)
- [Issue #2604](https://github.com/grafana/grafana/issues/2604). InfluxDB: More alias options, can now use `$[0-9]` syntax to reference part of a measurement name (separated by dots)
**Breaking Changes**
- Notice to makers/users of custom data sources, there is a minor breaking change in 2.2 that
@ -1075,7 +1150,7 @@ Grunt & Watch tasks:
- [Issue #1826](https://github.com/grafana/grafana/issues/1826). User role 'Viewer' are now prohibited from entering edit mode (and doing other transient dashboard edits). A new role `Read Only Editor` will replace the old Viewer behavior
- [Issue #1928](https://github.com/grafana/grafana/issues/1928). HTTP API: GET /api/dashboards/db/:slug response changed property `model` to `dashboard` to match the POST request nameing
- Backend render URL changed from `/render/dashboard/solo` `render/dashboard-solo/` (in order to have consistent dashboard url `/dashboard/:type/:slug`)
- Search HTTP API response has changed (simplified), tags list moved to seperate HTTP resource URI
- Search HTTP API response has changed (simplified), tags list moved to separate HTTP resource URI
- Datasource HTTP api breaking change, ADD datasource is now POST /api/datasources/, update is now PUT /api/datasources/:id
**Fixes**
@ -1092,7 +1167,7 @@ Grunt & Watch tasks:
# 2.0.2 (2015-04-22)
**Fixes**
- [Issue #1832](https://github.com/grafana/grafana/issues/1832). Graph Panel + Legend Table mode: Many series casued zero height graph, now legend will never reduce the height of the graph below 50% of row height.
- [Issue #1832](https://github.com/grafana/grafana/issues/1832). Graph Panel + Legend Table mode: Many series caused zero height graph, now legend will never reduce the height of the graph below 50% of row height.
- [Issue #1846](https://github.com/grafana/grafana/issues/1846). Snapshots: Fixed issue with snapshoting dashboards with an interval template variable
- [Issue #1848](https://github.com/grafana/grafana/issues/1848). Panel timeshift: You can now use panel timeshift without a relative time override
@ -1134,7 +1209,7 @@ Grunt & Watch tasks:
**Fixes**
- [Issue #1649](https://github.com/grafana/grafana/issues/1649). HTTP API: grafana /render calls nows with api keys
- [Issue #1667](https://github.com/grafana/grafana/issues/1667). Datasource proxy & session timeout fix (casued 401 Unauthorized error after a while)
- [Issue #1667](https://github.com/grafana/grafana/issues/1667). Datasource proxy & session timeout fix (caused 401 Unauthorized error after a while)
- [Issue #1707](https://github.com/grafana/grafana/issues/1707). Unsaved changes: Do not show for snapshots, scripted and file based dashboards
- [Issue #1703](https://github.com/grafana/grafana/issues/1703). Unsaved changes: Do not show for users with role `Viewer`
- [Issue #1675](https://github.com/grafana/grafana/issues/1675). Data source proxy: Fixed issue with Gzip enabled and data source proxy
@ -1147,14 +1222,14 @@ Grunt & Watch tasks:
**Important Note**
Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated backend server. Please read the [Documentation](http://docs.grafana.org) for more detailed about this SIGNIFCANT change to Grafana
Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated backend server. Please read the [Documentation](http://docs.grafana.org) for more detailed about this SIGNIFICANT change to Grafana
**New features**
- [Issue #1623](https://github.com/grafana/grafana/issues/1623). Share Dashboard: Dashboard snapshot sharing (dash and data snapshot), save to local or save to public snapshot dashboard snapshots.raintank.io site
- [Issue #1622](https://github.com/grafana/grafana/issues/1622). Share Panel: The share modal now has an embed option, gives you an iframe that you can use to embedd a single graph on another web site
- [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes inbetween the user is promted with a warning if he really wants to overwrite the other's changes
- [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes in between the user is promted with a warning if he really wants to overwrite the other's changes
- [Issue #1331](https://github.com/grafana/grafana/issues/1331). Graph & Singlestat: New axis/unit format selector and more units (kbytes, Joule, Watt, eV), and new design for graph axis & grid tab and single stat options tab views
- [Issue #1241](https://github.com/grafana/grafana/issues/1242). Timepicker: New option in timepicker (under dashboard settings), to change ``now`` to be for example ``now-1m``, usefull when you want to ignore last minute because it contains incomplete data
- [Issue #1241](https://github.com/grafana/grafana/issues/1242). Timepicker: New option in timepicker (under dashboard settings), to change ``now`` to be for example ``now-1m``, useful when you want to ignore last minute because it contains incomplete data
- [Issue #171](https://github.com/grafana/grafana/issues/171). Panel: Different time periods, panels can override dashboard relative time and/or add a time shift
- [Issue #1488](https://github.com/grafana/grafana/issues/1488). Dashboard: Clone dashboard / Save as
- [Issue #1458](https://github.com/grafana/grafana/issues/1458). User: persisted user option for dark or light theme (no longer an option on a dashboard)
@ -1185,7 +1260,7 @@ Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated
**OpenTSDB breaking change**
- [Issue #1438](https://github.com/grafana/grafana/issues/1438). OpenTSDB: Automatic downsample interval passed to OpenTSDB (depends on timespan and graph width)
- NOTICE, Downsampling is now enabled by default, so if you have not picked a downsample aggregator in your metric query do so or your graphs will be missleading
- NOTICE, Downsampling is now enabled by default, so if you have not picked a downsample aggregator in your metric query do so or your graphs will be misleading
- This will make Grafana a lot quicker for OpenTSDB users when viewing large time spans without having to change the downsample interval manually.
**Tech**
@ -1216,7 +1291,7 @@ Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated
- [Issue #1114](https://github.com/grafana/grafana/issues/1114). Graphite: Lexer fix, allow equal sign (=) in metric paths
- [Issue #1136](https://github.com/grafana/grafana/issues/1136). Graph: Fix to legend value Max and negative values
- [Issue #1150](https://github.com/grafana/grafana/issues/1150). SinglestatPanel: Fixed absolute drilldown link issue
- [Issue #1123](https://github.com/grafana/grafana/issues/1123). Firefox: Workaround for Firefox bug, casued input text fields to not be selectable and not have placeable cursor
- [Issue #1123](https://github.com/grafana/grafana/issues/1123). Firefox: Workaround for Firefox bug, caused input text fields to not be selectable and not have placeable cursor
- [Issue #1108](https://github.com/grafana/grafana/issues/1108). Graph: Fix for tooltip series order when series draw order was changed with zindex property
# 1.9.0-rc1 (2014-11-17)
@ -1293,7 +1368,7 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #234](https://github.com/grafana/grafana/issues/234). Templating: Interval variable type for time intervals summarize/group by parameter, included "auto" option, and auto step counts option.
- [Issue #262](https://github.com/grafana/grafana/issues/262). Templating: Ability to use template variables for function parameters via custom variable type, can be used as parameter for movingAverage or scaleToSeconds for example
- [Issue #312](https://github.com/grafana/grafana/issues/312). Templating: Can now use template variables in panel titles
- [Issue #613](https://github.com/grafana/grafana/issues/613). Templating: Full support for InfluxDB, filter by part of series names, extract series substrings, nested queries, multipe where clauses!
- [Issue #613](https://github.com/grafana/grafana/issues/613). Templating: Full support for InfluxDB, filter by part of series names, extract series substrings, nested queries, multiple where clauses!
- Template variables can be initialized from url, with var-my_varname=value, breaking change, before it was just my_varname.
- Templating and url state sync has some issues that are not solved for this release, see [Issue #772](https://github.com/grafana/grafana/issues/772) for more details.
@ -1382,7 +1457,7 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #136](https://github.com/grafana/grafana/issues/136). Graph: New legend display option "Align as table"
- [Issue #556](https://github.com/grafana/grafana/issues/556). Graph: New legend display option "Right side", will show legend to the right of the graph
- [Issue #604](https://github.com/grafana/grafana/issues/604). Graph: New axis format, 'bps' (SI unit in steps of 1000) useful for network gear metics
- [Issue #626](https://github.com/grafana/grafana/issues/626). Graph: Downscale y axis to more precise unit, value of 0.1 for seconds format will be formated as 100 ms. Thanks @kamaradclimber
- [Issue #626](https://github.com/grafana/grafana/issues/626). Graph: Downscale y axis to more precise unit, value of 0.1 for seconds format will be formatted as 100 ms. Thanks @kamaradclimber
- [Issue #618](https://github.com/grafana/grafana/issues/618). OpenTSDB: Series alias option to override metric name returned from opentsdb. Thanks @heldr
**Documentation**
@ -1412,13 +1487,13 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #522](https://github.com/grafana/grafana/issues/522). Series names and column name typeahead cache fix
- [Issue #504](https://github.com/grafana/grafana/issues/504). Fixed influxdb issue with raw query that caused wrong value column detection
- [Issue #526](https://github.com/grafana/grafana/issues/526). Default property that marks which datasource is default in config.js is now optional
- [Issue #342](https://github.com/grafana/grafana/issues/342). Auto-refresh caused 2 refreshes (and hence mulitple queries) each time (at least in firefox)
- [Issue #342](https://github.com/grafana/grafana/issues/342). Auto-refresh caused 2 refreshes (and hence multiple queries) each time (at least in firefox)
# 1.6.0 (2014-06-16)
#### New features or improvements
- [Issue #427](https://github.com/grafana/grafana/issues/427). New Y-axis formater for metric values that represent seconds, Thanks @jippi
- [Issue #390](https://github.com/grafana/grafana/issues/390). Allow special characters in serie names (influxdb datasource), Thanks @majst01
- [Issue #390](https://github.com/grafana/grafana/issues/390). Allow special characters in series names (influxdb datasource), Thanks @majst01
- [Issue #428](https://github.com/grafana/grafana/issues/428). Refactoring of filterSrv, Thanks @Tetha
- [Issue #445](https://github.com/grafana/grafana/issues/445). New config for playlist feature. Set playlist_timespan to set default playlist interval, Thanks @rmca
- [Issue #461](https://github.com/grafana/grafana/issues/461). New graphite function definition added isNonNull, Thanks @tmonk42
@ -1439,13 +1514,13 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #475](https://github.com/grafana/grafana/issues/475). Add panel icon and Row edit button is replaced by the Row edit menu
- New graphs now have a default empty query
- Add Row button now creates a row with default height of 250px (no longer opens dashboard settings modal)
- Clean up of config.sample.js, graphiteUrl removed (still works, but depricated, removed in future)
- Clean up of config.sample.js, graphiteUrl removed (still works, but deprecated, removed in future)
Use datasources config instead. panel_names removed from config.js. Use plugins.panels to add custom panels
- Graphite panel is now renamed graph (Existing dashboards will still work)
#### Fixes
- [Issue #126](https://github.com/grafana/grafana/issues/126). Graphite query lexer change, can now handle regex parameters for aliasSub function
- [Issue #447](https://github.com/grafana/grafana/issues/447). Filter option loading when having muliple nested filters now works better. Options are now reloaded correctly and there are no multiple renders/refresh inbetween.
- [Issue #447](https://github.com/grafana/grafana/issues/447). Filter option loading when having muliple nested filters now works better. Options are now reloaded correctly and there are no multiple renders/refresh in between.
- [Issue #412](https://github.com/grafana/grafana/issues/412). After a filter option is changed and a nested template param is reloaded, if the current value exists after the options are reloaded the current selected value is kept.
- [Issue #460](https://github.com/grafana/grafana/issues/460). Legend Current value did not display when value was zero
- [Issue #328](https://github.com/grafana/grafana/issues/328). Fix to series toggling bug that caused annotations to be hidden when toggling/hiding series.

32
Gopkg.lock generated
View File

@ -111,6 +111,18 @@
]
revision = "270bc3860bb94dd3a3ffd047377d746c5e276726"
[[projects]]
branch = "master"
name = "github.com/facebookgo/inject"
packages = ["."]
revision = "cc1aa653e50f6a9893bcaef89e673e5b24e1e97b"
[[projects]]
branch = "master"
name = "github.com/facebookgo/structtag"
packages = ["."]
revision = "217e25fb96916cc60332e399c9aa63f5c422ceed"
[[projects]]
name = "github.com/fatih/color"
packages = ["."]
@ -351,6 +363,12 @@
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/prometheus/client_golang"
packages = [
@ -610,12 +628,6 @@
revision = "567b2bfa514e796916c4747494d6ff5132a1dfce"
version = "v1"
[[projects]]
branch = "v2"
name = "gopkg.in/gomail.v2"
packages = ["."]
revision = "81ebce5c23dfd25c6c67194b37d3dd3f338c98b1"
[[projects]]
name = "gopkg.in/ini.v1"
packages = ["."]
@ -628,6 +640,12 @@
revision = "75f2e9b42e99652f0d82b28ccb73648f44615faa"
version = "v1.2.4"
[[projects]]
branch = "v2"
name = "gopkg.in/mail.v2"
packages = ["."]
revision = "5bc5c8bb07bd8d2803831fbaf8cbd630fcde2c68"
[[projects]]
name = "gopkg.in/redis.v2"
packages = ["."]
@ -643,6 +661,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "8a9e651fb8ea49dfd3c6ddc99bd3242b39e453ea9edd11321da79bd2c865e9d1"
inputs-digest = "bd54a1a836599d90b36d4ac1af56d716ef9ca5be4865e217bddd49e3d32a1997"
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -172,7 +172,7 @@ ignored = [
name = "golang.org/x/sync"
[[constraint]]
name = "gopkg.in/gomail.v2"
name = "gopkg.in/mail.v2"
branch = "v2"
[[constraint]]
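
Since the project manages dependencies with dep (see the solve-meta block in Gopkg.lock above), a constraint change like this is normally followed by regenerating the lock file; a sketch of that step:

```bash
# Re-resolve dependencies and regenerate Gopkg.lock after editing Gopkg.toml
dep ensure

# Or update only the swapped mail package
dep ensure -update gopkg.in/mail.v2
```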

View File

@ -1,3 +1,5 @@
-include local/Makefile
all: deps build
deps-go:

View File

@ -9,6 +9,7 @@ upgrading Grafana please check here before creating an issue.
- [Datasource plugin written in typescript](https://github.com/grafana/typescript-template-datasource)
- [Simple json dataource plugin](https://github.com/grafana/simple-json-datasource)
- [Plugin development guide](http://docs.grafana.org/plugins/developing/development/)
- [Webpack Grafana plugin template project](https://github.com/CorpGlory/grafana-plugin-template-webpack)
## Changes in v4.6

View File

@ -39,12 +39,21 @@ go run build.go build
For this you need nodejs (v.6+).
To build the assets, rebuild on file change, and serve them by Grafana's webserver (http://localhost:3000):
```bash
npm install -g yarn
yarn install --pure-lockfile
npm run watch
```
Build the assets, rebuild on file change with Hot Module Replacement (HMR), and serve them by webpack-dev-server (http://localhost:3333):
```bash
yarn start
# OR set a theme
env GRAFANA_THEME=light yarn start
```
Note: HMR for Angular is not supported. If you edit files in the Angular part of the app, the whole page will reload.
Run tests
```bash
npm run jest
@ -55,6 +64,8 @@ Run karma tests
npm run karma
```
Run
### Recompile backend on source change
To rebuild on source change.
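
The section is truncated here; a hedged sketch of the usual workflow, assuming the bra file-watcher that the repository's .bra.toml (changed earlier in this commit) is written for — the import path is an assumption:

```bash
# Install the file watcher (import path assumed from the .bra.toml convention)
go get github.com/Unknwon/bra

# Watch .go/.ini/.toml files; on change this reruns
# `go run build.go -dev build-server` and restarts ./bin/grafana-server,
# as configured in .bra.toml
bra run
```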

View File

@ -1,26 +1,20 @@
# Roadmap (2018-02-22)
# Roadmap (2018-05-06)
This roadmap is a tentative plan for the core development team. Things change constantly as PRs come in and priorities change.
But it will give you an idea of our current vision and plan.
### Short term (1-2 months)
- v5.1
- Build speed improvements & integration test execution
- Kubernetes friendly docker container
- Enterprise LDAP
- Provisioning workflow
- MSSQL datasource
- Elasticsearch alerting
- Crossplatform builds
- Backend service refactorings
- Explore UI
- First login registration view
### Mid term (2-4 months)
- v5.2
- Azure monitor backend rewrite
- Elasticsearch alerting
- First login registration view
- Backend plugins? (alert notifiers, auth)
- Crossplatform builds
- IFQL Initial support
- Multi-Stat panel
- React Panels
- Templating Query Editor UI Plugin hook
### Long term (4 - 8 months)

View File

@ -6,8 +6,8 @@ clone_folder: c:\gopath\src\github.com\grafana\grafana
environment:
nodejs_version: "6"
GOPATH: c:\gopath
GOVERSION: 1.9.2
GOPATH: C:\gopath
GOVERSION: 1.10
install:
- rmdir c:\go /s /q

109
build.go
View File

@ -16,7 +16,6 @@ import (
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
@ -24,14 +23,14 @@ import (
)
var (
versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)
goarch string
goos string
gocc string
gocxx string
cgo string
pkgArch string
version string = "v1"
//versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)
goarch string
goos string
gocc string
gocxx string
cgo string
pkgArch string
version string = "v1"
// deb & rpm does not support semver so have to handle their version a little differently
linuxPackageVersion string = "v1"
linuxPackageIteration string = ""
@ -41,10 +40,10 @@ var (
includeBuildNumber bool = true
buildNumber int = 0
binaries []string = []string{"grafana-server", "grafana-cli"}
isDev bool = false
enterprise bool = false
)
const minGoVersion = 1.8
func main() {
log.SetOutput(os.Stdout)
log.SetFlags(0)
@ -60,7 +59,9 @@ func main() {
flag.StringVar(&phjsToRelease, "phjs", "", "PhantomJS binary")
flag.BoolVar(&race, "race", race, "Use race detector")
flag.BoolVar(&includeBuildNumber, "includeBuildNumber", includeBuildNumber, "IncludeBuildNumber in package name")
flag.BoolVar(&enterprise, "enterprise", enterprise, "Build enterprise version of Grafana")
flag.IntVar(&buildNumber, "buildNumber", 0, "Build number from CI system")
flag.BoolVar(&isDev, "dev", isDev, "optimal for development, skips certain steps")
flag.Parse()
readVersionFromPackageJson()
@ -284,19 +285,33 @@ func createPackage(options linuxPackageOptions) {
"-s", "dir",
"--description", "Grafana",
"-C", packageRoot,
"--vendor", "Grafana",
"--url", "https://grafana.com",
"--license", "\"Apache 2.0\"",
"--maintainer", "contact@grafana.com",
"--config-files", options.initdScriptFilePath,
"--config-files", options.etcDefaultFilePath,
"--config-files", options.systemdServiceFilePath,
"--after-install", options.postinstSrc,
"--name", "grafana",
"--version", linuxPackageVersion,
"-p", "./dist",
}
name := "grafana"
if enterprise {
name += "-enterprise"
}
args = append(args, "--name", name)
description := "Grafana"
if enterprise {
description += " Enterprise"
}
args = append(args, "--vendor", description)
if !enterprise {
args = append(args, "--license", "\"Apache 2.0\"")
}
if options.packageType == "rpm" {
args = append(args, "--rpm-posttrans", "packaging/rpm/control/posttrans")
}
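
Assembled, the argument list above amounts to an fpm invocation roughly like the following (a sketch for the non-enterprise deb case; the upper-case variables are placeholders for the option fields in the code):

```bash
fpm -t deb -s dir \
  --description "Grafana" \
  -C "$PACKAGE_ROOT" \
  --url https://grafana.com \
  --maintainer contact@grafana.com \
  --config-files "$INITD_SCRIPT" \
  --config-files "$ETC_DEFAULT_FILE" \
  --config-files "$SYSTEMD_SERVICE_FILE" \
  --after-install "$POSTINST_SRC" \
  --version "$LINUX_PACKAGE_VERSION" \
  -p ./dist \
  --name grafana \
  --vendor Grafana \
  --license "Apache 2.0"
```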
@ -324,20 +339,6 @@ func createPackage(options linuxPackageOptions) {
runPrint("fpm", append([]string{"-t", options.packageType}, args...)...)
}
func verifyGitRepoIsClean() {
rs, err := runError("git", "ls-files", "--modified")
if err != nil {
log.Fatalf("Failed to check if git tree was clean, %v, %v\n", string(rs), err)
return
}
count := len(string(rs))
if count > 0 {
log.Fatalf("Git repository has modified files, aborting")
}
log.Println("Git repository is clean")
}
func ensureGoPath() {
if os.Getenv("GOPATH") == "" {
cwd, err := os.Getwd()
@ -350,10 +351,6 @@ func ensureGoPath() {
}
}
func ChangeWorkingDir(dir string) {
os.Chdir(dir)
}
func grunt(params ...string) {
if runtime.GOOS == "windows" {
runPrint(`.\node_modules\.bin\grunt`, params...)
@ -394,7 +391,9 @@ func build(binaryName, pkg string, tags []string) {
binary += ".exe"
}
rmr(binary, binary+".md5")
if !isDev {
rmr(binary, binary+".md5")
}
args := []string{"build", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
@ -405,16 +404,21 @@ func build(binaryName, pkg string, tags []string) {
args = append(args, "-o", binary)
args = append(args, pkg)
setBuildEnv()
runPrint("go", "version")
if !isDev {
setBuildEnv()
runPrint("go", "version")
}
runPrint("go", args...)
// Create an md5 checksum of the binary, to be included in the archive for
// automatic upgrades.
err := md5File(binary)
if err != nil {
log.Fatal(err)
if !isDev {
// Create an md5 checksum of the binary, to be included in the archive for
// automatic upgrades.
err := md5File(binary)
if err != nil {
log.Fatal(err)
}
}
}
@ -424,6 +428,7 @@ func ldflags() string {
b.WriteString(fmt.Sprintf(" -X main.version=%s", version))
b.WriteString(fmt.Sprintf(" -X main.commit=%s", getGitSha()))
b.WriteString(fmt.Sprintf(" -X main.buildstamp=%d", buildStamp()))
b.WriteString(fmt.Sprintf(" -X main.enterprise=%t", enterprise))
return b.String()
}
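
The -X flags built here only take effect on package-level string variables in package main; a minimal sketch of the equivalent manual build command (the injected values are placeholders):

```bash
# Inject version metadata at link time, mirroring what ldflags() assembles
go build -ldflags "-X main.version=5.2.0-pre -X main.commit=c79b1bef15 \
  -X main.buildstamp=1525800000 -X main.enterprise=false" \
  -o ./bin/grafana-server ./pkg/cmd/grafana-server
```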
@ -435,6 +440,10 @@ func rmr(paths ...string) {
}
func clean() {
if isDev {
return
}
rmr("dist")
rmr("tmp")
rmr(filepath.Join(os.Getenv("GOPATH"), fmt.Sprintf("pkg/%s_%s/github.com/grafana", goos, goarch)))
@ -479,24 +488,6 @@ func buildStamp() int64 {
return s
}
func buildArch() string {
os := goos
if os == "darwin" {
os = "macosx"
}
return fmt.Sprintf("%s-%s", os, goarch)
}
func run(cmd string, args ...string) []byte {
bs, err := runError(cmd, args...)
if err != nil {
log.Println(cmd, strings.Join(args, " "))
log.Println(string(bs))
log.Fatal(err)
}
return bytes.TrimSpace(bs)
}
func runError(cmd string, args ...string) ([]byte, error) {
ecmd := exec.Command(cmd, args...)
bs, err := ecmd.CombinedOutput()
@ -550,7 +541,7 @@ func shaFilesInDist() {
return nil
}
if strings.Contains(path, ".sha256") == false {
if !strings.Contains(path, ".sha256") {
err := shaFile(path)
if err != nil {
log.Printf("Failed to create sha file. error: %v\n", err)

View File

@ -8,6 +8,4 @@ coverage:
patch: yes
changes: no
comment:
layout: "diff"
behavior: "once"
comment: off

View File

@ -442,6 +442,11 @@ enabled = true
# Makes it possible to turn off alert rule execution but alerting UI is visible
execute_alerts = true
#################################### Explore #############################
[explore]
# Enable the Explore section
enabled = false
#################################### Internal Grafana Metrics ############
# Metrics available at HTTP API Url /metrics
[metrics]
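
To try the new section without editing defaults.ini, the usual pattern is a local override (a sketch; conf/custom.ini is the conventional override file for source installs, and the section stays disabled unless you opt in):

```bash
# Enable the Explore UI in a local override, then restart grafana-server
cat >> conf/custom.ini <<'EOF'
[explore]
enabled = true
EOF
```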

View File

@ -64,7 +64,7 @@
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as seperate properties or as on string using the url propertie.
# as separate properties or as one string using the url property.
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
@ -377,6 +377,11 @@ log_queries =
# Makes it possible to turn off alert rule execution but alerting UI is visible
;execute_alerts = true
#################################### Explore #############################
[explore]
# Enable the Explore section
;enabled = false
#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /metrics
[metrics]

View File

@ -2,7 +2,7 @@
# http://localhost:3000 (Grafana running locally)
#
# Please note that you'll need to change the root_url in the Grafana configuration:
# root_url = %(protocol)s://%(domain)s:/grafana/
# root_url = %(protocol)s://%(domain)s:10081/grafana/
apacheproxy:
build: blocks/apache_proxy

View File

@ -38,7 +38,7 @@ CACHE_QUERY_PORT = 7002
LOG_UPDATES = False
# Enable AMQP if you want to receve metrics using an amqp broker
# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False
# Verbose means a line will be logged for every metric received

View File

@ -41,7 +41,7 @@ PICKLE_RECEIVER_PORT = 2004
CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002
# Enable AMQP if you want to receve metrics using you amqp broker
# Enable AMQP if you want to receive metrics using your amqp broker
ENABLE_AMQP = True
# Verbose means a line will be logged for every metric received

View File

@ -265,7 +265,7 @@ WHISPER_FALLOCATE_CREATE = True
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
# Enable AMQP if you want to receve metrics using an amqp broker
# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False
# Verbose means a line will be logged for every metric received

View File

@ -30,7 +30,7 @@ give_completer_focus = shift-space
# pertain only to specific metric types.
#
# The dashboard presents only metrics that fall into specified naming schemes
# defined in this file. This creates a simpler, more targetted view of the
# defined in this file. This creates a simpler, more targeted view of the
# data. The general form for defining a naming scheme is as follows:
#
#[Metric Type]

View File

@ -100,7 +100,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1521715844826,
"iteration": 1523320861623,
"links": [],
"panels": [
{
@ -443,7 +443,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -522,7 +526,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -601,7 +609,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -680,7 +692,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -759,7 +775,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -838,7 +858,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -927,7 +951,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1026,7 +1054,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1115,7 +1147,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1196,7 +1232,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1285,7 +1325,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1366,7 +1410,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1455,7 +1503,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1536,7 +1588,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1619,7 +1675,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1702,7 +1762,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1792,7 +1856,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1875,7 +1943,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1965,7 +2037,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2048,7 +2124,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2138,7 +2218,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2221,7 +2305,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2311,7 +2399,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2394,7 +2486,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": false,
@ -2504,5 +2600,5 @@
"timezone": "",
"title": "Microsoft SQL Server Data Source Test",
"uid": "GlAqcPgmz",
"version": 57
"version": 58
}

View File

@ -2,7 +2,7 @@
"__inputs": [
{
"name": "DS_MYSQL",
"label": "Mysql",
"label": "MySQL",
"description": "",
"type": "datasource",
"pluginId": "mysql",
@ -20,19 +20,19 @@
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
"version": "5.0.0"
},
{
"type": "datasource",
"id": "mysql",
"name": "MySQL",
"version": "1.0.0"
"version": "5.0.0"
},
{
"type": "panel",
"id": "table",
"name": "Table",
"version": ""
"version": "5.0.0"
}
],
"annotations": {
@ -53,7 +53,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1518602729468,
"iteration": 1523372133566,
"links": [],
"panels": [
{
@ -118,7 +118,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeShift": "1h",
"timeShift": null,
"title": "Average logins / $summarize",
"tooltip": {
"shared": true,
@ -150,7 +150,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -204,7 +208,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeShift": "1h",
"timeShift": null,
"title": "Average payments started/ended / $summarize",
"tooltip": {
"shared": true,
@ -236,7 +240,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -284,7 +292,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeShift": "1h",
"timeShift": null,
"title": "Max CPU / $summarize",
"tooltip": {
"shared": true,
@ -316,7 +324,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"columns": [],
@ -369,7 +381,7 @@
"target": ""
}
],
"timeShift": "1h",
"timeShift": null,
"title": "Values",
"transform": "table",
"type": "table"
@ -428,7 +440,6 @@
"auto_count": 5,
"auto_min": "10s",
"current": {
"selected": true,
"text": "1m",
"value": "1m"
},
@ -545,5 +556,5 @@
"timezone": "",
"title": "Grafana Fake Data Gen - MySQL",
"uid": "DGsCac3kz",
"version": 6
"version": 8
}

View File

@ -7,9 +7,6 @@
MYSQL_PASSWORD: password
ports:
- "3306:3306"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]
fake-mysql-data:

View File

@ -0,0 +1,3 @@
FROM mysql:latest
ADD setup.sql /docker-entrypoint-initdb.d
CMD ["mysqld"]

View File

@ -7,14 +7,6 @@
"type": "datasource",
"pluginId": "mysql",
"pluginName": "MySQL"
},
{
"name": "DS_MSSQL_TEST",
"label": "MSSQL Test",
"description": "",
"type": "datasource",
"pluginId": "mssql",
"pluginName": "Microsoft SQL Server"
}
],
"__requires": [
@ -30,12 +22,6 @@
"name": "Graph",
"version": "5.0.0"
},
{
"type": "datasource",
"id": "mssql",
"name": "Microsoft SQL Server",
"version": "1.0.0"
},
{
"type": "datasource",
"id": "mysql",
@ -114,7 +100,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1521715720483,
"iteration": 1523320712115,
"links": [],
"panels": [
{
@ -349,7 +335,7 @@
{
"alias": "Time",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "time_sec",
"pattern": "time",
"type": "date"
},
{
@ -457,7 +443,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -536,7 +526,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -615,7 +609,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -694,7 +692,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -773,7 +775,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -852,7 +858,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -941,7 +951,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1034,7 +1048,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1123,7 +1141,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1204,7 +1226,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1293,7 +1319,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1374,7 +1404,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1463,7 +1497,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1544,7 +1582,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1634,14 +1676,18 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_MSSQL_TEST}",
"datasource": "${DS_MYSQL_TEST}",
"fill": 1,
"gridPos": {
"h": 8,
@ -1717,7 +1763,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1807,7 +1857,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1890,7 +1944,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1980,7 +2038,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2063,7 +2125,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2153,7 +2219,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2236,7 +2306,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": false,
@ -2315,8 +2389,8 @@
]
},
"time": {
"from": "2018-03-15T11:30:00.000Z",
"to": "2018-03-15T12:55:01.000Z"
"from": "2018-03-15T12:30:00.000Z",
"to": "2018-03-15T13:55:01.000Z"
},
"timepicker": {
"refresh_intervals": [
@ -2346,5 +2420,5 @@
"timezone": "",
"title": "MySQL Data Source Test",
"uid": "Hmf8FDkmz",
"version": 9
"version": 12
}

View File

@ -1,5 +1,6 @@
mysqltests:
image: mysql:latest
build:
context: blocks/mysql_tests
environment:
MYSQL_ROOT_PASSWORD: rootpass
MYSQL_DATABASE: grafana_tests
@ -7,7 +8,4 @@
MYSQL_PASSWORD: password
ports:
- "3306:3306"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
tmpfs: /var/lib/mysql:rw

View File

@ -0,0 +1,2 @@
CREATE DATABASE grafana_ds_tests;
GRANT ALL PRIVILEGES ON grafana_ds_tests.* TO 'grafana';

View File

@ -2,7 +2,7 @@
# http://localhost:3000 (Grafana running locally)
#
# Please note that you'll need to change the root_url in the Grafana configuration:
# root_url = %(protocol)s://%(domain)s:/grafana/
# root_url = %(protocol)s://%(domain)s:10080/grafana/
nginxproxy:
build: blocks/nginx_proxy

View File

@ -17,6 +17,7 @@ EXPOSE 389
VOLUME ["/etc/ldap", "/var/lib/ldap"]
COPY modules/ /etc/ldap.dist/modules
COPY prepopulate/ /etc/ldap.dist/prepopulate
COPY entrypoint.sh /entrypoint.sh

View File

@ -65,7 +65,7 @@ EOF
fi
if [[ -n "$SLAPD_ADDITIONAL_SCHEMAS" ]]; then
IFS=","; declare -a schemas=($SLAPD_ADDITIONAL_SCHEMAS)
IFS=","; declare -a schemas=($SLAPD_ADDITIONAL_SCHEMAS); unset IFS
for schema in "${schemas[@]}"; do
slapadd -n0 -F /etc/ldap/slapd.d -l "/etc/ldap/schema/${schema}.ldif" >/dev/null 2>&1
@ -73,14 +73,18 @@ EOF
fi
if [[ -n "$SLAPD_ADDITIONAL_MODULES" ]]; then
IFS=","; declare -a modules=($SLAPD_ADDITIONAL_MODULES)
IFS=","; declare -a modules=($SLAPD_ADDITIONAL_MODULES); unset IFS
for module in "${modules[@]}"; do
slapadd -n0 -F /etc/ldap/slapd.d -l "/etc/ldap/modules/${module}.ldif" >/dev/null 2>&1
done
fi
chown -R openldap:openldap /etc/ldap/slapd.d/
for file in `ls /etc/ldap/prepopulate/*.ldif`; do
slapadd -F /etc/ldap/slapd.d -l "$file"
done
chown -R openldap:openldap /etc/ldap/slapd.d/ /var/lib/ldap/ /var/run/slapd/
else
slapd_configs_in_env=`env | grep 'SLAPD_'`

View File

@ -0,0 +1,13 @@
# Notes on OpenLdap Docker Block
Any ldif files added to the prepopulate subdirectory will be automatically imported into the OpenLdap database.
The ldif files add three users, `ldapviewer`, `ldapeditor` and `ldapadmin`. Two groups, `admins` and `users`, are added that correspond with the group mappings in the default conf/ldap.toml. `ldapadmin` is a member of `admins` and `ldapeditor` is a member of `users`.
Note that users that are added here need to specify a `memberOf` attribute manually as well as the `member` attribute for the group. The `memberOf` module usually does this automatically (if you add a group in Apache Directory Studio for example) but this does not work in the entrypoint script as it uses the `slapadd` command to add entries before the server has started and before the `memberOf` module is loaded.
After adding ldif files to `prepopulate`:
1. Remove your current docker container: `docker rm docker_openldap_1`
2. Build: `docker-compose build`
3. `docker-compose up`

View File

@ -0,0 +1,10 @@
dn: cn=ldapadmin,dc=grafana,dc=org
mail: ldapadmin@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldapadmin
cn: ldapadmin
memberOf: cn=admins,dc=grafana,dc=org

View File

@ -0,0 +1,5 @@
dn: cn=admins,dc=grafana,dc=org
cn: admins
member: cn=ldapadmin,dc=grafana,dc=org
objectClass: groupOfNames
objectClass: top

View File

@ -0,0 +1,10 @@
dn: cn=ldapeditor,dc=grafana,dc=org
mail: ldapeditor@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldapeditor
cn: ldapeditor
memberOf: cn=users,dc=grafana,dc=org

View File

@ -0,0 +1,5 @@
dn: cn=users,dc=grafana,dc=org
cn: users
member: cn=ldapeditor,dc=grafana,dc=org
objectClass: groupOfNames
objectClass: top

View File

@ -0,0 +1,9 @@
dn: cn=ldapviewer,dc=grafana,dc=org
mail: ldapviewer@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldapviewer
cn: ldapviewer

View File

@ -0,0 +1,3 @@
FROM postgres:latest
ADD setup.sql /docker-entrypoint-initdb.d
CMD ["postgres"]

View File

@ -100,7 +100,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1521725946837,
"iteration": 1523320929325,
"links": [],
"panels": [
{
@ -443,7 +443,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -522,7 +526,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -601,7 +609,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -680,7 +692,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -759,7 +775,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -838,7 +858,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -927,7 +951,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1008,7 +1036,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1097,7 +1129,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1178,7 +1214,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1267,7 +1307,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1348,7 +1392,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1437,7 +1485,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1518,7 +1570,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1608,7 +1664,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1691,7 +1751,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1781,7 +1845,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1864,7 +1932,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1954,7 +2026,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2037,7 +2113,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2127,7 +2207,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2210,7 +2294,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": false,

View File

@ -1,5 +1,6 @@
postgrestest:
image: postgres:latest
build:
context: blocks/postgres_tests
environment:
POSTGRES_USER: grafanatest
POSTGRES_PASSWORD: grafanatest

View File

@ -0,0 +1,3 @@
CREATE DATABASE grafanadstest;
REVOKE CONNECT ON DATABASE grafanadstest FROM PUBLIC;
GRANT CONNECT ON DATABASE grafanadstest TO grafanatest;

View File

@ -0,0 +1,3 @@
FROM prom/prometheus:v1.8.2
ADD prometheus.yml /etc/prometheus/
ADD alert.rules /etc/prometheus/

View File

@ -0,0 +1,10 @@
# Alert Rules
ALERT AppCrash
IF process_open_fds > 0
FOR 15s
LABELS { severity="critical" }
ANNOTATIONS {
summary = "Number of open fds > 0",
description = "Just testing"
}

View File

@ -0,0 +1,26 @@
prometheus:
build: blocks/prometheus_mac
ports:
- "9090:9090"
node_exporter:
image: prom/node-exporter
ports:
- "9100:9100"
fake-prometheus-data:
image: grafana/fake-data-gen
ports:
- "9091:9091"
environment:
FD_DATASOURCE: prom
alertmanager:
image: quay.io/prometheus/alertmanager
ports:
- "9093:9093"
prometheus-random-data:
build: blocks/prometheus_random_data
ports:
- "8081:8080"

View File

@ -0,0 +1,39 @@
# my global config
global:
scrape_interval: 10s # By default, scrape targets every 15 seconds.
evaluation_interval: 10s # By default, scrape targets every 15 seconds.
# scrape_timeout is set to the global default (10s).
# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
- "alert.rules"
# - "first.rules"
# - "second.rules"
alerting:
alertmanagers:
- scheme: http
static_configs:
- targets:
- "alertmanager:9093"
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
- job_name: 'node_exporter'
static_configs:
- targets: ['node_exporter:9100']
- job_name: 'fake-data-gen'
static_configs:
- targets: ['fake-prometheus-data:9091']
- job_name: 'grafana'
static_configs:
- targets: ['host.docker.internal:3000']
- job_name: 'prometheus-random-data'
static_configs:
- targets: ['prometheus-random-data:8080']

View File

@ -22,6 +22,6 @@ log() {
log $RUN_CMD
$RUN_CMD
# Exit immidiately in case of any errors or when we have interactive terminal
# Exit immediately in case of any errors or when we have interactive terminal
if [[ $? != 0 ]] || test -t 0; then exit $?; fi
log

View File

@ -100,7 +100,7 @@ datasources:
- name: Graphite
# <string, required> datasource type. Required
type: graphite
# <string, required> access mode. direct or proxy. Required
# <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
access: proxy
# <int> org id. will default to orgId 1 if not specified
orgId: 1
@ -138,6 +138,7 @@ datasources:
```
#### Custom Settings per Datasource
Please refer to each datasource's documentation for specific provisioning examples.
| Datasource | Misc |
| ---- | ---- |
@ -196,16 +197,25 @@ providers:
folder: ''
type: file
disableDeletion: false
editable: false
options:
path: /var/lib/grafana/dashboards
```
When Grafana starts, it will update/insert all dashboards available in the configured path. It will then poll that path, look for updated json files and update/insert those into the database.
#### Making changes to a provisioned dashboard
It's possible to make changes to a provisioned dashboard in the Grafana UI, but there's currently no way to automatically save the changes back to the provisioning source.
However, if you make changes to a provisioned dashboard you can still `Save` the dashboard, which will bring up a *Cannot save provisioned dashboard* dialog as seen in the screenshot below.
The available options let you `Copy JSON to Clipboard` and/or `Save JSON to file`, which can help you synchronize your dashboard changes back to the provisioning source.
Note: The JSON shown in the input field and when using `Copy JSON to Clipboard` and/or `Save JSON to file` will have the `id` field automatically removed to aid the provisioning workflow.
{{< docs-imagebox img="/img/docs/v51/provisioning_cannot_save_dashboard.png" max-width="500px" class="docs-image--no-shadow" >}}
### Reusable Dashboard Urls
If the dashboard in the json file contains an [uid](/reference/dashboard/#json-fields), Grafana will force insert/update on that uid. This allows you to migrate dashboards betweens Grafana instances and provisioning Grafana from configuration without breaking the urls given since the new dashboard url uses the uid as identifer.
If the dashboard in the json file contains an [uid](/reference/dashboard/#json-fields), Grafana will force insert/update on that uid. This allows you to migrate dashboards between Grafana instances and provision Grafana from configuration without breaking the urls given, since the new dashboard url uses the uid as identifier.
When Grafana starts, it will update/insert all dashboards available in the configured folders. If you modify the file, the dashboard will also be updated.
By default Grafana will delete dashboards in the database if the file is removed. You can disable this behavior using the `disableDeletion` setting.
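As a minimal sketch (the provider name and path below are placeholders, not values taken from this page), a provider that keeps dashboards in the database even after their source files are removed could look like this:
```yaml
apiVersion: 1
providers:
  # 'default' is an arbitrary provider name used for illustration
  - name: 'default'
    orgId: 1
    folder: ''
    type: file
    # keep dashboards in the database even if the json files are removed
    disableDeletion: true
    options:
      path: /var/lib/grafana/dashboards
```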

View File

@ -153,10 +153,10 @@ Prometheus Alertmanager | `prometheus-alertmanager` | no
# Enable images in notifications {#external-image-store}
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessable (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessible (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
Amazon S3, Webdav, Google Cloud Storage and Azure Blob Storage. So to set that up you need to configure the [external image uploader](/installation/configuration/#external-image-storage) in your grafana-server ini config file.
Be aware that some notifiers requires public access to the image to be able to include it in the notification. So make sure to enable public access to the images. If your using local image uploader, your Grafana instance need to be accessible by the internet.
Be aware that some notifiers require public access to the image to be able to include it in the notification. So make sure to enable public access to the images. If you're using the local image uploader, your Grafana instance needs to be accessible from the internet.
Currently only the Email Channel attaches images if no external image store is specified. To include images in alert notifications for other channels you need to set up an external image store.

View File

@ -110,7 +110,7 @@ to `Keep Last State` in order to basically ignore them.
## Notifications
In alert tab you can also specify alert rule notifications along with a detailed messsage about the alert rule.
In alert tab you can also specify alert rule notifications along with a detailed message about the alert rule.
The message can contain anything, information about how you might solve the issue, link to runbook, etc.
The actual notifications are configured and shared between multiple alerts. Read the

View File

@ -1,6 +1,6 @@
+++
title = "Contributor Licence Agreement (CLA)"
description = "Contributer Licence Agreement (CLA)"
description = "Contributor Licence Agreement (CLA)"
type = "docs"
aliases = ["/project/cla", "docs/contributing/cla.html"]
[menu.docs]
@ -101,4 +101,4 @@ TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU [OR US]
<br>
<br>
<br>
This CLA aggreement is based on the [Harmony Contributor Aggrement Template (combined)](http://www.harmonyagreements.org/agreements.html), [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/)
This CLA agreement is based on the [Harmony Contributor Agreement Template (combined)](http://www.harmonyagreements.org/agreements.html), [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/)

View File

@ -43,6 +43,40 @@ server is running on AWS you can use IAM Roles and authentication will be handle
Checkout AWS docs on [IAM Roles](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
## IAM Policies
Grafana needs permissions granted via IAM to be able to read CloudWatch metrics
and EC2 tags/instances. You can attach these permissions to IAM roles and
utilize Grafana's built-in support for assuming roles.
Here is a minimal policy example:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowReadingMetricsFromCloudWatch",
"Effect": "Allow",
"Action": [
"cloudwatch:ListMetrics",
"cloudwatch:GetMetricStatistics"
],
"Resource": "*"
},
{
"Sid": "AllowReadingTagsFromEC2",
"Effect": "Allow",
"Action": [
"ec2:DescribeTags",
"ec2:DescribeInstances"
],
"Resource": "*"
}
]
}
```
### AWS credentials file
Create a file at `~/.aws/credentials`, that is, in the `HOME` path of the user running grafana-server.
@ -173,3 +207,37 @@ Amazon provides 1 million CloudWatch API requests each month at no additional ch
it costs $0.01 per 1,000 GetMetricStatistics or ListMetrics requests. For each query Grafana will
issue a GetMetricStatistics request and every time you pick a dimension in the query editor
Grafana will issue a ListMetrics request.
## Configure the Datasource with Provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources)
Here are some provisioning examples for this datasource.
Using a credentials file
```yaml
apiVersion: 1
datasources:
- name: Cloudwatch
type: cloudwatch
jsonData:
authType: credentials
defaultRegion: eu-west-2
```
Using `accessKey` and `secretKey`
```yaml
apiVersion: 1
datasources:
- name: Cloudwatch
type: cloudwatch
jsonData:
authType: keys
defaultRegion: eu-west-2
secureJsonData:
accessKey: "<your access key>"
secretKey: "<your secret key>"
```
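A third variant is to rely on the assume-role support mentioned above. This is a hedged sketch: the `authType: arn` value, the `assumeRoleArn` field name and the role ARN itself are assumptions/placeholders rather than values taken from this page.
```yaml
apiVersion: 1
datasources:
  - name: Cloudwatch
    type: cloudwatch
    jsonData:
      # assumed field names; the ARN below is a placeholder
      authType: arn
      assumeRoleArn: "arn:aws:iam::123456789012:role/grafana-cloudwatch-read"
      defaultRegion: eu-west-2
```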

View File

@ -29,13 +29,19 @@ Name | Description
*Name* | The data source name. This is how you refer to the data source in panels & queries.
*Default* | Default data source means that it will be pre-selected for new panels.
*Url* | The HTTP protocol, IP, and port of your Elasticsearch server.
*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
*Access* | Server (default) = URL needs to be accessible from the Grafana backend/server, Browser = URL needs to be accessible from the browser.
Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication to the browser.
Access mode controls how requests to the data source will be handled. Server should be the preferred way if nothing else is stated.
### Direct access
### Server access mode (Default)
If you select direct access you must update your Elasticsearch configuration to allow other domains to access
All requests will be made from the browser to the Grafana backend/server, which in turn will forward the requests to the data source, thereby circumventing possible Cross-Origin Resource Sharing (CORS) requirements. The URL needs to be accessible from the Grafana backend/server if you select this access mode.
### Browser (Direct) access
All requests will be made from the browser directly to the data source and may be subject to Cross-Origin Resource Sharing (CORS) requirements. The URL needs to be accessible from the browser if you select this access mode.
If you select Browser access you must update your Elasticsearch configuration to allow other domains to access
Elasticsearch from the browser. You do this by specifying these to options in your **elasticsearch.yml** config file.
```bash
@ -45,7 +51,7 @@ http.cors.allow-origin: "*"
### Index settings
![](/img/docs/elasticsearch/elasticsearch_ds_details.png)
![Elasticsearch Datasource Details](/img/docs/elasticsearch/elasticsearch_ds_details.png)
Here you can specify a default for the `time field` and specify the name of your Elasticsearch index. You can use
a time pattern for the index name or a wildcard.
@ -55,9 +61,25 @@ a time pattern for the index name or a wildcard.
Be sure to specify your Elasticsearch version in the version selection dropdown. This is very important as there are differences in how queries are composed. Currently only 2.x and 5.x
are supported.
### Min time interval
A lower limit for the auto group by time interval. Recommended to be set to write frequency, for example `1m` if your data is written every minute.
This option can also be overridden/configured in a dashboard panel under data source options. It's important to note that this value **needs** to be formatted as a
number followed by a valid time identifier, e.g. `1m` (1 minute) or `30s` (30 seconds). The following time identifiers are supported:
Identifier | Description
------------ | -------------
`y` | year
`M` | month
`w` | week
`d` | day
`h` | hour
`m` | minute
`s` | second
`ms` | millisecond
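If you provision this data source from a config file, the same lower limit can also be set under `jsonData`. This is a minimal sketch, assuming the option is exposed as a `timeInterval` field; the URL and index pattern are just examples:
```yaml
apiVersion: 1
datasources:
  - name: Elastic
    type: elasticsearch
    access: proxy
    url: http://localhost:9200
    database: "[metrics-]YYYY.MM.DD"
    jsonData:
      timeField: "@timestamp"
      interval: Daily
      # assumed field name for the "Min time interval" option
      timeInterval: "1m"
```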
## Metric Query editor
![](/img/docs/elasticsearch/query_editor.png)
![Elasticsearch Query Editor](/img/docs/elasticsearch/query_editor.png)
The Elasticsearch query editor allows you to select multiple metrics and group by multiple terms or filters. Use the plus and minus icons to the right to add/remove
metrics or group by clauses. Some metrics and group by clauses have options; click the option text to expand the row to view and edit metric or group by options.
@ -137,3 +159,23 @@ Query | You can leave the search query blank or specify a lucene query
Time | The name of the time field, needs to be a date field.
Text | Event description field.
Tags | Optional field name to use for event tags (can be an array or a CSV string).
## Configure the Datasource with Provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources)
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Elastic
type: elasticsearch
access: proxy
database: "[metrics-]YYYY.MM.DD"
url: http://localhost:9200
jsonData:
interval: Daily
timeField: "@timestamp"
```

View File

@ -31,20 +31,28 @@ Name | Description
*Name* | The data source name. This is how you refer to the data source in panels & queries.
*Default* | Default data source means that it will be pre-selected for new panels.
*Url* | The HTTP protocol, IP, and port of your graphite-web or graphite-api install.
*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
*Access* | Server (default) = URL needs to be accessible from the Grafana backend/server, Browser = URL needs to be accessible from the browser.
Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the browser.
Access mode controls how requests to the data source will be handled. Server should be the preferred way if nothing else is stated.
### Server access mode (Default)
All requests will be made from the browser to the Grafana backend/server, which in turn will forward the requests to the data source, thereby circumventing possible Cross-Origin Resource Sharing (CORS) requirements. The URL needs to be accessible from the Grafana backend/server if you select this access mode.
### Browser access mode
All requests will be made from the browser directly to the data source and may be subject to Cross-Origin Resource Sharing (CORS) requirements. The URL needs to be accessible from the browser if you select this access mode.
## Metric editor
### Navigate metric segments
Click the ``Select metric`` link to start navigating the metric space. Once you start you can continue using the mouse
or keyboard arrow keys. You can select a wildcard and still continue.
{{< docs-imagebox img="/img/docs/v45/graphite_query1_still.png"
animated-gif="/img/docs/v45/graphite_query1.gif" >}}
### Functions
Click the plus icon to the right to add a function. You can search for the function or select it from the menu. Once
@ -55,7 +63,6 @@ by the x icon.
{{< docs-imagebox img="/img/docs/v45/graphite_query2_still.png"
animated-gif="/img/docs/v45/graphite_query2.gif" >}}
### Optional parameters
Some functions like aliasByNode support an optional second argument. To add this parameter specify for example 3,-2 as the first parameter and the function editor will adapt and move the -2 to a second parameter. To remove the second optional parameter just click on it and leave it blank and the editor will remove it.
@ -63,7 +70,6 @@ Some functions like aliasByNode support an optional second argument. To add this
{{< docs-imagebox img="/img/docs/v45/graphite_query3_still.png"
animated-gif="/img/docs/v45/graphite_query3.gif" >}}
### Nested Queries
You can reference queries by the row “letter” that they're on (similar to Microsoft Excel). If you add a second query to a graph, you can reference the first query simply by typing in #A. This provides an easy and convenient way to build compounded queries.
@ -71,7 +77,6 @@ You can reference queries by the row “letter” that theyre on (similar to
{{< docs-imagebox img="/img/docs/v45/graphite_nested_queries_still.png"
animated-gif="/img/docs/v45/graphite_nested_queries.gif" >}}
## Point consolidation
All Graphite metrics are consolidated so that Graphite doesn't return more data points than there are pixels in the graph. By default,
@ -89,6 +94,18 @@ being displayed in your dashboard.
Checkout the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
types of template variables.
Graphite 1.1 introduced tags and Grafana added support for Graphite queries with tags in version 5.0. To create a variable using tag values, you need to use the Grafana functions `tags` and `tag_values`.
Query | Description
------------ | -------------
*tags()* | Returns all tags.
*tags(server=~backend\*)* | Returns only tags that occur in series matching the filter expression.
*tag_values(server)* | Return tag values for the specified tag.
*tag_values(server, server=~backend\*)* | Returns filtered tag values that occur for the specified tag in series matching those expressions.
*tag_values(server, server=~backend\*, app=~${apps:regex})* | Multiple filter expressions and expressions can contain other variables.
For more details, see the [Graphite docs on the autocomplete api for tags](http://graphite.readthedocs.io/en/latest/tags.html#auto-complete-support).
### Query variable
The query you specify in the query field should be a metric find type of query. For example, a query like `prod.servers.*` will fill the
@ -97,10 +114,10 @@ variable with all possible values that exist in the wildcard position.
You can also create nested variables that use other variables in their definition. For example
`apps.$app.servers.*` uses the variable `$app` in its query definition.
### Variable usage
### Variable Usage
You can use a variable in a metric node path or as a parameter to a function.
![](/img/docs/v2/templated_variable_parameter.png)
![variable](/img/docs/v2/templated_variable_parameter.png)
There are two syntaxes:
@ -113,6 +130,18 @@ the second syntax in expressions like `my.server[[serverNumber]].count`.
Example:
[Graphite Templated Dashboard](http://play.grafana.org/dashboard/db/graphite-templated-nested)
### Variable Usage in Tag Queries
Multi-value variables in tag queries use the advanced formatting syntax introduced in Grafana 5.0 for variables: `${var:regex}`. Non-tag queries will use the default glob formatting for multi-value variables.
Example of a tag expression with regex formatting and using the Equal Tilde operator, `=~`:
```text
server=~${servers:regex}
```
Checkout the [Advanced Formatting Options section in the Variables]({{< relref "reference/templating.md#advanced-formatting-options" >}}) documentation for examples and details.
## Annotations
[Annotations]({{< relref "reference/annotations.md" >}}) allows you to overlay rich event information on top of graphs. You add annotation
@ -120,3 +149,21 @@ queries via the Dashboard menu / Annotations view.
Graphite supports two ways to query annotations: a regular metric query, for which you use the `Graphite query` textbox, and a Graphite events query, for which you use the `Graphite event tags` textbox to specify a tag or wildcard (leaving it empty should also work).
## Configure the Datasource with Provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources)
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Graphite
type: graphite
access: proxy
url: http://localhost:8080
jsonData:
graphiteVersion: "1.1"
```

View File

@ -30,6 +30,7 @@ The following datasources are officially supported:
* [Prometheus]({{< relref "prometheus.md" >}})
* [MySQL]({{< relref "mysql.md" >}})
* [Postgres]({{< relref "postgres.md" >}})
* [Microsoft SQL Server (MSSQL)]({{< relref "mssql.md" >}})
## Data source plugins

View File

@ -28,16 +28,36 @@ Name | Description
*Name* | The data source name. This is how you refer to the data source in panels & queries.
*Default* | Default data source means that it will be pre-selected for new panels.
*Url* | The http protocol, ip and port of your influxdb api (the influxdb api port is 8086 by default)
*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
*Access* | Server (default) = URL needs to be accessible from the Grafana backend/server, Browser = URL needs to be accessible from the browser.
*Database* | Name of your influxdb database
*User* | Name of your database user
*Password* | Database user's password
### Proxy vs Direct access
Access mode controls how requests to the data source will be handled. Server should be the preferred way if nothing else is stated.
Proxy access means that the Grafana backend will proxy all requests from the browser. So requests to InfluxDB will be channeled through
`grafana-server`. This means that the URL you specify needs to be accessible from the server you are running Grafana on. Proxy access
mode is also more secure as the username & password will never reach the browser.
### Server access mode (Default)
All requests will be made from the browser to the Grafana backend/server, which in turn will forward the requests to the data source, thereby circumventing possible Cross-Origin Resource Sharing (CORS) requirements. The URL needs to be accessible from the Grafana backend/server if you select this access mode.
### Browser access mode
All requests will be made from the browser directly to the data source and may be subject to Cross-Origin Resource Sharing (CORS) requirements. The URL needs to be accessible from the browser if you select this access mode.
### Min time interval
A lower limit for the auto group by time interval. Recommended to be set to write frequency, for example `1m` if your data is written every minute.
This option can also be overridden/configured in a dashboard panel under data source options. It's important to note that this value **needs** to be formatted as a
number followed by a valid time identifier, e.g. `1m` (1 minute) or `30s` (30 seconds). The following time identifiers are supported:
Identifier | Description
------------ | -------------
`y` | year
`M` | month
`w` | week
`d` | day
`h` | hour
`m` | minute
`s` | second
`ms` | millisecond
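As with the other data sources, this lower limit can likely be provisioned as well; a hedged sketch, assuming a `timeInterval` field under `jsonData` (database, credentials and URL are just examples):
```yaml
apiVersion: 1
datasources:
  - name: InfluxDB
    type: influxdb
    access: proxy
    database: site
    user: grafana
    password: grafana
    url: http://localhost:8086
    jsonData:
      # assumed field name for the "Min time interval" option
      timeInterval: "1m"
```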
## Query Editor
@ -174,3 +194,22 @@ SELECT title, description from events WHERE $timeFilter order asc
For InfluxDB you need to enter a query like in the above example. You need to have the ```where $timeFilter```
part. If you only select one column you will not need to enter anything in the column mapping fields. The
Tags field can be a comma separated string.
## Configure the Datasource with Provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources)
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: InfluxDB
type: influxdb
access: proxy
database: site
user: grafana
password: grafana
url: http://localhost:8086
```

View File

@ -0,0 +1,565 @@
+++
title = "Using Microsoft SQL Server in Grafana"
description = "Guide for using Microsoft SQL Server in Grafana"
keywords = ["grafana", "MSSQL", "Microsoft", "SQL", "guide", "Azure SQL Database"]
type = "docs"
[menu.docs]
name = "Microsoft SQL Server"
parent = "datasources"
weight = 7
+++
# Using Microsoft SQL Server in Grafana
> Only available in Grafana v5.1+.
Grafana ships with a built-in Microsoft SQL Server (MSSQL) data source plugin that allows you to query and visualize data from any Microsoft SQL Server 2005 or newer, including Microsoft Azure SQL Database.
## Adding the data source
1. Open the side menu by clicking the Grafana icon in the top header.
2. In the side menu under the `Configuration` link you should find a link named `Data Sources`.
3. Click the `+ Add data source` button in the top header.
4. Select *Microsoft SQL Server* from the *Type* dropdown.
### Data source options
Name | Description
------------ | -------------
*Name* | The data source name. This is how you refer to the data source in panels & queries.
*Default* | Default data source means that it will be pre-selected for new panels.
*Host* | The IP address/hostname and optional port of your MSSQL instance. If port is omitted, default 1433 will be used.
*Database* | Name of your MSSQL database.
*User* | Database user's login/username
*Password* | Database user's password
### Database User Permissions (Important!)
The database user you specify when you add the data source should only be granted SELECT permissions on
the specified database & tables you want to query. Grafana does not validate that the query is safe. The query
could include any SQL statement. For example, statements like `DELETE FROM user;` and `DROP TABLE user;` would be
executed. To protect against this we **Highly** recommend you create a specific MSSQL user with restricted permissions.
Example:
```sql
CREATE USER grafanareader WITH PASSWORD 'password'
GRANT SELECT ON dbo.YourTable3 TO grafanareader
```
Make sure the user does not get any unwanted privileges from the public role.
### Known Issues
The MSSQL 2008 and 2008 R2 engines cannot handle login records when SSL encryption is not disabled. Due to this you may receive a `Login error: EOF` error when trying to create your datasource.
To fix MSSQL 2008 R2 issue, install MSSQL 2008 R2 Service Pack 2. To fix MSSQL 2008 issue, install Microsoft MSSQL 2008 Service Pack 3 and Cumulative update package 3 for MSSQL 2008 SP3.
## Query Editor
{{< docs-imagebox img="/img/docs/v51/mssql_query_editor.png" class="docs-image--no-shadow" >}}
You find the MSSQL query editor in the metrics tab in Graph, Singlestat or Table panel's edit mode. You enter edit mode by clicking the
panel title, then edit. The editor allows you to define a SQL query to select data to be visualized.
1. Select *Format as* `Time series` (for use in Graph or Singlestat panels among others) or `Table` (for use in Table panel among others).
2. This is the actual editor where you write your SQL queries.
3. Show help section for MSSQL below the query editor.
4. Show the actual executed SQL query. Only available after a successful query has been executed.
5. Add an additional query where an additional query editor will be displayed.
<div class="clearfix"></div>
## Macros
To simplify syntax and to allow for dynamic parts, like date range filters, the query can contain macros.
Macro example | Description
------------ | -------------
*$__time(dateColumn)* | Will be replaced by an expression to rename the column to *time*. For example, *dateColumn as time*
*$__timeEpoch(dateColumn)* | Will be replaced by an expression to convert a DATETIME column type to unix timestamp and rename it to *time*. <br/>For example, *DATEDIFF(second, '1970-01-01', dateColumn) AS time*
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. <br/>For example, *dateColumn >= DATEADD(s, 1494410783, '1970-01-01') AND dateColumn <= DATEADD(s, 1494410783, '1970-01-01')*
*$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *DATEADD(second, 1494410783, '1970-01-01')*
*$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *DATEADD(second, 1494410783, '1970-01-01')*
*$__timeGroup(dateColumn,'5m'[, fillvalue])* | Will be replaced by an expression usable in GROUP BY clause. Providing a *fillValue* of *NULL* or *floating value* will automatically fill empty series in timerange with that value. <br/>For example, *CAST(ROUND(DATEDIFF(second, '1970-01-01', time_column)/300.0, 0) as bigint)\*300*.
*$__timeGroup(dateColumn,'5m', 0)* | Same as above but with a fill parameter so all null values will be converted to the fill value (all null values would be set to zero using this example).
*$__unixEpochFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name with times represented as unix timestamp. For example, *dateColumn > 1494410783 AND dateColumn < 1494497183*
*$__unixEpochFrom()* | Will be replaced by the start of the currently active time selection as unix timestamp. For example, *1494410783*
*$__unixEpochTo()* | Will be replaced by the end of the currently active time selection as unix timestamp. For example, *1494497183*
We plan to add many more macros. If you have suggestions for what macros you would like to see, please [open an issue](https://github.com/grafana/grafana) in our GitHub repo.
The query editor has a link named `Generated SQL` that shows up after a query has been executed, while in panel edit mode. Click on it and it will expand and show the raw interpolated SQL string that was executed.
## Table queries
If the `Format as` query option is set to `Table` then you can basically do any type of SQL query. The table panel will automatically show the results of whatever columns & rows your query returns.
**Example database table:**
```sql
CREATE TABLE [event] (
time_sec bigint,
description nvarchar(100),
tags nvarchar(100)
)
```
```sql
CREATE TABLE [mssql_types] (
c_bit bit, c_tinyint tinyint, c_smallint smallint, c_int int, c_bigint bigint, c_money money, c_smallmoney smallmoney, c_numeric numeric(10,5),
c_real real, c_decimal decimal(10,2), c_float float,
c_char char(10), c_varchar varchar(10), c_text text,
c_nchar nchar(12), c_nvarchar nvarchar(12), c_ntext ntext,
c_datetime datetime, c_datetime2 datetime2, c_smalldatetime smalldatetime, c_date date, c_time time, c_datetimeoffset datetimeoffset
)
INSERT INTO [mssql_types]
SELECT
1, 5, 20020, 980300, 1420070400, '$20000.15', '£2.15', 12345.12,
1.11, 2.22, 3.33,
'char10', 'varchar10', 'text',
N'☺nchar12☺', N'☺nvarchar12☺', N'☺text☺',
GETDATE(), CAST(GETDATE() AS DATETIME2), CAST(GETDATE() AS SMALLDATETIME), CAST(GETDATE() AS DATE), CAST(GETDATE() AS TIME), SWITCHOFFSET(CAST(GETDATE() AS DATETIMEOFFSET), '-07:00')
```
Query editor with example query:
{{< docs-imagebox img="/img/docs/v51/mssql_table_query.png" max-width="500px" class="docs-image--no-shadow" >}}
The query:
```sql
SELECT * FROM [mssql_types]
```
You can control the name of the Table panel columns by using regular `AS ` SQL column selection syntax. Example:
```sql
SELECT
c_bit as [column1], c_tinyint as [column2]
FROM
[mssql_types]
```
The resulting table panel:
{{< docs-imagebox img="/img/docs/v51/mssql_table_result.png" max-width="1489px" class="docs-image--no-shadow" >}}
## Time series queries
If you set `Format as` to `Time series`, for use in Graph panel for example, then the query must have a column named `time` that returns either a sql datetime or any numeric datatype representing unix epoch in seconds. You may return a column named `metric` that is used as the metric name for the value column. Any column except `time` and `metric` is treated as a value column. If you omit the `metric` column, the name of the value column will be the metric name. You may select multiple value columns, each will have its name as the metric name.
**Example database table:**
```sql
CREATE TABLE [event] (
time_sec bigint,
description nvarchar(100),
tags nvarchar(100)
)
```
```sql
CREATE TABLE metric_values (
time datetime,
measurement nvarchar(100),
valueOne int,
valueTwo int
)
INSERT metric_values (time, measurement, valueOne, valueTwo) VALUES('2018-03-15 12:30:00', 'Metric A', 62, 6)
INSERT metric_values (time, measurement, valueOne, valueTwo) VALUES('2018-03-15 12:30:00', 'Metric B', 49, 11)
...
INSERT metric_values (time, measurement, valueOne, valueTwo) VALUES('2018-03-15 13:55:00', 'Metric A', 14, 25)
INSERT metric_values (time, measurement, valueOne, valueTwo) VALUES('2018-03-15 13:55:00', 'Metric B', 48, 10)
```
{{< docs-imagebox img="/img/docs/v51/mssql_time_series_one.png" class="docs-image--no-shadow docs-image--right" >}}
**Example with one `value` and one `metric` column.**
```sql
SELECT
time,
valueOne,
measurement as metric
FROM
metric_values
WHERE
$__timeFilter(time)
ORDER BY 1
```
When the above query is used in a graph panel the result will be two series named `Metric A` and `Metric B` with the values of `valueOne` plotted over `time`.
<div class="clearfix"></div>
{{< docs-imagebox img="/img/docs/v51/mssql_time_series_two.png" class="docs-image--no-shadow docs-image--right" >}}
**Example with multiple `value` columns:**
```sql
SELECT
time,
valueOne,
valueTwo
FROM
metric_values
WHERE
$__timeFilter(time)
ORDER BY 1
```
When the above query is used in a graph panel the result will be two series named `valueOne` and `valueTwo` with the values of `valueOne` and `valueTwo` plotted over `time`.
<div class="clearfix"></div>
{{< docs-imagebox img="/img/docs/v51/mssql_time_series_three.png" class="docs-image--no-shadow docs-image--right" >}}
**Example using the $__timeGroup macro:**
```sql
SELECT
$__timeGroup(time, '3m') as time,
measurement as metric,
avg(valueOne)
FROM
metric_values
WHERE
$__timeFilter(time)
GROUP BY
$__timeGroup(time, '3m'),
measurement
ORDER BY 1
```
When the above query is used in a graph panel the result will be two series named `Metric A` and `Metric B` with an average of `valueOne` plotted over `time`.
Where a series lacks a value in a 3 minute window, a line is simply drawn between the two surrounding points, which is why you'll notice that the graph to the right never goes down to zero.
<div class="clearfix"></div>
{{< docs-imagebox img="/img/docs/v51/mssql_time_series_four.png" class="docs-image--no-shadow docs-image--right" >}}
**Example using the $__timeGroup macro with fill parameter set to zero:**
```sql
SELECT
$__timeGroup(time, '3m', 0) as time,
measurement as metric,
sum(valueTwo)
FROM
metric_values
WHERE
$__timeFilter(time)
GROUP BY
$__timeGroup(time, '3m'),
measurement
ORDER BY 1
```
When the above query is used in a graph panel the result will be two series named `Metric A` and `Metric B` with a sum of `valueTwo` plotted over `time`.
Any series lacking a value in a 3 minute window will have a value of zero which you'll see rendered in the graph to the right.
## Templating
Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place. Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data being displayed in your dashboard.
Checkout the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different types of template variables.
### Query Variable
If you add a template variable of the type `Query`, you can write a MSSQL query that can
return things like measurement names, key names or key values that are shown as a dropdown select box.
For example, you can have a variable that contains all values for the `hostname` column in a table if you specify a query like this in the templating variable *Query* setting.
```sql
SELECT hostname FROM host
```
A query can return multiple columns and Grafana will automatically create a list from them. For example, the query below will return a list with values from `hostname` and `hostname2`.
```sql
SELECT [host].[hostname], [other_host].[hostname2] FROM host JOIN other_host ON [host].[city] = [other_host].[city]
```
Another option is a query that can create a key/value variable. The query should return two columns that are named `__text` and `__value`. The `__text` column value should be unique (if it is not unique then the first value is used). The options in the dropdown will have a text and value that allows you to have a friendly name as text and an id as the value. An example query with `hostname` as the text and `id` as the value:
```sql
SELECT hostname __text, id __value FROM host
```
You can also create nested variables. For example, if you had another variable named `region`, you could have
the hosts variable only show hosts from the currently selected region with a query like this (if `region` is a multi-value variable then use the `IN` comparison operator rather than `=` to match against multiple values):
```sql
SELECT hostname FROM host WHERE region IN ($region)
```
### Using Variables in Queries
> From Grafana 4.3.0 to 4.6.0, template variables are always quoted automatically, so if they are string values, do not wrap them in quotes in where clauses.
>
> From Grafana 5.0.0, template variable values are only quoted when the template variable is a `multi-value`.
If the variable is a multi-value variable then use the `IN` comparison operator rather than `=` to match against multiple values.
There are two syntaxes:
`$<varname>` Example with a template variable named `hostname`:
```sql
SELECT
atimestamp time,
aint value
FROM table
WHERE $__timeFilter(atimestamp) and hostname in($hostname)
ORDER BY atimestamp
```
`[[varname]]` Example with a template variable named `hostname`:
```sql
SELECT
atimestamp as time,
aint as value
FROM table
WHERE $__timeFilter(atimestamp) and hostname in([[hostname]])
ORDER BY atimestamp
```
#### Disabling Quoting for Multi-value Variables
Grafana automatically creates a quoted, comma-separated string for multi-value variables. For example: if `server01` and `server02` are selected then it will be formatted as: `'server01', 'server02'`. To disable quoting, use the csv formatting option for variables:
`${servers:csv}`
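For example, here is a hedged sketch of how the csv option could be used in a where clause — the multi-value variable `port` and the integer column `[port]` are hypothetical and not part of the example table above:
```sql
SELECT
  time,
  valueOne
FROM
  metric_values
WHERE
  -- ${port:csv} renders the selected values unquoted, e.g. 1433,1434,
  -- which is what a comparison against an integer column needs
  $__timeFilter(time) AND [port] IN (${port:csv})
ORDER BY 1
```
With the default formatting the same selection would be rendered quoted (`'1433', '1434'`), which would not match an integer column.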
Read more about variable formatting options in the [Variables]({{< relref "reference/templating.md#advanced-formatting-options" >}}) documentation.
## Annotations
[Annotations]({{< relref "reference/annotations.md" >}}) allow you to overlay rich event information on top of graphs. You add annotation queries via the Dashboard menu / Annotations view.
**Columns:**
Name | Description
------------ | -------------
time | The name of the date/time field. Could be a column with a native sql date/time data type or epoch value.
text | Event description field.
tags | Optional field name to use for event tags as a comma separated string.
**Example database tables:**
```sql
CREATE TABLE [events] (
time_sec bigint,
description nvarchar(100),
  tags nvarchar(100)
)
```
We also use the database table defined in [Time series queries](#time-series-queries).
**Example query using time column with epoch values:**
```sql
SELECT
time_sec as time,
description as [text],
tags
FROM
[events]
WHERE
$__unixEpochFilter(time_sec)
ORDER BY 1
```
**Example query using time column of native sql date/time data type:**
```sql
SELECT
time,
measurement as text,
convert(varchar, valueOne) + ',' + convert(varchar, valueTwo) as tags
FROM
metric_values
WHERE
  $__timeFilter(time)
ORDER BY 1
```
## Stored procedure support
Stored procedures have been verified to work. However, please note that we haven't done anything special to support this, so there may be edge cases where it won't work as you would expect.
Stored procedures should be supported in table, time series and annotation queries as long as you use the same naming of columns and return data in the same format as described above under the respective sections.
Please note that any macro function will not work inside a stored procedure.
### Examples
{{< docs-imagebox img="/img/docs/v51/mssql_metrics_graph.png" class="docs-image--no-shadow docs-image--right" >}}
For the following examples we use the database table defined in [Time series queries](#time-series-queries). Let's say that we want to visualize 4 series in a graph panel, i.e. all combinations of the columns `valueOne`, `valueTwo` and `measurement`. The graph panel to the right visualizes what we want to achieve. To solve this we actually need to use two queries:
**First query:**
```sql
SELECT
$__timeGroup(time, '5m') as time,
measurement + ' - value one' as metric,
avg(valueOne) as valueOne
FROM
metric_values
WHERE
$__timeFilter(time)
GROUP BY
$__timeGroup(time, '5m'),
measurement
ORDER BY 1
```
**Second query:**
```sql
SELECT
$__timeGroup(time, '5m') as time,
measurement + ' - value two' as metric,
avg(valueTwo) as valueTwo
FROM
metric_values
WHERE
  $__timeFilter(time)
GROUP BY
$__timeGroup(time, '5m'),
measurement
ORDER BY 1
```
#### Stored procedure using time in epoch format
We can define a stored procedure that will return all data we need to render 4 series in a graph panel like above.
In this case the stored procedure accepts two parameters `@from` and `@to` of `int` data types which should be a timerange (from-to) in epoch format
which will be used to filter the data to return from the stored procedure.
We're mimicking the `$__timeGroup(time, '5m')` in the select and group by expressions and that's why there's a lot of lengthy expressions needed -
these could be extracted to MSSQL functions, if wanted.
```sql
CREATE PROCEDURE sp_test_epoch(
@from int,
@to int
) AS
BEGIN
SELECT
    cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time))/300 as int)*300 as int) as time,
measurement + ' - value one' as metric,
avg(valueOne) as value
FROM
metric_values
WHERE
time >= DATEADD(s, @from, '1970-01-01') AND time <= DATEADD(s, @to, '1970-01-01')
GROUP BY
    cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time))/300 as int)*300 as int),
measurement
UNION ALL
SELECT
    cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time))/300 as int)*300 as int) as time,
measurement + ' - value two' as metric,
avg(valueTwo) as value
FROM
metric_values
WHERE
time >= DATEADD(s, @from, '1970-01-01') AND time <= DATEADD(s, @to, '1970-01-01')
GROUP BY
    cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time))/300 as int)*300 as int),
measurement
ORDER BY 1
END
```
Then we can use the following query for our graph panel.
```sql
DECLARE
@from int = $__unixEpochFrom(),
@to int = $__unixEpochTo()
EXEC dbo.sp_test_epoch @from, @to
```
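As noted above, the lengthy grouping expression could be extracted into a helper function. A minimal sketch of such a function — the name `fn_unix_epoch_group` is hypothetical and not part of Grafana — assuming SQL Server 2005 or newer, where `GETDATE()` is allowed inside user-defined functions:
```sql
-- Hypothetical helper (sketch): groups a local datetime value into
-- @interval-second buckets and returns the bucket start as a unix epoch.
CREATE FUNCTION dbo.fn_unix_epoch_group(@t datetime, @interval int)
RETURNS int
AS
BEGIN
  RETURN (DATEDIFF(second, '1970-01-01',
    DATEADD(second, DATEDIFF(second, GETDATE(), GETUTCDATE()), @t)) / @interval) * @interval
END
```
The stored procedure above could then call `dbo.fn_unix_epoch_group(time, 300)` in both the select and group by expressions.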
#### Stored procedure using time in datetime format
We can define a stored procedure that will return all data we need to render 4 series in a graph panel like above.
In this case the stored procedure accepts two parameters `@from` and `@to` of `datetime` data types which should be a timerange (from-to)
which will be used to filter the data to return from the stored procedure.
We're mimicking the `$__timeGroup(time, '5m')` in the select and group by expressions and that's why there's a lot of lengthy expressions needed -
these could be extracted to MSSQL functions, if wanted.
```sql
CREATE PROCEDURE sp_test_datetime(
@from datetime,
@to datetime
) AS
BEGIN
SELECT
    cast(cast(DATEDIFF(second, {d '1970-01-01'}, time)/300 as int)*300 as int) as time,
measurement + ' - value one' as metric,
avg(valueOne) as value
FROM
metric_values
WHERE
time >= @from AND time <= @to
GROUP BY
    cast(cast(DATEDIFF(second, {d '1970-01-01'}, time)/300 as int)*300 as int),
measurement
UNION ALL
SELECT
    cast(cast(DATEDIFF(second, {d '1970-01-01'}, time)/300 as int)*300 as int) as time,
measurement + ' - value two' as metric,
avg(valueTwo) as value
FROM
metric_values
WHERE
time >= @from AND time <= @to
GROUP BY
    cast(cast(DATEDIFF(second, {d '1970-01-01'}, time)/300 as int)*300 as int),
measurement
ORDER BY 1
END
```
Then we can use the following query for our graph panel.
```sql
DECLARE
@from datetime = $__timeFrom(),
@to datetime = $__timeTo()
EXEC dbo.sp_test_datetime @from, @to
```
## Alerting
Time series queries should work in alerting conditions. Table formatted queries are not yet supported in alert rule
conditions.
## Configure the Datasource with Provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources)
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: MSSQL
type: mssql
url: localhost:1433
database: grafana
user: grafana
secureJsonData:
password: "Password!"
```

View File

@ -12,6 +12,8 @@ weight = 7
# Using MySQL in Grafana
> Only available in Grafana v4.3+.
>
> Starting from Grafana v5.1 you can name the time column *time* in addition to earlier supported *time_sec*. Usage of *time_sec* will eventually be deprecated.
Grafana ships with a built-in MySQL data source plugin that allows you to query and visualize
data from a MySQL compatible database.
@ -23,6 +25,17 @@ data from a MySQL compatible database.
3. Click the `+ Add data source` button in the top header.
4. Select *MySQL* from the *Type* dropdown.
### Data source options
Name | Description
------------ | -------------
*Name* | The data source name. This is how you refer to the data source in panels & queries.
*Default* | Default data source means that it will be pre-selected for new panels.
*Host* | The IP address/hostname and optional port of your MySQL instance.
*Database* | Name of your MySQL database.
*User* | Database user's login/username
*Password* | Database user's password
### Database User Permissions (Important!)
The database user you specify when you add the data source should only be granted SELECT permissions on
@ -46,10 +59,12 @@ To simplify syntax and to allow for dynamic parts, like date range filters, the
Macro example | Description
------------ | -------------
*$__time(dateColumn)* | Will be replaced by an expression to convert to a UNIX timestamp and rename the column to `time_sec`. For example, *UNIX_TIMESTAMP(dateColumn) as time_sec*
*$__timeEpoch(dateColumn)* | Will be replaced by an expression to convert to a UNIX timestamp and rename the column to `time_sec`. For example, *UNIX_TIMESTAMP(dateColumn) as time_sec*
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn > FROM_UNIXTIME(1494410783) AND dateColumn < FROM_UNIXTIME(1494497183)*
*$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *FROM_UNIXTIME(1494410783)*
*$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *FROM_UNIXTIME(1494497183)*
*$__timeGroup(dateColumn,'5m')* | Will be replaced by an expression usable in GROUP BY clause. For example, *cast(cast(UNIX_TIMESTAMP(dateColumn)/(300) as signed)*300 as signed) as time_sec,*
*$__timeGroup(dateColumn,'5m')* | Will be replaced by an expression usable in GROUP BY clause. For example, *cast(cast(UNIX_TIMESTAMP(dateColumn)/(300) as signed)*300 as signed),*
*$__timeGroup(dateColumn,'5m',0)* | Same as above but with a fill parameter so all null values will be converted to the fill value (all null values would be set to zero using this example).
*$__unixEpochFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name with times represented as unix timestamp. For example, *dateColumn > 1494410783 AND dateColumn < 1494497183*
*$__unixEpochFrom()* | Will be replaced by the start of the currently active time selection as unix timestamp. For example, *1494410783*
*$__unixEpochTo()* | Will be replaced by the end of the currently active time selection as unix timestamp. For example, *1494497183*
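For example, a minimal sketch of a time series query using the `$__timeEpoch` and `$__timeFilter` macros from the table above — it reuses the hypothetical `my_table` columns that appear in the templating examples further down:
```sql
SELECT
  $__timeEpoch(atimestamp),
  aint as value,
  avarchar as metric
FROM my_table
WHERE $__timeFilter(atimestamp)
ORDER BY atimestamp ASC
```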
@ -84,39 +99,50 @@ The resulting table panel:
![](/img/docs/v43/mysql_table.png)
### Time series queries
## Time series queries
If you set `Format as` to `Time series`, for use in Graph panel for example, then there are some requirements for
what your query returns.
If you set `Format as` to `Time series`, for use in Graph panel for example, then the query must return a column named `time` that returns either a sql datetime or any numeric datatype representing unix epoch.
Any column except `time` and `metric` is treated as a value column.
You may return a column named `metric` that is used as metric name for the value column.
- Must be a column named `time_sec` representing a unix epoch in seconds.
- Must be a column named `value` representing the time series value.
- Must be a column named `metric` representing the time series name.
Example:
**Example with `metric` column:**
```sql
SELECT
min(UNIX_TIMESTAMP(time_date_time)) as time_sec,
max(value_double) as value,
metric1 as metric
FROM test_data
WHERE $__timeFilter(time_date_time)
GROUP BY metric1, UNIX_TIMESTAMP(time_date_time) DIV 300
ORDER BY time_sec asc
```
Example with $__timeGroup macro:
```sql
SELECT
$__timeGroup(time_date_time,'5m') as time_sec,
min(value_double) as value,
metric_name as metric
$__timeGroup(time_date_time,'5m'),
min(value_double),
'min' as metric
FROM test_data
WHERE $__timeFilter(time_date_time)
GROUP BY 1, metric_name
ORDER BY 1
GROUP BY time
ORDER BY time
```
**Example using the fill parameter in the $__timeGroup macro to convert null values to be zero instead:**
```sql
SELECT
$__timeGroup(createdAt,'5m',0),
sum(value_double) as value,
measurement
FROM test_data
WHERE
$__timeFilter(createdAt)
GROUP BY time, measurement
ORDER BY time
```
**Example with multiple columns:**
```sql
SELECT
$__timeGroup(time_date_time,'5m'),
min(value_double) as min_value,
max(value_double) as max_value
FROM test_data
WHERE $__timeFilter(time_date_time)
GROUP BY time
ORDER BY time
```
Currently, there is no support for a dynamic group by time based on time range & panel width.
@ -180,7 +206,7 @@ There are two syntaxes:
```sql
SELECT
UNIX_TIMESTAMP(atimestamp) as time_sec,
UNIX_TIMESTAMP(atimestamp) as time,
aint as value,
avarchar as metric
FROM my_table
@ -192,7 +218,7 @@ ORDER BY atimestamp ASC
```sql
SELECT
UNIX_TIMESTAMP(atimestamp) as time_sec,
UNIX_TIMESTAMP(atimestamp) as time,
aint as value,
avarchar as metric
FROM my_table
@ -200,28 +226,68 @@ WHERE $__timeFilter(atimestamp) and hostname in([[hostname]])
ORDER BY atimestamp ASC
```
#### Disabling Quoting for Multi-value Variables
Grafana automatically creates a quoted, comma-separated string for multi-value variables. For example: if `server01` and `server02` are selected then it will be formatted as: `'server01', 'server02'`. To disable quoting, use the csv formatting option for variables:
`${servers:csv}`
Read more about variable formatting options in the [Variables]({{< relref "reference/templating.md#advanced-formatting-options" >}}) documentation.
## Annotations
[Annotations]({{< relref "reference/annotations.md" >}}) allows you to overlay rich event information on top of graphs. You add annotation queries via the Dashboard menu / Annotations view.
[Annotations]({{< relref "reference/annotations.md" >}}) allow you to overlay rich event information on top of graphs. You add annotation queries via the Dashboard menu / Annotations view.
An example query:
**Example query using time column with epoch values:**
```sql
SELECT
UNIX_TIMESTAMP(atimestamp) as time_sec,
value as text,
epoch_time as time,
metric1 as text,
CONCAT(tag1, ',', tag2) as tags
FROM my_table
WHERE $__timeFilter(atimestamp)
ORDER BY atimestamp ASC
FROM
public.test_data
WHERE
$__unixEpochFilter(epoch_time)
```
**Example query using time column of native sql date/time data type:**
```sql
SELECT
native_date_time as time,
metric1 as text,
CONCAT(tag1, ',', tag2) as tags
FROM
public.test_data
WHERE
$__timeFilter(native_date_time)
```
Name | Description
------------ | -------------
time_sec | The name of the date/time field.
time | The name of the date/time field. Could be a column with a native sql date/time data type or epoch value.
text | Event description field.
tags | Optional field name to use for event tags as a comma separated string.
## Alerting
Time series queries should work in alerting conditions. Table formatted queries are not yet supported in alert rule conditions.
## Configure the Datasource with Provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources)
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: MySQL
type: mysql
url: localhost:3306
database: grafana
user: grafana
password: password
```

View File

@ -28,11 +28,10 @@ Name | Description
*Name* | The data source name. This is how you refer to the data source in panels & queries.
*Default* | Default data source means that it will be pre-selected for new panels.
*Url* | The http protocol, ip and port of your opentsdb server (default port is usually 4242)
*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
*Access* | Server (default) = URL needs to be accessible from the Grafana backend/server, Browser = URL needs to be accessible from the browser.
*Version* | Version = opentsdb version, either <=2.1 or 2.2
*Resolution* | Metrics from opentsdb may have datapoints with either second or millisecond resolution.
## Query editor
Open a graph in edit mode by clicking the title. The query editor will differ if the datasource has version <=2.1 or = 2.2.
@ -78,7 +77,7 @@ the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` o
### Nested Templating
One template variable can be used to filter tag values for another template varible. First parameter is the metric name,
One template variable can be used to filter tag values for another template variable. First parameter is the metric name,
second parameter is the tag key for which you need to find tag values, and after that all other dependent template variables.
Some examples are mentioned below to make nested template queries work successfully.
@ -88,3 +87,22 @@ Query | Description
*tag_values(cpu, hostname, env=$env, region=$region)* | Return tag values for cpu metric, selected env tag value, selected region tag value and tag key hostname
For details on OpenTSDB metric queries checkout the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)
## Configure the Datasource with Provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources)
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: OpenTsdb
type: opentsdb
access: proxy
url: http://localhost:4242
jsonData:
tsdbResolution: 1
tsdbVersion: 1
```

View File

@ -20,6 +20,18 @@ Grafana ships with a built-in PostgreSQL data source plugin that allows you to q
3. Click the `+ Add data source` button in the top header.
4. Select *PostgreSQL* from the *Type* dropdown.
### Data source options
Name | Description
------------ | -------------
*Name* | The data source name. This is how you refer to the data source in panels & queries.
*Default* | Default data source means that it will be pre-selected for new panels.
*Host* | The IP address/hostname and optional port of your PostgreSQL instance.
*Database* | Name of your PostgreSQL database.
*User* | Database user's login/username
*Password* | Database user's password
*SSL Mode* | This option determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
### Database User Permissions (Important!)
The database user you specify when you add the data source should only be granted SELECT permissions on
@ -45,11 +57,12 @@ Macro example | Description
------------ | -------------
*$__time(dateColumn)* | Will be replaced by an expression to rename the column to `time`. For example, *dateColumn as time*
*$__timeSec(dateColumn)* | Will be replaced by an expression to rename the column to `time` and converting the value to unix timestamp. For example, *extract(epoch from dateColumn) as time*
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *extract(epoch from dateColumn) BETWEEN 1494410783 AND 1494497183*
*$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *to_timestamp(1494410783)*
*$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *to_timestamp(1494497183)*
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn BETWEEN '2017-04-21T05:01:17Z' AND '2017-04-21T05:06:17Z'*
*$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *'2017-04-21T05:01:17Z'*
*$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *'2017-04-21T05:06:17Z'*
*$__timeGroup(dateColumn,'5m')* | Will be replaced by an expression usable in GROUP BY clause. For example, *(extract(epoch from dateColumn)/300)::bigint*300 AS time*
*$__unixEpochFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name with times represented as unix timestamp. For example, *dateColumn > 1494410783 AND dateColumn < 1494497183*
*$__timeGroup(dateColumn,'5m', 0)* | Same as above but with a fill parameter so all null values will be converted to the fill value (all null values would be set to zero using this example).
*$__unixEpochFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name with times represented as unix timestamp. For example, *dateColumn >= 1494410783 AND dateColumn <= 1494497183*
*$__unixEpochFrom()* | Will be replaced by the start of the currently active time selection as unix timestamp. For example, *1494410783*
*$__unixEpochTo()* | Will be replaced by the end of the currently active time selection as unix timestamp. For example, *1494497183*
@ -82,36 +95,50 @@ You can control the name of the Table panel columns by using regular `as ` SQL c
The resulting table panel:
![](/img/docs/v46/postgres_table.png)
![postgres table](/img/docs/v46/postgres_table.png)
### Time series queries
## Time series queries
If you set `Format as` to `Time series`, for use in Graph panel for example, then the query must return a column named `time` that returns either a sql datetime or any numeric datatype representing unix epoch in seconds.
If you set `Format as` to `Time series`, for use in Graph panel for example, then the query must return a column named `time` that returns either a sql datetime or any numeric datatype representing unix epoch.
Any column except `time` and `metric` is treated as a value column.
You may return a column named `metric` that is used as metric name for the value column.
Example with `metric` column
**Example with `metric` column:**
```sql
SELECT
$__timeGroup(time_date_time,'5m'),
min(value_double),
$__timeGroup("time_date_time",'5m'),
min("value_double"),
'min' as metric
FROM test_data
WHERE $__timeFilter(time_date_time)
WHERE $__timeFilter("time_date_time")
GROUP BY time
ORDER BY time
```
Example with multiple columns:
**Example using the fill parameter in the $__timeGroup macro to convert null values to be zero instead:**
```sql
SELECT
$__timeGroup(time_date_time,'5m'),
min(value_double) as min_value,
max(value_double) as max_value
$__timeGroup("createdAt",'5m',0),
sum(value) as value,
measurement
FROM test_data
WHERE $__timeFilter(time_date_time)
WHERE
$__timeFilter("createdAt")
GROUP BY time, measurement
ORDER BY time
```
**Example with multiple columns:**
```sql
SELECT
$__timeGroup("time_date_time",'5m'),
min("value_double") as "min_value",
max("value_double") as "max_value"
FROM test_data
WHERE $__timeFilter("time_date_time")
GROUP BY time
ORDER BY time
```
@ -190,26 +217,47 @@ WHERE $__timeFilter(atimestamp) and hostname in([[hostname]])
ORDER BY atimestamp ASC
```
#### Disabling Quoting for Multi-value Variables
Grafana automatically creates a quoted, comma-separated string for multi-value variables. For example: if `server01` and `server02` are selected then it will be formatted as: `'server01', 'server02'`. To disable quoting, use the csv formatting option for variables:
`${servers:csv}`
Read more about variable formatting options in the [Variables]({{< relref "reference/templating.md#advanced-formatting-options" >}}) documentation.
## Annotations
[Annotations]({{< relref "reference/annotations.md" >}}) allow you to overlay rich event information on top of graphs. You add annotation queries via the Dashboard menu / Annotations view.
An example query:
**Example query using time column with epoch values:**
```sql
SELECT
extract(epoch from time_date_time) AS time,
metric1 as text,
epoch_time as time,
metric1 as text,
concat_ws(', ', metric1::text, metric2::text) as tags
FROM
public.test_data
WHERE
$__timeFilter(time_date_time)
$__unixEpochFilter(epoch_time)
```
**Example query using time column of native sql date/time data type:**
```sql
SELECT
native_date_time as time,
metric1 as text,
concat_ws(', ', metric1::text, metric2::text) as tags
FROM
public.test_data
WHERE
$__timeFilter(native_date_time)
```
Name | Description
------------ | -------------
time | The name of the date/time field.
time | The name of the date/time field. Could be a column with a native sql date/time data type or epoch value.
text | Event description field.
tags | Optional field name to use for event tags as a comma separated string.
@ -217,3 +265,24 @@ tags | Optional field name to use for event tags as a comma separated string.
Time series queries should work in alerting conditions. Table formatted queries are not yet supported in alert rule
conditions.
## Configure the Datasource with Provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources)
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Postgres
type: postgres
url: localhost:5432
database: grafana
user: grafana
secureJsonData:
password: "Password!"
jsonData:
sslmode: "disable" # disable/require/verify-ca/verify-full
```

View File

@ -30,11 +30,11 @@ Name | Description
*Name* | The data source name. This is how you refer to the data source in panels & queries.
*Default* | Default data source means that it will be pre-selected for new panels.
*Url* | The http protocol, ip and port of your Prometheus server (default port is usually 9090)
*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
*Access* | Server (default) = URL needs to be accessible from the Grafana backend/server, Browser = URL needs to be accessible from the browser.
*Basic Auth* | Enable basic authentication to the Prometheus data source.
*User* | Name of your Prometheus user
*Password* | Database user's password
*Scrape interval* | This will be used as a lower limit for the Prometheus step query parameter. Default value is 15s.
*Scrape interval* | This will be used as a lower limit for the Prometheus step query parameter. Default value is 15s.
## Query editor
@ -50,7 +50,7 @@ Name | Description
*Min step* | Set a lower limit for the Prometheus step option. Step controls how big the jumps are when the Prometheus query engine performs range queries. Sadly there is no official prometheus documentation to link to for this very important option.
*Resolution* | Controls the step option. Small steps create high-resolution graphs but can be slow over larger time ranges, lowering the resolution can speed things up. `1/2` will try to set step option to generate 1 data point for every other pixel. A value of `1/10` will try to set step option so there is a data point every 10 pixels.
*Metric lookup* | Search for metric names in this input field.
*Format as* | **(New in v4.3)** Switch between Table & Time series. Table format will only work in the Table panel.
*Format as* | Switch between Table, Time series or Heatmap. Table format will only work in the Table panel. Heatmap format is suitable for displaying metrics of histogram type on the Heatmap panel. Under the hood, it converts the cumulative histogram to a regular one and sorts series by the bucket bound.
## Templating
@ -100,3 +100,19 @@ The step option is useful to limit the number of events returned from your query
## Getting Grafana metrics into Prometheus
Since 4.6.0 Grafana exposes metrics for Prometheus on the `/metrics` endpoint. We also bundle a dashboard within Grafana so you can get started viewing your metrics faster. You can import the bundled dashboard by going to the data source edit page and click the dashboard tab. There you can find a dashboard for Grafana and one for Prometheus. Import and start viewing all the metrics!
## Configure the Datasource with Provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources)
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://localhost:9090
```

View File

@ -14,7 +14,7 @@ weight = 4
{{< docs-imagebox img="/img/docs/v45/alert-list-panel.png" max-width="850px" >}}
The alert list panel allows you to display your dashbords alerts. The list can be configured to show current state or recent state changes. You can read more about alerts [here](http://docs.grafana.org/alerting/rules).
The alert list panel allows you to display your dashboards alerts. The list can be configured to show current state or recent state changes. You can read more about alerts [here](http://docs.grafana.org/alerting/rules).
## Alert List Options

View File

@ -25,7 +25,7 @@ The dashboard list panel allows you to display dynamic links to other dashboards
1. **Starred**: The starred dashboard selection displays starred dashboards in alphabetical order.
2. **Recently Viewed**: The recently viewed dashboard selection displays recently viewed dashboards in alphabetical order.
3. **Search**: The search dashboard selection displays dashboards by search query or tag(s).
4. **Show Headings**: When show headings is ticked the choosen list selection(Starred, Recently Viewed, Search) is shown as a heading.
4. **Show Headings**: When show headings is ticked the chosen list selection(Starred, Recently Viewed, Search) is shown as a heading.
5. **Max Items**: Max items sets the maximum number of items in a list.
6. **Query**: Here is where you enter your query you want to search by. Queries are case-insensitive, and partial values are accepted.
7. **Tags**: Here is where you enter your tag(s) you want to search by. Note that existing tags will not appear as you type, and *are* case sensitive. To see a list of existing tags, you can always return to the dashboard, open the Dashboard Picker at the top and click `tags` link in the search bar.

View File

@ -22,15 +22,18 @@ options for the panel.
## General
{{< docs-imagebox img="/img/docs/v43/graph_general.png" max-width= "900px" >}}
{{< docs-imagebox img="/img/docs/v51/graph_general.png" max-width= "800px" >}}
The general tab allows customization of a panel's appearance and menu options.
### General Options
### Info
- **Title** - The panel title on the dashboard
- **Span** - The panel width in columns
- **Height** - The panel contents height in pixels
- **Title** - The panel title of the dashboard, displayed at the top.
- **Description** - The panel description, displayed on hover of info icon in the upper left corner of the panel.
- **Transparent** - If checked, removes the solid background of the panel (default not checked).
### Repeat
Repeat a panel for each value of a variable. Repeating panels are described in more detail [here]({{< relref "reference/templating.md#repeating-panels" >}}).
### Drilldown / detail link
@ -54,47 +57,65 @@ options.
## Axes
{{< docs-imagebox img="/img/docs/v43/graph_axes_grid_options.png" max-width= "900px" >}}
{{< docs-imagebox img="/img/docs/v51/graph_axes_grid_options.png" max-width= "800px" >}}
The Axes tab controls the display of axes, grids and legend. The **Left Y** and **Right Y** can be customized using:
The Axes tab controls the display of axes.
### Left Y/Right Y
The **Left Y** and **Right Y** can be customized using:
- **Unit** - The display unit for the Y value
- **Scale** -
- **Scale** - The scale to use for the Y value, linear or logarithmic. (default linear)
- **Y-Min** - The minimum Y value. (default auto)
- **Y-Max** - The maximum Y value. (default auto)
- **Decimals** - Controls how many decimals are displayed for Y value (default auto)
- **Label** - The Y axis label (default "")
Axes can also be hidden by unchecking the appropriate box from **Show**.
### X-Axis Mode
### X-Axis
There are three options:
Axis can be hidden by unchecking **Show**.
For **Mode** there are three options:
- The default option is **Time** and means the x-axis represents time and that the data is grouped by time (for example, by hour or by minute).
- The **Series** option means that the data is grouped by series and not by time. The y-axis still represents the value.
{{< docs-imagebox img="/img/docs/v45/graph-x-axis-mode-series.png" max-width="700px">}}
{{< docs-imagebox img="/img/docs/v51/graph-x-axis-mode-series.png" max-width="800px">}}
- The **Histogram** option converts the graph into a histogram. A Histogram is a kind of bar chart that groups numbers into ranges, often called buckets or bins. Taller bars show that more data falls in that range. Histograms and buckets are described in more detail [here](http://docs.grafana.org/features/panels/heatmap/#histograms-and-buckets).
<img src="/img/docs/v43/heatmap_histogram.png" class="no-shadow">
### Legend
The legend hand be hidden by checking the **Show** checkbox. If it's shown, it can be
displayed as a table of values by checking the **Table** checkbox. Series with no
values can be hidden from the legend using the **Hide empty** checkbox.
### Y-Axes
### Legend Values
- **Align** - Check to align left and right Y-axes by value (default unchecked/false)
- **Level** - Available when *Align* is checked. Value to use for alignment of left and right Y-axes, starting from Y=0 (default 0)
## Legend
{{< docs-imagebox img="/img/docs/v51/graph-legend.png" max-width= "800px" >}}
### Options
- **Show** - Uncheck to hide the legend (default checked/true)
- **Table** - Check to display legend in table (default unchecked/false)
- **To the right** - Check to display legend to the right (default unchecked/false)
- **Width** - Available when *To the right* is checked. Value to control the minimum width for the legend (default 0)
### Values
Additional values can be shown alongside the legend names:
- **Total** - Sum of all values returned from metric query
- **Current** - Last value returned from the metric query
- **Min** - Minimum of all values returned from metric query
- **Max** - Maximum of all values returned from the metric query
- **Avg** - Average of all values returned from metric query
- **Current** - Last value returned from the metric query
- **Total** - Sum of all values returned from metric query
- **Decimals** - Controls how many decimals are displayed for legend values (and graph hover tooltips)
The legend values are calculated client side by Grafana and depend on what type of
@ -103,63 +124,72 @@ be correct at the same time. For example if you plot a rate like requests/second
using average as aggregator, then the Total in the legend will not represent the total number of requests.
It is just the sum of all data points received by Grafana.
### Hide series
Hide series when all values of a series from a metric query are of a specific value:
- **With only nulls** - Value=*null* (default unchecked)
- **With only zeros** - Value=*zero* (default unchecked)
## Display styles
{{< docs-imagebox img="/img/docs/v43/graph_display_styles.png" max-width= "900px" >}}
{{< docs-imagebox img="/img/docs/v51/graph_display_styles.png" max-width= "800px" >}}
Display styles control visual properties of the graph.
### Thresholds
### Draw Options
Thresholds allow you to add arbitrary lines or sections to the graph to make it easier to see when
the graph crosses a particular threshold.
### Chart Options
#### Draw Modes
- **Bar** - Display values as a bar chart
- **Lines** - Display values as a line graph
- **Points** - Display points for values
### Line Options
#### Mode Options
- **Line Fill** - Amount of color fill for a series. 0 is none.
- **Line Width** - The width of the line for a series.
- **Null point mode** - How null values are displayed
- **Staircase line** - Draws adjacent points as staircase
- **Fill** - Amount of color fill for a series (default 1). 0 is none.
- **Line Width** - The width of the line for a series (default 1).
- **Staircase** - Draws adjacent points as staircase
- **Points Radius** - Adjust the size of points when *Points* are selected as *Draw Mode*.
### Multiple Series
#### Hover tooltip
- **Mode** - Controls how many series to display in the tooltip when hovering over a point in time, All series or single (default All series).
- **Sort order** - Controls how series displayed in tooltip are sorted, None, Ascending or Descending (default None).
- **Stacked value** - Available when *Stack* is checked and controls how stacked values are displayed in the tooltip (default Individual).
- Individual - the value for the series you hover over
- Cumulative - sum of the series below plus the series you hover over
#### Stacking & Null value
If there are multiple series, they can be displayed as a group.
- **Stack** - Each series is stacked on top of another
- **Percent** - Each series is drawn as a percentage of the total of all series
- **Percent** - Available when *Stack* is checked. Each series is drawn as a percentage of the total of all series
- **Null value** - How null values are displayed
If you have stack enabled, you can select what the mouse hover feature should show.
### Series overrides
- Cumulative - Sum of series below plus the series you hover over
- Individual - Just the value for the series you hover over
### Rendering
- **Flot** - Render the graphs in the browser using Flot (default)
- **Graphite PNG** - Render the graph on the server using graphite's render API.
### Tooltip
- **All series** - Show all series on the same tooltip and a x crosshairs to help follow all series
### Series Specific Overrides
{{< docs-imagebox img="/img/docs/v51/graph_display_overrides.png" max-width= "800px" >}}
The section allows a series to be rendered differently from the others. For example, one series can be given
a thicker line width to make it stand out.
a thicker line width to make it stand out and/or be moved to the right Y-axis.
#### Dashes Drawing Style
There is an option under Series overrides to draw lines as dashes. Set Dashes to the value True to override the line draw setting for a specific series.
### Thresholds
{{< docs-imagebox img="/img/docs/v51/graph_display_thresholds.png" max-width= "800px" >}}
Thresholds allow you to add arbitrary lines or sections to the graph to make it easier to see when
the graph crosses a particular threshold.
## Time Range
The time range tab allows you to override the dashboard time range and specify a panel specific time. Either through a relative from now time option or through a timeshift.
{{< docs-imagebox img="/img/docs/v51/graph-time-range.png" max-width= "900px" >}}
{{< docs-imagebox img="/img/docs/v45/graph-time-range.png" max-width= "900px" >}}
The time range tab allows you to override the dashboard time range and specify a panel specific time,
either through a relative from now time option or through a timeshift.
Panel time overrides & timeshift are described in more detail [here]({{< relref "reference/timerange.md#panel-time-overrides-timeshift" >}}).

View File

@ -56,26 +56,39 @@ Data and bucket options can be found in the `Axes` tab.
Data format | Description
------------ | -------------
*Time series* | Grafana does the bucketing by going through all time series values. The bucket sizes & intervals will be determined using the Buckets options.
*Time series buckets* | Each time series already represents a Y-Axis bucket. The time series name (alias) needs to be a numeric value representing the upper interval for the bucket. Grafana does no bucketing so the bucket size options are hidden.
*Time series buckets* | Each time series already represents a Y-Axis bucket. The time series name (alias) needs to be a numeric value representing the upper or lower interval for the bucket. Grafana does no bucketing so the bucket size options are hidden.
### Bucket bound
When the data format is *Time series buckets*, the datasource returns series with names representing the bucket bound. But depending
on the datasource, a bound may be *upper* or *lower*. This option allows you to adjust the bound type. If *Auto* is set, the bound
option will be chosen based on the panel's datasource type.
### Bucket Size
The Bucket count & size options are used by Grafana to calculate how big each cell in the heatmap is. You can
define the bucket size either by count (the first input box) or by specifying a size interval. For the Y-Axis
the size interval is just a value but for the X-bucket you can specify a time range in the *Size* input, for example,
the time range `1h`. This will make the cells 1h wide on the X-axis.
the time range `1h`. This will make the cells 1h wide on the X-axis.
### Pre-bucketed data
If you have a data that is already organized into buckets you can use the `Time series buckets` data format. This format requires that your metric query return regular time series and that each time series has a numeric name
that represent the upper or lower bound of the interval.
If you have data that is already organized into buckets you can use the `Time series buckets` data format. This format
requires that your metric query return regular time series and that each time series has a numeric name that represents
the upper or lower bound of the interval.
The only data source that supports histograms over time is Elasticsearch. You do this by adding a *Histogram*
bucket aggregation before the *Date Histogram*.
There are a number of datasources supporting histograms over time, like Elasticsearch (by using a Histogram bucket
aggregation) or Prometheus (with the [histogram](https://prometheus.io/docs/concepts/metric_types/#histogram) metric type
and the *Format as* option set to Heatmap). But generally, any datasource could be used as long as it meets the requirements:
it returns series with names representing the bucket bound, or returns series sorted by the bound in ascending order.
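As a hedged illustration only — the table `response_times` and the column `bucket_upper_bound` are hypothetical, and the macros are the `$__timeGroup`/`$__timeFilter` macros of the SQL datasources described earlier in this document — a query meeting those requirements could look like this:
```sql
SELECT
  $__timeGroup(time, '1m') as time,
  cast(bucket_upper_bound as varchar) as metric, -- numeric series name = bucket bound
  count(*) as value
FROM
  response_times
WHERE
  $__timeFilter(time)
GROUP BY
  $__timeGroup(time, '1m'),
  bucket_upper_bound
ORDER BY 1
```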
![](/img/docs/v43/elastic_histogram.png)
With Elasticsearch you control the size of the buckets using the Histogram interval (Y-Axis) and the Date Histogram interval (X-axis).
You control the size of the buckets using the Histogram interval (Y-Axis) and the Date Histogram interval (X-axis).
![Elastic histogram](/img/docs/v43/elastic_histogram.png)
With Prometheus you can only control X-axis by adjusting *Min step* and *Resolution* options.
![Prometheus histogram](/img/docs/v51/prometheus_histogram.png)
## Display Options
@ -100,8 +113,8 @@ but include a group by time interval or maxDataPoints limit coupled with an aggr
This all depends on the time range of your query of course. But the important point is to know that the Histogram bucketing
that Grafana performs may be done on already aggregated and averaged data. To get more accurate heatmaps it is better
to do the bucketing during metric collection or store the data in Elasticsearch, which currently is the only data source
data supports doing Histogram bucketing on the raw data.
to do the bucketing during metric collection, or to store the data in Elasticsearch or another data source which
supports doing Histogram bucketing on the raw data.
If you remove or lower the group by time (or raise maxDataPoints) in your query to return more data points your heatmap will be
more accurate but this can also be very CPU & Memory taxing for your browser and could cause hangs and crashes if the number of

View File

@ -30,7 +30,7 @@ The singlestat panel has a normal query editor to allow you define your exact me
* **total** - The sum of all the non-null values in the series
* **first** - The first value in the series
* **delta** - The total incremental increase (of a counter) in the series. An attempt is made to account for counter resets, but this will only be accurate for single instance metrics. Used to show total counter increase in time series.
* **diff** - The difference betwen 'current' (last value) and 'first'.
* **diff** - The difference between 'current' (last value) and 'first'.
* **range** - The difference between 'min' and 'max'. Useful to show the range of change for a gauge.
2. **Prefix/Postfix**: The Prefix/Postfix fields let you define a custom label to appear *before/after* the value. The `$__name` variable can be used here to use the series name or alias from the metric query.
3. **Units**: Units are appended to the Singlestat within the panel, and will respect the color and threshold settings for the value.
@ -70,18 +70,18 @@ Gauges gives a clear picture of how high a value is in it's context. It's a grea
{{< docs-imagebox img="/img/docs/v45/singlestat-gauge-options.png" max-width="500px" class="docs-image--right docs-image--no-shadow">}}
1. **Show**: The show checkbox will toggle wether the gauge is shown in the panel. When unselected, only the Singlestat value will appear.
1. **Show**: The show checkbox will toggle whether the gauge is shown in the panel. When unselected, only the Singlestat value will appear.
2. **Min/Max**: This sets the start and end point for the gauge.
3. **Threshold Labels**: Check if you want to show the threshold labels. Thresholds are set in the color options.
4. **Threshold Markers**: Check if you want to have a second meter showing the thresholds.
<div class="clearfix"></div>
### Value to text mapping
### Value/Range to text mapping
{{< docs-imagebox img="/img/docs/v45/singlestat-value-mapping.png" class="docs-image--right docs-image--no-shadow">}}
Value to text mapping allows you to translate the value of the summary stat into explicit text. The text will respect all styling, thresholds and customization defined for the value. This can be useful to translate the number of the main Singlestat value into a context-specific human-readable word or message.
Value/Range to text mapping allows you to translate the value of the summary stat into explicit text. The text will respect all styling, thresholds and customization defined for the value. This can be useful to translate the number of the main Singlestat value into a context-specific human-readable word or message.
<div class="clearfix"></div>

View File

@ -97,3 +97,14 @@ The column styles allow you control how dates and numbers are formatted.
4. **Thresholds and Coloring**: Specify color mode and thresholds limits.
5. **Type**: The three supported types of types are **Number**, **String** and **Date**. **Unit** and **Decimals**: Specify unit and decimal precision for numbers. **Format**: Specify date format for dates.
### String
#### Value/Range to text mapping
> Only available in Grafana v5.1+.
{{< docs-imagebox img="/img/docs/v51/table-value-mapping.png" class="docs-image--right docs-image--no-shadow">}}
Value/range to text mapping allows you to translate numeric values into explicit text. The text will respect all styling, thresholds and customization defined for the value. This can be useful to translate the numeric values into a context-specific human-readable word or message.
<div class="clearfix"></div>

View File

@ -15,7 +15,7 @@ support for multiple Cloudwatch credentials.
<img src="/assets/img/features/table-panel.png">
The new table panel is very flexible, supporting both multiple modes for time series as well as for
table, annotation and raw JSON data. It also provides date formating and value formating and coloring options.
table, annotation and raw JSON data. It also provides date formatting and value formatting and coloring options.
### Time series to rows

View File

@ -33,7 +33,7 @@ You can enable/disable the shared tooltip from the dashboard settings menu or cy
{{< imgbox max-width="60%" img="/img/docs/v41/helptext_for_panel_settings.png" caption="Hovering help text" >}}
You can set a help text in the general tab on any panel. The help text is using Markdown to enable better formating and linking to other sites that can provide more information.
You can set a help text in the general tab on any panel. The help text is using Markdown to enable better formatting and linking to other sites that can provide more information.
<div class="clearfix"></div>

View File

@ -12,7 +12,7 @@ weight = -4
# What's New in Grafana v4.5
## Hightlights
## Highlights
### New prometheus query editor
@ -62,7 +62,7 @@ Datas source selection & options & help are now above your metric queries.
### Minor Changes
* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
* **InfluxDB**: Added paranthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
* **InfluxDB**: Added parenthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
## Bug Fixes

View File

@ -45,7 +45,7 @@ This makes exploring and filtering Prometheus data much easier.
* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
* **Graph**: Add support for local formating in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Jaeger**: Add support for open tracing using jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
* **CLI**: Make it possible to install plugins from any url [#5873](https://github.com/grafana/grafana/issues/5873)

View File

@ -0,0 +1,125 @@
+++
title = "What's New in Grafana v5.1"
description = "Feature & improvement highlights for Grafana v5.1"
keywords = ["grafana", "new", "documentation", "5.1"]
type = "docs"
[menu.docs]
name = "Version 5.1"
identifier = "v5.1"
parent = "whatsnew"
weight = -7
+++
# What's New in Grafana v5.1
Grafana v5.1 brings new features, many enhancements and bug fixes. This article will detail the major new features and enhancements.
* [Improved scrolling experience]({{< relref "#improved-scrolling-experience" >}})
* [Improved docker image]({{< relref "#improved-docker-image-breaking-change" >}}) with a breaking change!
* [Heatmap support for Prometheus]({{< relref "#prometheus" >}})
* [Microsoft SQL Server]({{< relref "#microsoft-sql-server" >}}) as metric & table datasource!
* [Dashboards & Panels]({{< relref "#dashboards-panels" >}}) Improved adding panels to dashboards and enhancements to Graph and Table panels.
* [New variable interpolation syntax]({{< relref "#new-variable-interpolation-syntax" >}})
* [Improved workflow for provisioned dashboards]({{< relref "#improved-workflow-for-provisioned-dashboards" >}})
## Improved scrolling experience
In Grafana v5.0 we introduced a new scrollbar component. Unfortunately this introduced a lot of issues and in some scenarios removed
the native scrolling functionality. Grafana v5.1 ships with a native scrollbar for all pages together with a scrollbar component for
the dashboard grid and panels that does not override the native scrolling functionality. We hope that these changes and improvements will
make the Grafana user experience much better!
## Improved docker image (breaking change)
Grafana v5.1 brings an improved official docker image which should make it easier to run and use the Grafana docker image and at the same time give the user more control over how to use and run it.
We've switched the id of the grafana user running Grafana inside a docker container. Unfortunately this means that files created prior to 5.1 won't have the correct permissions for later versions, which introduces a breaking change.
We made this change so that it would be easier for you to control what user Grafana is executed as (see examples below).
Version | User | User ID
--------|---------|---------
< 5.1 | grafana | 104
>= 5.1 | grafana | 472
Please read the [updated documentation](/installation/docker/#migration-from-a-previous-version-of-the-docker-container-to-5-1-or-later) which includes migration instructions and more information.
## Prometheus
{{< docs-imagebox img="/img/docs/v51/prometheus_heatmap.png" max-width="800px" class="docs-image--right" >}}
The Prometheus datasource now supports transforming Prometheus histograms to the heatmap panel. The Prometheus histogram is a powerful feature, and we're
really happy to finally allow our users to render those as heatmaps. Please read the [Heatmap panel documentation](/features/panels/heatmap/#pre-bucketed-data)
for more information on how to use it.
Prometheus query editor also got support for autocomplete of template variables. More information in the [Prometheus data source documentation](/features/datasources/prometheus/).
<div class="clearfix"></div>
## Microsoft SQL Server
{{< docs-imagebox img="/img/docs/v51/mssql_query_editor_showcase.png" max-width= "800px" class="docs-image--right" >}}
Grafana v5.1 now ships with a built-in Microsoft SQL Server (MSSQL) data source plugin that allows you to query and visualize data from any
Microsoft SQL Server 2005 or newer, including Microsoft Azure SQL Database. Do you have metric or log data in MSSQL? You can now visualize
that data and define alert rules on it like with any of Grafana's other core datasources.
Please read [Using Microsoft SQL Server in Grafana documentation](/features/datasources/mssql/) for more detailed information on how to get started and use it.
<div class="clearfix"></div>
## Dashboards & Panels
### Adding new panels to dashboards
{{< docs-imagebox img="/img/docs/v51/dashboard_add_panel.png" max-width= "800px" class="docs-image--right" >}}
The control for adding new panels to dashboards has received some enhancements and now includes functionality to search for the type of panel
you want to add. Further, the control has tabs separating functionality for adding new panels and pasting
copied panels.
When you copy a panel in a dashboard, it will be displayed in the `Paste` tab in *any* dashboard, allowing you to paste the
copied panel into the current dashboard.
{{< docs-imagebox img="/img/docs/v51/dashboard_panel_copy.png" max-width= "300px" >}}
<div class="clearfix"></div>
### Graph Panel
New enhancements include support for multiple series stacking in histogram mode, thresholds for the right Y axis, aligning left and right Y-axes to one level and additional units. More information in the [Graph panel documentation](/features/panels/graph/).
### Table Panel
New enhancements include support for mapping a numeric value/range to text and additional units. More information in the [Table panel documentation](/features/panels/table_panel/#string).
## New variable interpolation syntax
We now support a new option for rendering variables that gives the user full control of how the value(s) should be rendered.
In the table below you can see some examples and you can find all different options in the [Variables documentation](http://docs.grafana.org/reference/templating/#advanced-formatting-options).
Filter Option | Example | Raw | Interpolated | Description
------------ | ------------- | ------------- | ------------- | -------------
`glob` | ${servers:glob} | `'test1', 'test2'` | `{test1,test2}` | Formats multi-value variable into a glob
`regex` | ${servers:regex} | `'test.', 'test2'` | <code>(test\.&#124;test2)</code> | Formats multi-value variable into a regex string
`pipe` | ${servers:pipe} | `'test.', 'test2'` | <code>test.&#124;test2</code> | Formats multi-value variable into a pipe-separated string
`csv`| ${servers:csv} | `'test1', 'test2'` | `test1,test2` | Formats multi-value variable as a comma-separated string
## Improved workflow for provisioned dashboards
{{< docs-imagebox img="/img/docs/v51/provisioning_cannot_save_dashboard.png" max-width="800px" class="docs-image--right" >}}
Grafana v5.1 brings an improved workflow for provisioned dashboards:
* A populated `id` property in JSON is now automatically removed when provisioning dashboards.
* When you make changes to a provisioned dashboard and try to `Save` it, Grafana will now bring up a *Cannot save provisioned dashboard* dialog, as seen in the screenshot to the right.
The dialog lets you `Copy JSON to Clipboard` and/or `Save JSON to file`, which can help you synchronize your dashboard changes back to the provisioning source.
More information in the [Provisioning documentation](/administration/provisioning/).
<div class="clearfix"></div>
## Changelog
Check out the [CHANGELOG.md](https://github.com/grafana/grafana/blob/master/CHANGELOG.md) file for a complete list
of new features, changes, and bug fixes.

View File

@ -32,10 +32,12 @@ Query Parameters:
- `from`: epoch datetime in milliseconds. Optional.
- `to`: epoch datetime in milliseconds. Optional.
- `limit`: number. Optional - default is 10. Max limit for results returned.
- `limit`: number. Optional - default is 100. Max limit for results returned.
- `alertId`: number. Optional. Find annotations for a specified alert.
- `dashboardId`: number. Optional. Find annotations that are scoped to a specific dashboard
- `panelId`: number. Optional. Find annotations that are scoped to a specific panel
- `userId`: number. Optional. Find annotations created by a specific user
- `type`: string. Optional. `alert`|`annotation` Return alerts or user created annotations
- `tags`: string. Optional. Use this to filter global annotations. Global annotations are annotations from an annotation data source that are not connected specifically to a dashboard or panel. To do an "AND" filtering with multiple tags, specify the tags parameter multiple times e.g. `tags=tag1&tags=tag2`.
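For example, a request combining several of these parameters might look like the following (a hedged sketch; the host, API key and ids are placeholders):

```bash
# fetch up to 100 annotations scoped to dashboard 1, filtered by two tags
curl -H "Authorization: Bearer <your api key>" \
  "http://localhost:3000/api/annotations?dashboardId=1&limit=100&tags=tag1&tags=tag2"
```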
**Example Response**:
@ -180,14 +182,14 @@ Content-Type: application/json
## Delete Annotation By Id
`DELETE /api/annotation/:id`
`DELETE /api/annotations/:id`
Deletes the annotation that matches the specified id.
**Example Request**:
```http
DELETE /api/annotation/1 HTTP/1.1
DELETE /api/annotations/1 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
@ -204,14 +206,14 @@ Content-Type: application/json
## Delete Annotation By RegionId
`DELETE /api/annotation/region/:id`
`DELETE /api/annotations/region/:id`
Deletes the annotation that matches the specified region id. A region is an annotation that covers a timerange and has a start and end time. In the Grafana database, this is stored as two annotations connected by a region id.
**Example Request**:
```http
DELETE /api/annotation/region/1 HTTP/1.1
DELETE /api/annotations/region/1 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk

View File

@ -188,8 +188,8 @@ Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
"defaultRegion": "us-west-1"
},
"secureJsonData": {
"accessKey": "Ol4pIDpeKSA6XikgOl4p",
"secretKey": "dGVzdCBrZXkgYmxlYXNlIGRvbid0IHN0ZWFs"
"accessKey": "Ol4pIDpeKSA6XikgOl4p", //should not be encoded
"secretKey": "dGVzdCBrZXkgYmxlYXNlIGRvbid0IHN0ZWFs" //should be Base-64 encoded
}
}
```

View File

@ -307,7 +307,7 @@ Content-Type: application/json
`PUT /api/orgs/:orgId`
Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented yet.
Update Organisation, fields *Address 1*, *Address 2*, *City* are not implemented yet.
**Example Request**:
@ -436,4 +436,4 @@ HTTP/1.1 200
Content-Type: application/json
{"message":"User removed from organization"}
```
```

View File

@ -53,7 +53,7 @@ server {
```bash
[server]
domain = foo.bar
root_url = %(protocol)s://%(domain)s:/grafana
root_url = %(protocol)s://%(domain)s/grafana/
```
#### Nginx configuration with sub path
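As a rough sketch of what the sub-path proxy block can look like (assuming Grafana listens on localhost:3000; the exact server block in the full documentation may differ):

```bash
server {
  listen 80;
  root /usr/share/nginx/html;
  index index.html index.htm;

  # proxy requests under /grafana/ to the Grafana backend
  location /grafana/ {
    proxy_pass http://localhost:3000/;
  }
}
```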
@ -98,7 +98,7 @@ Given:
```bash
[server]
domain = localhost:8080
root_url = %(protocol)s://%(domain)s:/grafana
root_url = %(protocol)s://%(domain)s/grafana/
```
Create an Inbound Rule for the parent website (localhost:8080 in this example) in IIS Manager with the following settings:

View File

@ -482,7 +482,7 @@ Set api_url to the resource that returns [OpenID UserInfo](https://connect2id.co
First set up Grafana as an OpenId client "webapplication" in Okta. Then set the Base URIs to `https://<grafana domain>/` and set the Login redirect URIs to `https://<grafana domain>/login/generic_oauth`.
Finaly set up the generic oauth module like this:
Finally set up the generic oauth module like this:
```bash
[auth.generic_oauth]
name = Okta
@ -659,6 +659,10 @@ Set to `true` to enable auto sign up of users who do not exist in Grafana DB. De
Limit where auth proxy requests come from by configuring a list of IP addresses. This can be used to prevent users spoofing the X-WEBAUTH-USER header.
### headers
Used to define additional headers for `Name`, `Email` and/or `Login`, for example if the user's name is sent in the X-WEBAUTH-NAME header and their email address in the X-WEBAUTH-EMAIL header, set `headers = Name:X-WEBAUTH-NAME Email:X-WEBAUTH-EMAIL`.
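A hedged sketch of the corresponding `[auth.proxy]` section (the surrounding keys are the usual ones for this section and are shown here only for context, not part of this change):

```bash
[auth.proxy]
enabled = true
header_name = X-WEBAUTH-USER
header_property = username
# map additional user attributes from request headers
headers = Name:X-WEBAUTH-NAME Email:X-WEBAUTH-EMAIL
```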
<hr>
## [session]

View File

@ -15,7 +15,10 @@ weight = 1
Description | Download
------------ | -------------
Stable for Debian-based Linux | [grafana_5.0.3_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.3_amd64.deb)
Stable for Debian-based Linux | [grafana_5.1.1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.1_amd64.deb)
<!--
Beta for Debian-based Linux | [grafana_5.1.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.0-beta1_amd64.deb)
-->
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@ -24,17 +27,24 @@ installation.
```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.3_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.1_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_5.0.3_amd64.deb
sudo dpkg -i grafana_5.1.1_amd64.deb
```
<!-- ## Install Latest Beta
```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.0-beta1_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_5.1.0-beta1_amd64.deb
``` -->
## APT Repository
Add the following line to your `/etc/apt/sources.list` file.
```bash
deb https://packagecloud.io/grafana/stable/debian/ jessie main
deb https://packagecloud.io/grafana/stable/debian/ stretch main
```
Use the above line even if you are on Ubuntu or another Debian version.
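Once the repository line and the Package Cloud key (described below) are in place, installation is typically just:

```bash
sudo apt-get update
sudo apt-get install grafana
```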
@ -42,7 +52,7 @@ There is also a testing repository if you want beta or release
candidates.
```bash
deb https://packagecloud.io/grafana/testing/debian/ jessie main
deb https://packagecloud.io/grafana/testing/debian/ stretch main
```
Then add the [Package Cloud](https://packagecloud.io/grafana) key. This

View File

@ -12,34 +12,12 @@ weight = 4
# Installing using Docker
Grafana is very easy to install and run using the offical docker container.
Grafana is very easy to install and run using the official docker container.
```bash
$ docker run -d -p 3000:3000 grafana/grafana
```
All Grafana configuration settings can be defined using environment
variables, this is especially useful when using the above container.
## Docker volumes & ENV config
The Docker container exposes two volumes, the sqlite3 database in the
folder `/var/lib/grafana` and configuration files is in `/etc/grafana/`
folder. You can map these volumes to host folders when you start the
container:
```bash
$ docker run -d -p 3000:3000 \
-v /var/lib/grafana:/var/lib/grafana \
-e "GF_SECURITY_ADMIN_PASSWORD=secret" \
grafana/grafana
```
In the above example I map the data folder and sets a configuration option via
an `ENV` instruction.
See the [docker volumes documentation](https://docs.docker.com/engine/admin/volumes/volumes/) if you want to create a volume to use with the Grafana docker image instead of a bind mount (binding to a directory in the host system).
## Configuration
All options defined in conf/grafana.ini can be overridden using environment
@ -56,15 +34,24 @@ $ docker run \
grafana/grafana
```
You can use your own grafana.ini file by using environment variable `GF_PATHS_CONFIG`.
The back-end web server has a number of configuration options. Go to the
[Configuration]({{< relref "configuration.md" >}}) page for details on all
those options.
## Running a Specific Version of Grafana
```bash
# specify right tag, e.g. 5.1.0 - see Docker Hub for available tags
$ docker run \
-d \
-p 3000:3000 \
--name grafana \
grafana/grafana:5.1.0
```
## Installing Plugins for Grafana
Pass the plugins you want installed to docker with the `GF_INSTALL_PLUGINS` environment variable as a comma separated list. This will pass each plugin name to `grafana-cli plugins install ${plugin}`.
Pass the plugins you want installed to docker with the `GF_INSTALL_PLUGINS` environment variable as a comma separated list. This will pass each plugin name to `grafana-cli plugins install ${plugin}` and install them when Grafana starts.
```bash
docker run \
@ -75,15 +62,22 @@ docker run \
grafana/grafana
```
## Running a Specific Version of Grafana
## Building a custom Grafana image with pre-installed plugins
In the [grafana-docker](https://github.com/grafana/grafana-docker/) repository there is a folder called `custom/` which includes a `Dockerfile` that can be used to build a custom Grafana image. It accepts `GRAFANA_VERSION` and `GF_INSTALL_PLUGINS` as build arguments.
Example of how to build and run:
```bash
# specify right tag, e.g. 4.5.2 - see Docker Hub for available tags
$ docker run \
cd custom
docker build -t grafana:latest-with-plugins \
--build-arg "GRAFANA_VERSION=latest" \
--build-arg "GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource" .
docker run \
-d \
-p 3000:3000 \
--name grafana \
grafana/grafana:5.0.2
--name=grafana \
grafana:latest-with-plugins
```
## Configuring AWS Credentials for CloudWatch Support
@ -108,3 +102,94 @@ Supported variables:
- `GF_AWS_${profile}_ACCESS_KEY_ID`: AWS access key ID (required).
- `GF_AWS_${profile}_SECRET_ACCESS_KEY`: AWS secret access key (required).
- `GF_AWS_${profile}_REGION`: AWS region (optional).
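As a hedged example of wiring these together when starting the container (the `GF_AWS_PROFILES` variable naming the profile, and all key values, are placeholders/assumptions here):

```bash
# a sketch: start Grafana with a single CloudWatch credentials profile named "default"
docker run -d -p 3000:3000 \
  -e "GF_AWS_PROFILES=default" \
  -e "GF_AWS_default_ACCESS_KEY_ID=YOUR_ACCESS_KEY_ID" \
  -e "GF_AWS_default_SECRET_ACCESS_KEY=YOUR_SECRET_ACCESS_KEY" \
  -e "GF_AWS_default_REGION=us-east-1" \
  grafana/grafana
```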
## Grafana container with persistent storage (recommended)
```bash
# create a persistent volume for your data in /var/lib/grafana (database and plugins)
docker volume create grafana-storage
# start grafana
docker run \
-d \
-p 3000:3000 \
--name=grafana \
-v grafana-storage:/var/lib/grafana \
grafana/grafana
```
## Grafana container using bind mounts
You may want to run Grafana in Docker but use folders on your host for the database or configuration. When doing so it becomes important to start the container with a user that is able to access and write to the folder you map into the container.
```bash
mkdir data # creates a folder for your data
ID=$(id -u) # saves your user id in the ID variable
# starts grafana with your user id and using the data folder
docker run -d --user $ID --volume "$PWD/data:/var/lib/grafana" -p 3000:3000 grafana/grafana:5.1.0
```
## Migration from a previous version of the docker container to 5.1 or later
The docker container for Grafana has seen a major rewrite for 5.1.
**Important changes**
* file ownership is no longer modified during startup with `chown`
* default user id `472` instead of `104`
* no more implicit volumes
- `/var/lib/grafana`
- `/etc/grafana`
- `/var/log/grafana`
### Removal of implicit volumes
Previously `/var/lib/grafana`, `/etc/grafana` and `/var/log/grafana` were defined as volumes in the `Dockerfile`. This led to the creation of three volumes each time a new instance of the Grafana container started, whether you wanted it or not.
You should always be careful to define your own named volume for storage, but if you depended on these volumes you should be aware that an upgraded container will no longer have them.
**Warning**: when migrating from an earlier version to 5.1 or later using docker compose and implicit volumes you need to use `docker inspect` to find out which volumes your container is mapped to so that you can map them to the upgraded container as well. You will also have to change file ownership (or user) as documented below.
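For example, one way to list the mounts of an existing container (a sketch; the container name `grafana` is an assumption):

```bash
# print the volumes/bind mounts the old container is using
docker inspect -f '{{ json .Mounts }}' grafana
```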
### User ID changes
In 5.1 we switched the id of the grafana user. Unfortunately this means that files created prior to 5.1 won't have the correct permissions for later versions. We made this change so that it would be more likely that the grafana user's id would be unique to Grafana. For example, on Ubuntu 16.04 `104` is already in use by the syslog user.
Version | User | User ID
--------|---------|---------
< 5.1 | grafana | 104
>= 5.1 | grafana | 472
There are two possible solutions to this problem. Either you start the new container as the root user and change ownership from `104` to `472` or you start the upgraded container as user `104`.
#### Running docker as a different user
```bash
docker run --user 104 --volume "<your volume mapping here>" grafana/grafana:5.1.0
```
##### Specifying a user in docker-compose.yml
```yaml
version: "2"
services:
grafana:
image: grafana/grafana:5.1.0
ports:
- 3000:3000
user: "104"
```
#### Modifying permissions
The commands below will run bash inside the Grafana container with your volume mapped in. This makes it possible to modify the file ownership to match the new container. Always be careful when modifying permissions.
```bash
$ docker run -ti --user root --volume "<your volume mapping here>" --entrypoint bash grafana/grafana:5.1.0
# in the container you just started:
chown -R root:root /etc/grafana && \
chmod -R a+r /etc/grafana && \
chown -R grafana:grafana /var/lib/grafana && \
chown -R grafana:grafana /usr/share/grafana
```

View File

@ -15,8 +15,10 @@ weight = 2
Description | Download
------------ | -------------
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [5.0.3 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.3-1.x86_64.rpm)
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [5.1.1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.1-1.x86_64.rpm)
<!--
Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [5.1.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.0-beta1.x86_64.rpm)
-->
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@ -26,23 +28,29 @@ installation.
You can install Grafana using Yum directly.
```bash
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.3-1.x86_64.rpm
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.1-1.x86_64.rpm
```
<!-- ## Install Beta
```bash
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.0-beta1.x86_64.rpm
``` -->
Or install manually using `rpm`.
#### On CentOS / Fedora / Redhat:
```bash
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.3-1.x86_64.rpm
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.1-1.x86_64.rpm
$ sudo yum install initscripts fontconfig
$ sudo rpm -Uvh grafana-5.0.3-1.x86_64.rpm
$ sudo rpm -Uvh grafana-5.1.1-1.x86_64.rpm
```
#### On OpenSuse:
```bash
$ sudo rpm -i --nodeps grafana-5.0.3-1.x86_64.rpm
$ sudo rpm -i --nodeps grafana-5.1.1-1.x86_64.rpm
```
## Install via YUM Repository
@ -52,7 +60,7 @@ Add the following to a new file at `/etc/yum.repos.d/grafana.repo`
```bash
[grafana]
name=grafana
baseurl=https://packagecloud.io/grafana/stable/el/6/$basearch
baseurl=https://packagecloud.io/grafana/stable/el/7/$basearch
repo_gpgcheck=1
enabled=1
gpgcheck=1
@ -64,7 +72,7 @@ sslcacert=/etc/pki/tls/certs/ca-bundle.crt
There is also a testing repository if you want beta or release candidates.
```bash
baseurl=https://packagecloud.io/grafana/testing/el/6/$basearch
baseurl=https://packagecloud.io/grafana/testing/el/7/$basearch
```
Then install Grafana via the `yum` command.
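That final step is typically just (assuming the repository file above is in place):

```bash
sudo yum install grafana
```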

View File

@ -23,9 +23,9 @@ Before upgrading it can be a good idea to backup your Grafana database. This wil
#### sqlite
If you use sqlite you only need to make a backup of you `grafana.db` file. This is usually located at `/var/lib/grafana/grafana.db` on unix system.
If you use sqlite you only need to make a backup of your `grafana.db` file. This is usually located at `/var/lib/grafana/grafana.db` on unix systems.
If you are unsure what database you use and where it is stored, check your grafana configuration file. If you
installed grafana to custom location using a binary tar/zip it is usally in `<grafana_install_dir>/data`.
installed grafana to a custom location using a binary tar/zip it is usually in `<grafana_install_dir>/data`.
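A minimal sketch of such a backup, assuming the default path and a systemd-managed install:

```bash
# stop the service, copy the database file, start it again
sudo systemctl stop grafana-server
sudo cp /var/lib/grafana/grafana.db /var/lib/grafana/grafana.db.backup
sudo systemctl start grafana-server
```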
#### mysql

View File

@ -8,12 +8,15 @@ parent = "installation"
weight = 3
+++
# Installing on Windows
Description | Download
------------ | -------------
Latest stable package for Windows | [grafana-5.0.3.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.3.windows-x64.zip)
Latest stable package for Windows | [grafana-5.1.1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.1.windows-x64.zip)
<!--
Latest beta package for Windows | [grafana.5.1.0-beta1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.0-beta5.windows-x64.zip)
-->
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.

View File

@ -5,7 +5,7 @@ type = "docs"
[menu.docs]
name = "Developing App Plugins"
parent = "developing"
weight = 6
weight = 4
+++
# Grafana Apps

View File

@ -5,7 +5,7 @@ type = "docs"
[menu.docs]
name = "Developing Datasource Plugins"
parent = "developing"
weight = 6
weight = 5
+++
# Datasources

View File

@ -1,16 +1,11 @@
---
page_title: Plugin panel
page_description: Panel plugins for Grafana
page_keywords: grafana, plugins, documentation
---
+++
title = "Installing Plugins"
title = "Developing Panel Plugins"
keywords = ["grafana", "plugins", "panel", "documentation"]
type = "docs"
[menu.docs]
name = "Developing Panel Plugins"
parent = "developing"
weight = 1
weight = 4
+++
@ -20,7 +15,21 @@ Panels are the main building blocks of dashboards.
## Panel development
Examples
### Scrolling
The Grafana dashboard framework controls the panel height. To enable a scrollbar within the panel, the PanelCtrl needs to set the static `scrollable` variable:
```javascript
export class MyPanelCtrl extends PanelCtrl {
static scrollable = true;
...
```
In this case, make sure the template has a single `<div>...</div>` root. The plugin loader will modify that element, adding a scrollbar.
### Examples
- [clock-panel](https://github.com/grafana/clock-panel)
- [singlestat-panel](https://github.com/grafana/grafana/blob/master/public/app/plugins/panel/singlestat/module.ts)

View File

@ -5,7 +5,7 @@ type = "docs"
[menu.docs]
name = "plugin.json Schema"
parent = "developing"
weight = 6
weight = 8
+++
# Plugin.json

View File

@ -71,13 +71,13 @@ Each field in the dashboard JSON is explained below with its usage:
| **timepicker** | timepicker metadata, see [timepicker section](#timepicker) for details |
| **templating** | templating metadata, see [templating section](#templating) for details |
| **annotations** | annotations metadata, see [annotations section](#annotations) for details |
| **schemaVersion** | version of the JSON schema (integer), incremented each time a Grafana update brings changes to the said schema |
| **schemaVersion** | version of the JSON schema (integer), incremented each time a Grafana update brings changes to said schema |
| **version** | version of the dashboard (integer), incremented each time the dashboard is updated |
| **panels** | panels array, see below for detail. |
## Panels
Panels are the building blocks a dashboard. It consists of datasource queries, type of graphs, aliases, etc. Panel JSON consists of an array of JSON objects, each representing a different panel. Most of the fields are common for all panels but some fields depends on the panel type. Following is an example of panel JSON of a text panel.
Panels are the building blocks of a dashboard. A panel consists of datasource queries, the type of graph, aliases, etc. Panel JSON consists of an array of JSON objects, each representing a different panel. Most of the fields are common for all panels but some fields depend on the panel type. Following is an example of panel JSON of a text panel.
```json
"panels": [
@ -105,7 +105,7 @@ The gridPos property describes the panel size and position in grid coordinates.
- `x` The x position, in same unit as `w`.
- `y` The y position, in same unit as `h`.
The grid has a negative gravity that moves panels up if there i empty space above a panel.
The grid has a negative gravity that moves panels up if there is empty space above a panel.
### timepicker
@ -161,7 +161,7 @@ Usage of the fields is explained below:
### templating
`templating` fields contains array of template variables with their saved values along with some other metadata, for example:
The `templating` field contains an array of template variables with their saved values along with some other metadata, for example:
```json
"templating": {
@ -236,7 +236,7 @@ Usage of the above mentioned fields in the templating section is explained below
| Name | Usage |
| ---- | ----- |
| **enable** | whether templating is enabled or not |
| **list** | an array of objects representing, each representing one template variable |
| **list** | an array of objects each representing one template variable |
| **allFormat** | format to use while fetching all values from datasource, eg: `wildcard`, `glob`, `regex`, `pipe`, etc. |
| **current** | shows current selected variable text/value on the dashboard |
| **datasource** | shows datasource for the variables |

View File

@ -49,7 +49,7 @@ Click the back button to rewind to the previous Dashboard in the Playlist.
In TV mode the top navbar, row & panel controls will all fade to transparent.
This happens automatically after one minute of user inactivity but can also be toggled manually
with the `d v` sequence shortcut. Any mouse movement or keyboard action will
with the `d v` sequence shortcut, or by appending the parameter `?inactive` to the dashboard URL. Any mouse movement or keyboard action will
restore navbar & controls.
Another feature is the kiosk mode - in kiosk mode the navbar is completely hidden/removed from view. This can be enabled with the `d k`

View File

@ -36,6 +36,29 @@ interpolation the variable value might be **escaped** in order to conform to the
For example, a variable used in a regex expression in an InfluxDB or Prometheus query will be regex escaped. Read the data source specific
documentation article for details on value escaping during interpolation.
### Advanced Formatting Options
> Only available in Grafana v5.1+.
The formatting of the variable interpolation depends on the data source, but there are some situations where you might want to change the default formatting. For example, the default for the MySQL data source is to join multiple values as a comma-separated string with quotes: `'server01','server02'`. In some cases you might want a comma-separated string without quotes: `server01,server02`. This is now possible with the advanced formatting options.
Syntax: `${var_name:option}`
Filter Option | Example | Raw | Interpolated | Description
------------ | ------------- | ------------- | ------------- | -------------
`glob` | ${servers:glob} | `'test1', 'test2'` | `{test1,test2}` | (Default) Formats multi-value variable into a glob (for Graphite queries)
`regex` | ${servers:regex} | `'test.', 'test2'` | <code>(test\.&#124;test2)</code> | Formats multi-value variable into a regex string
`pipe` | ${servers:pipe} | `'test.', 'test2'` | <code>test.&#124;test2</code> | Formats multi-value variable into a pipe-separated string
`csv`| ${servers:csv} | `'test1', 'test2'` | `test1,test2` | Formats multi-value variable as a comma-separated string
`distributed`| ${servers:distributed} | `'test1', 'test2'` | `test1,servers=test2` | Formats multi-value variable in custom format for OpenTSDB.
`lucene`| ${servers:lucene} | `'test', 'test2'` | `("test" OR "test2")` | Formats multi-value variable as a lucene expression.
Test the formatting options on the [Grafana Play site](http://play.grafana.org/d/cJtIfcWiz/template-variable-formatting-options?orgId=1).
If any invalid formatting option is specified, then `glob` is the default/fallback option.
An alternative syntax (that might be deprecated in the future) is `[[var_name:option]]`.
### Variable options
A variable is presented as a dropdown select box at the top of the dashboard. It has a current value and a set of **options**. The **options**
@ -166,14 +189,16 @@ Option | Description
------- | --------
*Multi-value* | If enabled, the variable will support the selection of multiple options at the same time.
*Include All option* | Add a special `All` option whose value includes all options.
*Custom all value* | By default the `All` value will include all options in combined expression. This can become very long and can have performance problems. Many times it can be better to specify a custom all value, like a wildcard regex. To make it possible to have custom regex, globs or lucene syntax in the **Custom all value** option it is never escaped so you will have to think avbout what is a valid value for your data source.
*Custom all value* | By default the `All` value will include all options in a combined expression. This can become very long and can have performance problems. Many times it can be better to specify a custom all value, like a wildcard regex. To make it possible to have custom regex, globs or lucene syntax in the **Custom all value** option it is never escaped so you will have to think about what is a valid value for your data source.
### Formating multiple values
### Formatting multiple values
Interpolating a variable with multiple values selected is tricky as it is not straightforward how to format the multiple values into a string that
is valid in the given context where the variable is used. Grafana tries to solve this by allowing each data source plugin to
inform the templating interpolation engine what format to use for multiple values.
Note that the *Custom all value* option on the variable will have to be left blank for Grafana to format all values into a single string.
**Graphite**, for example, uses glob expressions. A variable with multiple values would, in this case, be interpolated as `{host1,host2,host3}` if
the current variable value was *host1*, *host2* and *host3*.
@ -184,7 +209,7 @@ break the regex expression.
**Elasticsearch** uses lucene query syntax, so the same variable would, in this case, be formatted as `("host1" OR "host2" OR "host3")`. In this case every value
needs to be escaped so that the value can contain lucene control words and quotation marks.
#### Formating troubles
#### Formatting troubles
Automatic escaping & formatting can cause problems and it can be tricky to grasp the logic is behind it.
Especially for InfluxDB and Prometheus where the use of regex syntax requires that the variable is used in regex operator context.
@ -275,4 +300,3 @@ Variable values are always synced to the URL using the syntax `var-<varname>=val
- [Graphite Templated Dashboard](http://play.grafana.org/dashboard/db/graphite-templated-nested)
- [Elasticsearch Templated Dashboard](http://play.grafana.org/dashboard/db/elasticsearch-templated)
- [InfluxDB Templated Dashboard](http://play.grafana.org/dashboard/db/influxdb-templated-queries)

View File

@ -108,7 +108,7 @@ In this example we use Apache as a reverseProxy in front of Grafana. Apache hand
* The next part of the configuration is the tricky part. We use Apache's rewrite engine to create our **X-WEBAUTH-USER header**, populated with the authenticated user.
* **RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER}, NS]**: This line is a little bit of magic. What it does, is for every request use the rewriteEngines look-ahead (LA-U) feature to determine what the REMOTE_USER variable would be set to after processing the request. Then assign the result to the variable PROXY_USER. This is neccessary as the REMOTE_USER variable is not available to the RequestHeader function.
* **RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER}, NS]**: This line is a little bit of magic. What it does is, for every request, use the rewrite engine's look-ahead (LA-U) feature to determine what the REMOTE_USER variable would be set to after processing the request, and then assign the result to the variable PROXY_USER. This is necessary as the REMOTE_USER variable is not available to the RequestHeader function.
* **RequestHeader set X-WEBAUTH-USER “%{PROXY_USER}e”**: With the authenticated username now stored in the PROXY_USER variable, we create a new HTTP request header that will be sent to our backend Grafana containing the username.
@ -149,7 +149,7 @@ auto_sign_up = true
##### Grafana Container
For this example, we use the offical Grafana docker image available at [Docker Hub](https://hub.docker.com/r/grafana/grafana/)
For this example, we use the official Grafana docker image available at [Docker Hub](https://hub.docker.com/r/grafana/grafana/)
* Create a file `grafana.ini` with the following contents
@ -166,7 +166,7 @@ header_property = username
auto_sign_up = true
```
* Launch the Grafana container, using our custom grafana.ini to replace `/etc/grafana/grafana.ini`. We dont expose any ports for this container as it will only be connected to by our Apache container.
* Launch the Grafana container, using our custom grafana.ini to replace `/etc/grafana/grafana.ini`. We don't expose any ports for this container as it will only be connected to by our Apache container.
```bash
docker run -i -v $(pwd)/grafana.ini:/etc/grafana/grafana.ini --name grafana grafana/grafana
@ -174,7 +174,7 @@ docker run -i -v $(pwd)/grafana.ini:/etc/grafana/grafana.ini --name grafana graf
### Apache Container
For this example we use the offical Apache docker image available at [Docker Hub](https://hub.docker.com/_/httpd/)
For this example we use the official Apache docker image available at [Docker Hub](https://hub.docker.com/_/httpd/)
* Create a file `httpd.conf` with the following contents
@ -244,4 +244,4 @@ ProxyPassReverse / http://grafana:3000/
### Use grafana.
With our Grafana and Apache containers running, you can now connect to http://localhost/ and log in using the username/password we created in the htpasswd file.
With our Grafana and Apache containers running, you can now connect to http://localhost/ and log in using the username/password we created in the htpasswd file.

View File

@ -16,7 +16,7 @@ Example:
- Parent site: http://localhost:8080
- Grafana: http://localhost:3000
Grafana as a subpath: http://localhost:8080/grafana
Grafana as a subpath: http://localhost:8080/grafana
## Setup
@ -33,7 +33,7 @@ Given that the subpath should be `grafana` and the parent site is `localhost:808
```bash
[server]
domain = localhost:8080
root_url = %(protocol)s://%(domain)s:/grafana
root_url = %(protocol)s://%(domain)s/grafana/
```
Restart the Grafana server after changing the config file.
@ -74,11 +74,11 @@ When navigating to the grafana url (`http://localhost:8080/grafana` in the examp
1. The `root_url` setting in the Grafana config file does not match the parent url with subpath. This could happen if the root_url is commented out by mistake (`;` is used for commenting out a line in .ini files):
`; root_url = %(protocol)s://%(domain)s:/grafana`
`; root_url = %(protocol)s://%(domain)s/grafana/`
2. or if the subpath in the `root_url` setting does not match the subpath used in the pattern in the Inbound Rule in IIS:
`root_url = %(protocol)s://%(domain)s:/grafana`
`root_url = %(protocol)s://%(domain)s/grafana/`
pattern in Inbound Rule: `wrongsubpath(/)?(.*)`

View File

@ -1,6 +1,6 @@
[
{ "version": "v5.1", "path": "/v5.1", "archived": false },
{ "version": "v5.0", "path": "/", "archived": false, "current": true },
{ "version": "v5.1", "path": "/", "archived": false, "current": true },
{ "version": "v5.0", "path": "/v5.0", "archived": true },
{ "version": "v4.6", "path": "/v4.6", "archived": true },
{ "version": "v4.5", "path": "/v4.5", "archived": true },
{ "version": "v4.4", "path": "/v4.4", "archived": true },

View File

@ -1,4 +1,4 @@
{
"stable": "5.0.0",
"testing": "5.0.0"
"stable": "5.0.4",
"testing": "5.0.4"
}

View File

@ -4,7 +4,7 @@
"company": "Grafana Labs"
},
"name": "grafana",
"version": "5.1.0-pre1",
"version": "5.2.0-pre1",
"repository": {
"type": "git",
"url": "http://github.com/grafana/grafana.git"
@ -22,7 +22,9 @@
"axios": "^0.17.1",
"babel-core": "^6.26.0",
"babel-loader": "^7.1.2",
"babel-plugin-syntax-dynamic-import": "^6.18.0",
"babel-preset-es2015": "^6.24.1",
"clean-webpack-plugin": "^0.1.19",
"css-loader": "^0.28.7",
"enzyme": "^3.1.0",
"enzyme-adapter-react-16": "^1.0.1",
@ -54,6 +56,7 @@
"grunt-usemin": "3.1.1",
"grunt-webpack": "^3.0.2",
"html-loader": "^0.5.1",
"html-webpack-harddisk-plugin": "^0.2.0",
"html-webpack-plugin": "^2.30.1",
"husky": "^0.14.3",
"jest": "^22.0.4",
@ -80,10 +83,12 @@
"postcss-loader": "^2.0.6",
"postcss-reporter": "^5.0.0",
"prettier": "1.9.2",
"react-hot-loader": "^4.0.1",
"react-test-renderer": "^16.0.0",
"sass-lint": "^1.10.2",
"sass-loader": "^6.0.6",
"sinon": "1.17.6",
"style-loader": "^0.20.3",
"systemjs": "0.20.19",
"systemjs-plugin-css": "^0.1.36",
"ts-jest": "^22.0.0",
@ -94,20 +99,22 @@
"webpack": "^3.10.0",
"webpack-bundle-analyzer": "^2.9.0",
"webpack-cleanup-plugin": "^0.5.1",
"webpack-dev-server": "2.11.1",
"webpack-merge": "^4.1.0",
"zone.js": "^0.7.2"
},
"scripts": {
"dev": "webpack --progress --colors --config scripts/webpack/webpack.dev.js",
"start": "webpack-dev-server --progress --colors --config scripts/webpack/webpack.dev.js",
"watch": "webpack --progress --colors --watch --config scripts/webpack/webpack.dev.js",
"build": "grunt build",
"test": "grunt test",
"test:coverage": "grunt test --coverage=true",
"lint": "tslint -c tslint.json --project tsconfig.json --type-check",
"karma": "node ./node_modules/grunt-cli/bin/grunt karma:dev",
"jest": "node ./node_modules/jest-cli/bin/jest.js --notify --watch",
"api-tests": "node ./node_modules/jest-cli/bin/jest.js --notify --watch --config=tests/api/jest.js",
"precommit": "lint-staged && node ./node_modules/grunt-cli/bin/grunt precommit"
"karma": "grunt karma:dev",
"jest": "jest --notify --watch",
"api-tests": "jest --notify --watch --config=tests/api/jest.js",
"precommit": "lint-staged && grunt precommit"
},
"lint-staged": {
"*.{ts,tsx}": [
@ -136,6 +143,7 @@
"angular-route": "^1.6.6",
"angular-sanitize": "^1.6.6",
"babel-polyfill": "^6.26.0",
"baron": "^3.0.3",
"brace": "^0.10.0",
"classnames": "^2.2.5",
"clipboard": "^1.7.1",
@ -143,6 +151,7 @@
"d3-scale-chromatic": "^1.1.1",
"eventemitter3": "^2.0.3",
"file-saver": "^1.3.3",
"immutable": "^3.8.2",
"jquery": "^3.2.1",
"lodash": "^4.17.4",
"mobx": "^3.4.1",
@ -151,7 +160,7 @@
"moment": "^2.18.1",
"mousetrap": "^1.6.0",
"mousetrap-global-bind": "^1.1.0",
"perfect-scrollbar": "^1.2.0",
"prismjs": "^1.6.0",
"prop-types": "^15.6.0",
"react": "^16.2.0",
"react-dom": "^16.2.0",
@ -164,6 +173,9 @@
"remarkable": "^1.7.1",
"rst2html": "github:thoward/rst2html#990cb89",
"rxjs": "^5.4.3",
"slate": "^0.33.4",
"slate-plain-serializer": "^0.5.10",
"slate-react": "^0.12.4",
"tether": "^1.4.0",
"tether-drop": "https://github.com/torkelo/drop/tarball/master",
"tinycolor2": "^1.4.1"

View File

@ -1,6 +1,6 @@
#! /usr/bin/env bash
deb_ver=5.0.0-beta5
rpm_ver=5.0.0-beta5
deb_ver=5.1.0-beta1
rpm_ver=5.1.0-beta1
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${deb_ver}_amd64.deb

View File

@ -12,7 +12,7 @@ import (
func AdminGetSettings(c *m.ReqContext) {
settings := make(map[string]interface{})
for _, section := range setting.Cfg.Sections() {
for _, section := range setting.Raw.Sections() {
jsonSec := make(map[string]interface{})
settings[section.Name()] = jsonSec

View File

@ -2,7 +2,6 @@ package api
import (
"strings"
"time"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/components/simplejson"
@ -15,9 +14,10 @@ import (
func GetAnnotations(c *m.ReqContext) Response {
query := &annotations.ItemQuery{
From: c.QueryInt64("from") / 1000,
To: c.QueryInt64("to") / 1000,
From: c.QueryInt64("from"),
To: c.QueryInt64("to"),
OrgId: c.OrgId,
UserId: c.QueryInt64("userId"),
AlertId: c.QueryInt64("alertId"),
DashboardId: c.QueryInt64("dashboardId"),
PanelId: c.QueryInt64("panelId"),
@ -37,7 +37,7 @@ func GetAnnotations(c *m.ReqContext) Response {
if item.Email != "" {
item.AvatarUrl = dtos.GetGravatarUrl(item.Email)
}
item.Time = item.Time * 1000
item.Time = item.Time
}
return JSON(200, items)
@ -68,16 +68,12 @@ func PostAnnotation(c *m.ReqContext, cmd dtos.PostAnnotationsCmd) Response {
UserId: c.UserId,
DashboardId: cmd.DashboardId,
PanelId: cmd.PanelId,
Epoch: cmd.Time / 1000,
Epoch: cmd.Time,
Text: cmd.Text,
Data: cmd.Data,
Tags: cmd.Tags,
}
if item.Epoch == 0 {
item.Epoch = time.Now().Unix()
}
if err := repo.Save(&item); err != nil {
return Error(500, "Failed to save annotation", err)
}
@ -97,7 +93,7 @@ func PostAnnotation(c *m.ReqContext, cmd dtos.PostAnnotationsCmd) Response {
}
item.Id = 0
item.Epoch = cmd.TimeEnd / 1000
item.Epoch = cmd.TimeEnd
if err := repo.Save(&item); err != nil {
return Error(500, "Failed save annotation for region end time", err)
@ -132,9 +128,6 @@ func PostGraphiteAnnotation(c *m.ReqContext, cmd dtos.PostGraphiteAnnotationsCmd
return Error(500, "Failed to save Graphite annotation", err)
}
if cmd.When == 0 {
cmd.When = time.Now().Unix()
}
text := formatGraphiteAnnotation(cmd.What, cmd.Data)
// Support tags in prior to Graphite 0.10.0 format (string of tags separated by space)
@ -163,7 +156,7 @@ func PostGraphiteAnnotation(c *m.ReqContext, cmd dtos.PostGraphiteAnnotationsCmd
item := annotations.Item{
OrgId: c.OrgId,
UserId: c.UserId,
Epoch: cmd.When,
Epoch: cmd.When * 1000,
Text: text,
Tags: tagsArray,
}
@ -191,7 +184,7 @@ func UpdateAnnotation(c *m.ReqContext, cmd dtos.UpdateAnnotationsCmd) Response {
OrgId: c.OrgId,
UserId: c.UserId,
Id: annotationID,
Epoch: cmd.Time / 1000,
Epoch: cmd.Time,
Text: cmd.Text,
Tags: cmd.Tags,
}
@ -203,7 +196,7 @@ func UpdateAnnotation(c *m.ReqContext, cmd dtos.UpdateAnnotationsCmd) Response {
if cmd.IsRegion {
itemRight := item
itemRight.RegionId = item.Id
itemRight.Epoch = cmd.TimeEnd / 1000
itemRight.Epoch = cmd.TimeEnd
// We don't know id of region right event, so set it to 0 and find then using query like
// ... WHERE region_id = <item.RegionId> AND id != <item.RegionId> ...
@ -301,19 +294,3 @@ func canSave(c *m.ReqContext, repo annotations.Repository, annotationID int64) R
return nil
}
func canSaveByRegionID(c *m.ReqContext, repo annotations.Repository, regionID int64) Response {
items, err := repo.Find(&annotations.ItemQuery{RegionId: regionID, OrgId: c.OrgId})
if err != nil || len(items) == 0 {
return Error(500, "Could not find annotation to update", err)
}
dashboardID := items[0].DashboardId
if canSave, err := canSaveByDashboardID(c, dashboardID); err != nil || !canSave {
return dashboardGuardianResponse(err)
}
return nil
}

View File

@ -23,7 +23,7 @@ func (hs *HTTPServer) registerRoutes() {
// automatically set HEAD for every GET
macaronR.SetAutoHead(true)
r := newRouteRegister(middleware.RequestMetrics, middleware.RequestTracing)
r := hs.RouteRegister
// not logged in views
r.Get("/", reqSignedIn, Index)
@ -149,8 +149,6 @@ func (hs *HTTPServer) registerRoutes() {
// team (admin permission required)
apiRoute.Group("/teams", func(teamsRoute RouteRegister) {
teamsRoute.Get("/:teamId", wrap(GetTeamByID))
teamsRoute.Get("/search", wrap(SearchTeams))
teamsRoute.Post("/", bind(m.CreateTeamCommand{}), wrap(CreateTeam))
teamsRoute.Put("/:teamId", bind(m.UpdateTeamCommand{}), wrap(UpdateTeam))
teamsRoute.Delete("/:teamId", wrap(DeleteTeamByID))
@ -159,6 +157,12 @@ func (hs *HTTPServer) registerRoutes() {
teamsRoute.Delete("/:teamId/members/:userId", wrap(RemoveTeamMember))
}, reqOrgAdmin)
// team without requirement of user to be org admin
apiRoute.Group("/teams", func(teamsRoute RouteRegister) {
teamsRoute.Get("/:teamId", wrap(GetTeamByID))
teamsRoute.Get("/search", wrap(SearchTeams))
})
// org information available to all users.
apiRoute.Group("/org", func(orgRoute RouteRegister) {
orgRoute.Get("/", wrap(GetOrgCurrent))
@ -170,7 +174,6 @@ func (hs *HTTPServer) registerRoutes() {
orgRoute.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrgCurrent))
orgRoute.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddressCurrent))
orgRoute.Post("/users", quota("user"), bind(m.AddOrgUserCommand{}), wrap(AddOrgUserToCurrentOrg))
orgRoute.Get("/users", wrap(GetOrgUsersForCurrentOrg))
orgRoute.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUserForCurrentOrg))
orgRoute.Delete("/users/:userId", wrap(RemoveOrgUserForCurrentOrg))
@ -184,6 +187,11 @@ func (hs *HTTPServer) registerRoutes() {
orgRoute.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateOrgPreferences))
}, reqOrgAdmin)
// current org without requirement of user to be org admin
apiRoute.Group("/org", func(orgRoute RouteRegister) {
orgRoute.Get("/users", wrap(GetOrgUsersForCurrentOrg))
})
// create new org
apiRoute.Post("/orgs", quota("org"), bind(m.CreateOrgCommand{}), wrap(CreateOrg))

View File

@ -226,7 +226,7 @@ func (this *thunderTask) Fetch() {
this.Done()
}
var client *http.Client = &http.Client{
var client = &http.Client{
Timeout: time.Second * 2,
Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
}
@ -258,9 +258,6 @@ func (this *thunderTask) fetch() error {
this.Avatar.data = &bytes.Buffer{}
writer := bufio.NewWriter(this.Avatar.data)
if _, err = io.Copy(writer, resp.Body); err != nil {
return err
}
return nil
_, err = io.Copy(writer, resp.Body)
return err
}

View File

@ -102,6 +102,16 @@ func GetDashboard(c *m.ReqContext) Response {
meta.FolderUrl = query.Result.GetUrl()
}
isDashboardProvisioned := &m.IsDashboardProvisionedQuery{DashboardId: dash.Id}
err = bus.Dispatch(isDashboardProvisioned)
if err != nil {
return Error(500, "Error while checking if dashboard is provisioned", err)
}
if isDashboardProvisioned.Result {
meta.Provisioned = true
}
// make sure db version is in sync with json model version
dash.Data.Set("version", dash.Version)
@ -228,7 +238,8 @@ func PostDashboard(c *m.ReqContext, cmd m.SaveDashboardCommand) Response {
err == m.ErrDashboardWithSameUIDExists ||
err == m.ErrFolderNotFound ||
err == m.ErrDashboardFolderCannotHaveParent ||
err == m.ErrDashboardFolderNameExists {
err == m.ErrDashboardFolderNameExists ||
err == m.ErrDashboardCannotSaveProvisionedDashboard {
return Error(400, err.Error(), nil)
}

View File

@ -29,6 +29,11 @@ func GetDashboardPermissionList(c *m.ReqContext) Response {
}
for _, perm := range acl {
perm.UserAvatarUrl = dtos.GetGravatarUrl(perm.UserEmail)
if perm.TeamId > 0 {
perm.TeamAvatarUrl = dtos.GetGravatarUrlWithDefault(perm.TeamEmail, perm.Team)
}
if perm.Slug != "" {
perm.Url = m.GetDashboardFolderUrl(perm.IsFolder, perm.Uid, perm.Slug)
}

Some files were not shown because too many files have changed in this diff.