Merge branch 'master' into master

Commit 7cc3d0c34e by Dan Cech, 2018-04-19 12:01:48 -04:00 (committed by GitHub)
648 changed files with 103284 additions and 9539 deletions


@ -1,6 +1,6 @@
[run]
init_cmds = [
["go", "build", "-o", "./bin/grafana-server", "./pkg/cmd/grafana-server"],
["go", "run", "build.go", "-dev", "build-server"],
["./bin/grafana-server", "cfg:app_mode=development"]
]
watch_all = true
@ -12,6 +12,6 @@ watch_dirs = [
watch_exts = [".go", ".ini", ".toml"]
build_delay = 1500
cmds = [
["go", "build", "-o", "./bin/grafana-server", "./pkg/cmd/grafana-server"],
["go", "run", "build.go", "-dev", "build-server"],
["./bin/grafana-server", "cfg:app_mode=development"]
]
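For orientation, the change above swaps the direct `go build` invocation for the repo's `build.go` script in the watcher's init and rebuild commands. A minimal sketch of running the same two steps by hand, assuming the repository root as the working directory:

```bash
# Build the dev server via the build script (replaces the old direct `go build`),
# then start it in development mode — mirroring the commands in the config above.
go run build.go -dev build-server
./bin/grafana-server cfg:app_mode=development
```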

.circleci/config.yml (new file, 175 lines)

@ -0,0 +1,175 @@
version: 2
jobs:
codespell:
docker:
- image: circleci/python
steps:
- checkout
- run:
name: install codespell
command: 'sudo pip install codespell'
- run:
# Important: all words have to be in lowercase, and separated by "\n".
name: exclude known exceptions
command: 'echo -e "unknwon" > words_to_ignore.txt'
- run:
name: check documentation spelling errors
command: 'codespell -I ./words_to_ignore.txt docs/'
gometalinter:
docker:
- image: circleci/golang:1.10
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
- run:
name: install gometalinter tool
command: 'go get -u github.com/alecthomas/gometalinter'
- run:
name: install linters
command: 'gometalinter --install'
- run:
name: run some linters
command: 'gometalinter --vendor --deadline 6m --disable-all --enable=structcheck --enable=unconvert --enable=varcheck ./pkg/...'
test-frontend:
docker:
- image: circleci/node:6.11.4
steps:
- checkout
- run:
name: install yarn
command: 'sudo npm install -g yarn --quiet'
- restore_cache:
key: dependency-cache-{{ checksum "yarn.lock" }}
# Could we skip this step if the cache has been restored? `[ -d node_modules ] || yarn install ...` should be able to apply to build step as well
- run:
name: yarn install
command: 'yarn install --pure-lockfile --no-progress'
- save_cache:
key: dependency-cache-{{ checksum "yarn.lock" }}
paths:
- node_modules
- run:
name: frontend tests
command: './scripts/circle-test-frontend.sh'
test-backend:
docker:
- image: circleci/golang:1.10
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
- run:
name: build backend and run go tests
command: './scripts/circle-test-backend.sh'
build:
docker:
- image: grafana/build-container:v0.1
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
- run:
name: build and package grafana
command: './scripts/build/build.sh'
- run:
name: sign packages
command: './scripts/build/sign_packages.sh'
- run:
name: sha-sum packages
command: 'go run build.go sha-dist'
- run:
name: Build Grafana.com publisher
command: 'go build -o scripts/publish scripts/build/publish.go'
- persist_to_workspace:
root: .
paths:
- dist/grafana*
- scripts/*.sh
- scripts/publish
deploy-master:
docker:
- image: circleci/python:2.7-stretch
steps:
- attach_workspace:
at: .
- run:
name: install awscli
command: 'sudo pip install awscli'
- run:
name: deploy to s3
command: 'aws s3 sync ./dist s3://$BUCKET_NAME/master'
- run:
name: Trigger Windows build
command: './scripts/trigger_windows_build.sh ${APPVEYOR_TOKEN} ${CIRCLE_SHA1} master'
- run:
name: Trigger Docker build
command: './scripts/trigger_docker_build.sh ${TRIGGER_GRAFANA_PACKER_CIRCLECI_TOKEN}'
- run:
name: Publish to Grafana.com
command: './scripts/publish -apiKey ${GRAFANA_COM_API_KEY}'
deploy-release:
docker:
- image: circleci/python:2.7-stretch
steps:
- attach_workspace:
at: dist
- run:
name: install awscli
command: 'sudo pip install awscli'
- run:
name: deploy to s3
command: 'aws s3 sync ./dist s3://$BUCKET_NAME/release'
- run:
name: Trigger Windows build
command: './scripts/trigger_windows_build.sh ${APPVEYOR_TOKEN} ${CIRCLE_SHA1} release'
- run:
name: Trigger Docker build
command: './scripts/trigger_docker_build.sh ${TRIGGER_GRAFANA_PACKER_CIRCLECI_TOKEN} ${CIRCLE_TAG}'
workflows:
version: 2
test-and-build:
jobs:
- codespell:
filters:
tags:
only: /.*/
- gometalinter:
filters:
tags:
only: /.*/
- build:
filters:
tags:
only: /.*/
- test-frontend:
filters:
tags:
only: /.*/
- test-backend:
filters:
tags:
only: /.*/
- deploy-master:
requires:
- test-backend
- test-frontend
- build
filters:
branches:
only: master
- deploy-release:
requires:
- test-backend
- test-frontend
- build
filters:
branches:
ignore: /.*/
tags:
only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
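The `deploy-release` workflow above only fires for tag names matching that version pattern. As an illustration only (not part of the config), the same check can be reproduced in a shell; the tag names below are made-up examples:

```bash
# Hypothetical tag names run against the release filter from the workflow above.
for tag in v5.1.0 v5.1.0-beta1 v5.1 release-5.1.0; do
  if echo "$tag" | grep -Eq '^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$'; then
    echo "$tag matches (deploy-release would run)"
  else
    echo "$tag does not match"
  fi
done
```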


@ -5,12 +5,12 @@ Read before posting:
- Check out How to troubleshoot metric query issues: https://community.grafana.com/t/how-to-troubleshoot-metric-query-issues/50
Please include this information:
- What Grafana version are you using?
- What datasource are you using?
- What OS are you running grafana on?
- What did you do?
- What was the expected result?
- What happened instead?
- If related to metric query / data viz:
- Include raw network request & response: get by opening Chrome Dev Tools (F12, Ctrl+Shift+I on windows, Cmd+Opt+I on Mac), go to the network tab.
### What Grafana version are you using?
### What datasource are you using?
### What OS are you running grafana on?
### What did you do?
### What was the expected result?
### What happened instead?
### If related to metric query / data viz:
### Include raw network request & response: get by opening Chrome Dev Tools (F12, Ctrl+Shift+I on windows, Cmd+Opt+I on Mac), go to the network tab.


@ -1,20 +1,95 @@
# 5.1.0 (unreleased)
* **MSSQL**: New Microsoft SQL Server data source [#10093](https://github.com/grafana/grafana/pull/10093), [#11298](https://github.com/grafana/grafana/pull/11298), thx [@linuxchips](https://github.com/linuxchips)
* **Prometheus**: The heatmap panel now supports Prometheus histograms [#10009](https://github.com/grafana/grafana/issues/10009)
* **Postgres/MySQL**: Ability to insert 0s or nulls for missing intervals [#9487](https://github.com/grafana/grafana/issues/9487), thanks [@svenklemm](https://github.com/svenklemm)
* **Postgres/MySQL/MSSQL**: Fix precision for the time column in table mode [#11306](https://github.com/grafana/grafana/issues/11306)
* **Graph**: Align left and right Y-axes to one level [#1271](https://github.com/grafana/grafana/issues/1271) & [#2740](https://github.com/grafana/grafana/issues/2740) thx [@ilgizar](https://github.com/ilgizar)
* **Graph**: Thresholds for Right Y axis [#7107](https://github.com/grafana/grafana/issues/7107), thx [@ilgizar](https://github.com/ilgizar)
* **Graph**: Support multiple series stacking in histogram mode [#8151](https://github.com/grafana/grafana/issues/8151), thx [@mtanda](https://github.com/mtanda)
* **Alerting**: Pausing/un-pausing alerts now updates new_state_date [#10942](https://github.com/grafana/grafana/pull/10942)
* **Alerting**: Support Pagerduty notification channel using Pagerduty V2 API [#10531](https://github.com/grafana/grafana/issues/10531), thx [@jbaublitz](https://github.com/jbaublitz)
* **Templating**: Add comma templating format [#10632](https://github.com/grafana/grafana/issues/10632), thx [@mtanda](https://github.com/mtanda)
* **Prometheus**: Show template variable candidate in query editor [#9210](https://github.com/grafana/grafana/issues/9210), thx [@mtanda](https://github.com/mtanda)
* **Prometheus**: Support POST for query and query_range [#9859](https://github.com/grafana/grafana/pull/9859), thx [@mtanda](https://github.com/mtanda)
* **Alerting**: Add support for retries on alert queries [#5855](https://github.com/grafana/grafana/issues/5855), thx [@Thib17](https://github.com/Thib17)
* **Table**: Table plugin value mappings [#7119](https://github.com/grafana/grafana/issues/7119), thx [infernix](https://github.com/infernix)
* **IE11**: IE 11 compatibility [#11165](https://github.com/grafana/grafana/issues/11165)
* **Scrolling**: Better scrolling experience [#11053](https://github.com/grafana/grafana/issues/11053), [#11252](https://github.com/grafana/grafana/issues/11252), [#10836](https://github.com/grafana/grafana/issues/10836), [#11185](https://github.com/grafana/grafana/issues/11185), [#11168](https://github.com/grafana/grafana/issues/11168)
* **Docker**: Improved docker image (breaking changes regarding file ownership) [grafana-docker #141](https://github.com/grafana/grafana-docker/issues/141), thx [@Spindel](https://github.com/Spindel), [@ChristianKniep](https://github.com/ChristianKniep), [@brancz](https://github.com/brancz) and [@jangaraj](https://github.com/jangaraj)
* **Folders**: A folder admin cannot add user/team permissions for a folder or its dashboards [#11173](https://github.com/grafana/grafana/issues/11173)
* **Provisioning**: Improved workflow for provisioned dashboards [#10883](https://github.com/grafana/grafana/issues/10883)
### Minor
* **OpsGenie**: Add triggered alerts as description [#11046](https://github.com/grafana/grafana/pull/11046), thx [@llamashoes](https://github.com/llamashoes)
# 5.0.1 (unreleased)
* **OpsGenie**: Add triggered alerts as description [#11046](https://github.com/grafana/grafana/pull/11046), thx [@llamashoes](https://github.com/llamashoes)
* **Cloudwatch**: Support high resolution metrics [#10925](https://github.com/grafana/grafana/pull/10925), thx [@mtanda](https://github.com/mtanda)
* **Cloudwatch**: Add dimension filtering to CloudWatch `dimension_values()` [#10029](https://github.com/grafana/grafana/issues/10029), thx [@willyhutw](https://github.com/willyhutw)
* **Units**: Second to HH:mm:ss formatter [#11107](https://github.com/grafana/grafana/issues/11107), thx [@gladdiologist](https://github.com/gladdiologist)
* **Singlestat**: Add color to prefix and postfix in singlestat panel [#11143](https://github.com/grafana/grafana/pull/11143), thx [@ApsOps](https://github.com/ApsOps)
* **Dashboards**: Version cleanup fails on old databases with many entries [#11278](https://github.com/grafana/grafana/issues/11278)
* **Server**: Adjust permissions of unix socket [#11343](https://github.com/grafana/grafana/pull/11343), thx [@corny](https://github.com/corny)
* **Shortcuts**: Add shortcut for duplicate panel [#11102](https://github.com/grafana/grafana/issues/11102)
* **AuthProxy**: Support IPv6 in Auth proxy white list [#11330](https://github.com/grafana/grafana/pull/11330), thx [@corny](https://github.com/corny)
* **SMTP**: Don't connect to SMTP server using TLS unless configured. [#7189](https://github.com/grafana/grafana/issues/7189)
* **Prometheus**: Escape backslash in labels correctly. [#10555](https://github.com/grafana/grafana/issues/10555), thx [@roidelapluie](https://github.com/roidelapluie)
* **Variables**: Case-insensitive sorting for template values [#11128](https://github.com/grafana/grafana/issues/11128) thx [@cross](https://github.com/cross)
* **Annotations (native)**: Change default limit from 10 to 100 when querying api [#11569](https://github.com/grafana/grafana/issues/11569), thx [@flopp999](https://github.com/flopp999)
* **MySQL/Postgres/MSSQL**: PostgreSQL datasource generates invalid query with dates before 1970 [#11530](https://github.com/grafana/grafana/issues/11530) thx [@ryantxu](https://github.com/ryantxu)
* **Kiosk**: Adds url parameter for starting a dashboard in inactive mode [#11228](https://github.com/grafana/grafana/issues/11228), thx [@towolf](https://github.com/towolf)
* **Dashboard**: Enable closing timepicker using escape key [#11332](https://github.com/grafana/grafana/issues/11332)
* **Datasources**: Rename direct access mode in the data source settings [#11391](https://github.com/grafana/grafana/issues/11391)
* **Search**: Display dashboards in folder indented [#11073](https://github.com/grafana/grafana/issues/11073)
* **Units**: Use B/s instead of Bps for Bytes per second [#9342](https://github.com/grafana/grafana/pull/9342), thx [@mayli](https://github.com/mayli)
* **Units**: Radiation units [#11001](https://github.com/grafana/grafana/issues/11001), thx [@victorclaessen](https://github.com/victorclaessen)
* **Units**: Timeticks unit [#11183](https://github.com/grafana/grafana/pull/11183), thx [@jtyr](https://github.com/jtyr)
* **Units**: Concentration units and "Normal cubic metre" [#11211](https://github.com/grafana/grafana/issues/11211), thx [@flopp999](https://github.com/flopp999)
* **Units**: New currency - Czech koruna [#11384](https://github.com/grafana/grafana/pull/11384), thx [@Rohlik](https://github.com/Rohlik)
* **Avatar**: Fix DISABLE_GRAVATAR option [#11095](https://github.com/grafana/grafana/issues/11095)
* **Heatmap**: Disable log scale when using time series buckets [#10792](https://github.com/grafana/grafana/issues/10792)
* **Provisioning**: Remove `id` from json when provisioning dashboards, [#11138](https://github.com/grafana/grafana/issues/11138)
* **Prometheus**: tooltip for legend format not showing properly [#11516](https://github.com/grafana/grafana/issues/11516), thx [@svenklemm](https://github.com/svenklemm)
* **Playlist**: Empty playlists cannot be deleted [#11133](https://github.com/grafana/grafana/issues/11133), thx [@kichristensen](https://github.com/kichristensen)
* **Switch Orgs**: Alphabetic order in Switch Organization modal [#11556](https://github.com/grafana/grafana/issues/11556)
* **Postgres**: improve `$__timeFilter` macro [#11578](https://github.com/grafana/grafana/issues/11578), thx [@svenklemm](https://github.com/svenklemm)
* **Permission list**: Improved ux [#10747](https://github.com/grafana/grafana/issues/10747)
* **Dashboard**: Sizing and positioning of settings menu icons [#11572](https://github.com/grafana/grafana/pull/11572)
* **Dashboard**: Add search filter/tabs to new panel control [#10427](https://github.com/grafana/grafana/issues/10427)
* **Folders**: User with org viewer role should not be able to save/move dashboards in/to general folder [#11553](https://github.com/grafana/grafana/issues/11553)
* **Influxdb**: Don't assume the first column in table response is time. [#11476](https://github.com/grafana/grafana/issues/11476), thx [@hahnjo](https://github.com/hahnjo)
### Tech
* Backend code simplification [#11613](https://github.com/grafana/grafana/pull/11613), thx [@knweiss](https://github.com/knweiss)
* Add codespell to CI [#11602](https://github.com/grafana/grafana/pull/11602), thx [@mjtrangoni](https://github.com/mjtrangoni)
* Migrated JavaScript files to TypeScript
# 5.0.4 (2018-03-28)
* **Docker** Can't start Grafana on Kubernetes 1.7.14, 1.8.9, or 1.9.4 [#140 in grafana-docker repo](https://github.com/grafana/grafana-docker/issues/140) thx [@suquant](https://github.com/suquant)
* **Dashboard** Fixed bug where collapsed panels could not be directly linked to/rendered [#11114](https://github.com/grafana/grafana/issues/11114) & [#11086](https://github.com/grafana/grafana/issues/11086) & [#11296](https://github.com/grafana/grafana/issues/11296)
* **Dashboard** Provisioning dashboard with alert rules should create alerts [#11247](https://github.com/grafana/grafana/issues/11247)
* **Snapshots** For snapshots, the Graph panel renders the legend incorrectly on right hand side [#11318](https://github.com/grafana/grafana/issues/11318)
* **Alerting** Link back to Grafana returns wrong URL if root_path contains sub-path components [#11403](https://github.com/grafana/grafana/issues/11403)
* **Alerting** Incorrect default value for upload images setting for alert notifiers [#11413](https://github.com/grafana/grafana/pull/11413)
# 5.0.3 (2018-03-16)
* **Mysql**: Mysql panic occurring occasionally upon Grafana dashboard access (a bigger patch than the one in 5.0.2) [#11155](https://github.com/grafana/grafana/issues/11155)
# 5.0.2 (2018-03-14)
* **Mysql**: Mysql panic occurring occasionally upon Grafana dashboard access [#11155](https://github.com/grafana/grafana/issues/11155)
* **Dashboards**: Should be possible to browse dashboard using only uid [#11231](https://github.com/grafana/grafana/issues/11231)
* **Alerting**: Fixes bug where alerts from hidden panels were deleted [#11222](https://github.com/grafana/grafana/issues/11222)
* **Import**: Fixes bug where dashboards with alerts couldn't be imported [#11227](https://github.com/grafana/grafana/issues/11227)
* **Teams**: Remove quota restrictions from teams [#11220](https://github.com/grafana/grafana/issues/11220)
* **Render**: Fixes bug with legacy url redirection for panel rendering [#11180](https://github.com/grafana/grafana/issues/11180)
# 5.0.1 (2018-03-08)
* **Postgres**: PostgreSQL error when using ipv6 address as hostname in connection string [#11055](https://github.com/grafana/grafana/issues/11055), thanks [@svenklemm](https://github.com/svenklemm)
* **Dashboards**: Changing templated value from dropdown is causing unsaved changes [#11063](https://github.com/grafana/grafana/issues/11063)
* **Prometheus**: Fixes bundled Prometheus 2.0 dashboard [#11016](https://github.com/grafana/grafana/issues/11016), thx [@roidelapluie](https://github.com/roidelapluie)
* **Sidemenu**: Profile menu "invisible" when gravatar is disabled [#11097](https://github.com/grafana/grafana/issues/11097)
* **Dashboard**: Fixes a bug with resizeable handles for panels [#11103](https://github.com/grafana/grafana/issues/11103)
* **Dashboard**: Fixes a bug with resizable handles for panels [#11103](https://github.com/grafana/grafana/issues/11103)
* **Alerting**: Telegram inline image mode fails when caption too long [#10975](https://github.com/grafana/grafana/issues/10975)
* **Alerting**: Fixes silent failing validation [#11145](https://github.com/grafana/grafana/pull/11145)
* **OAuth**: Only use jwt token if it contains an email address [#11127](https://github.com/grafana/grafana/pull/11127)
@ -78,7 +153,7 @@ Grafana v5.0 is going to be the biggest and most foundational release Grafana ha
### New Major Features
- **Dashboards** Dashboard folders, [#1611](https://github.com/grafana/grafana/issues/1611)
- **Teams** User groups (teams) implemented. Can be used in folder & dashboard permission list.
- **Dashboard grid**: Panels are now layed out in a two dimensional grid (with x, y, w, h). [#9093](https://github.com/grafana/grafana/issues/9093).
- **Dashboard grid**: Panels are now laid out in a two dimensional grid (with x, y, w, h). [#9093](https://github.com/grafana/grafana/issues/9093).
- **Templating**: Vertical repeat direction for panel repeats.
- **UX**: Major update to page header and navigation
- **Dashboard settings**: Combine dashboard settings views into one with side menu, [#9750](https://github.com/grafana/grafana/issues/9750)
@ -112,7 +187,7 @@ Dashboard panels and rows are positioned using a gridPos object `{x: 0, y: 0, w:
* **Dashboard history**: New config file option versions_to_keep sets how many versions per dashboard to store, [#9671](https://github.com/grafana/grafana/issues/9671)
* **Dashboard as cfg**: Load dashboards from file into Grafana on startup/change [#9654](https://github.com/grafana/grafana/issues/9654) [#5269](https://github.com/grafana/grafana/issues/5269)
* **Prometheus**: Grafana can now send alerts to Prometheus Alertmanager while firing [#7481](https://github.com/grafana/grafana/issues/7481), thx [@Thib17](https://github.com/Thib17) and [@mtanda](https://github.com/mtanda)
* **Table**: Support multiple table formated queries in table panel [#9170](https://github.com/grafana/grafana/issues/9170), thx [@davkal](https://github.com/davkal)
* **Table**: Support multiple table formatted queries in table panel [#9170](https://github.com/grafana/grafana/issues/9170), thx [@davkal](https://github.com/davkal)
* **Security**: Protect against brute force (frequent) login attempts [#7616](https://github.com/grafana/grafana/issues/7616)
## Minor
@ -134,7 +209,7 @@ Dashboard panels and rows are positioned using a gridPos object `{x: 0, y: 0, w:
* **Sensu**: Send alert message to sensu output [#9551](https://github.com/grafana/grafana/issues/9551), thx [@cjchand](https://github.com/cjchand)
* **Singlestat**: suppress error when result contains no datapoints [#9636](https://github.com/grafana/grafana/issues/9636), thx [@utkarshcmu](https://github.com/utkarshcmu)
* **Postgres/MySQL**: Control quoting in SQL-queries when using template variables [#9030](https://github.com/grafana/grafana/issues/9030), thanks [@svenklemm](https://github.com/svenklemm)
* **Pagerduty**: Pagerduty dont auto resolve incidents by default anymore. [#10222](https://github.com/grafana/grafana/issues/10222)
* **Pagerduty**: Pagerduty don't auto resolve incidents by default anymore. [#10222](https://github.com/grafana/grafana/issues/10222)
* **Cloudwatch**: Fix for multi-valued templated queries. [#9903](https://github.com/grafana/grafana/issues/9903)
## Tech
@ -212,7 +287,7 @@ The following properties have been deprecated and will be removed in a future re
* **Annotations**: Add support for creating annotations from graph panel [#8197](https://github.com/grafana/grafana/pull/8197)
* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
* **Graph**: Add support for local formating in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Jaeger**: Add support for open tracing using jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
* **CLI**: Make it possible to install plugins from any url [#5873](https://github.com/grafana/grafana/issues/5873)
@ -249,7 +324,7 @@ The following properties have been deprecated and will be removed in a future re
* **Graphite**: Fix for Grafana internal metrics to Graphite sending NaN values [#9279](https://github.com/grafana/grafana/issues/9279)
* **HTTP API**: Fix for HEAD method requests [#9307](https://github.com/grafana/grafana/issues/9307)
* **Templating**: Fix for duplicate template variable queries when refresh is set to time range change [#9185](https://github.com/grafana/grafana/issues/9185)
* **Metrics**: dont write NaN values to graphite [#9279](https://github.com/grafana/grafana/issues/9279)
* **Metrics**: don't write NaN values to graphite [#9279](https://github.com/grafana/grafana/issues/9279)
# 4.5.1 (2017-09-15)
@ -286,12 +361,12 @@ The following properties have been deprecated and will be removed in a future re
### Breaking change
* **InfluxDB/Elasticsearch**: The panel & data source option named "Group by time interval" is now named "Min time interval" and does now always define a lower limit for the auto group by time. Without having to use `>` prefix (that prefix still works). This should in theory have close to zero actual impact on existing dashboards. It does mean that if you used this setting to define a hard group by time interval of, say "1d", if you zoomed to a time range wide enough the time range could increase above the "1d" range as the setting is now always considered a lower limit.
* **Elasticsearch**: Elasticsearch metric queries without date histogram now return table formated data making table panel much easier to use for this use case. Should not break/change existing dashboards with stock panels but external panel plugins can be affected.
* **Elasticsearch**: Elasticsearch metric queries without date histogram now return table formatted data making table panel much easier to use for this use case. Should not break/change existing dashboards with stock panels but external panel plugins can be affected.
## Changes
* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
* **InfluxDB**: Added paranthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
* **InfluxDB**: Added parenthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
## Bug Fixes
@ -303,7 +378,7 @@ The following properties have been deprecated and will be removed in a future re
## Bug Fixes
* **Search**: Fix for issue that casued search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
* **Search**: Fix for issue that caused search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
* **Modals**: ESC key now closes modal again, fixes [#8981](https://github.com/grafana/grafana/issues/8988), thx [@j-white](https://github.com/j-white)
# 4.4.2 (2017-08-01)
@ -642,12 +717,12 @@ due to too many connections/file handles on the data source backend. This proble
### Enhancements
* **Login**: Adds option to disable username/password logins, closes [#4674](https://github.com/grafana/grafana/issues/4674)
* **SingleStat**: Add series name as option in singlestat panel, closes [#4740](https://github.com/grafana/grafana/issues/4740)
* **Localization**: Week start day now dependant on browser locale setting, closes [#3003](https://github.com/grafana/grafana/issues/3003)
* **Localization**: Week start day now dependent on browser locale setting, closes [#3003](https://github.com/grafana/grafana/issues/3003)
* **Templating**: Update panel repeats for variables that change on time refresh, closes [#5021](https://github.com/grafana/grafana/issues/5021)
* **Templating**: Add support for numeric and alphabetical sorting of variable values, closes [#2839](https://github.com/grafana/grafana/issues/2839)
* **Elasticsearch**: Support to set Precision Threshold for Unique Count metric, closes [#4689](https://github.com/grafana/grafana/issues/4689)
* **Navigation**: Add search to org switcher, closes [#2609](https://github.com/grafana/grafana/issues/2609)
* **Database**: Allow database config using one propertie, closes [#5456](https://github.com/grafana/grafana/pull/5456)
* **Database**: Allow database config using one property, closes [#5456](https://github.com/grafana/grafana/pull/5456)
* **Graphite**: Add support for groupByNodes, closes [#5613](https://github.com/grafana/grafana/pull/5613)
* **Influxdb**: Add support for elapsed(), closes [#5827](https://github.com/grafana/grafana/pull/5827)
* **OpenTSDB**: Add support for explicitTags for OpenTSDB>=2.3, closes [#6360](https://github.com/grafana/grafana/pull/6361)
@ -714,7 +789,7 @@ due to too many connections/file handles on the data source backend. This proble
* **Datasource**: Pending data source requests are cancelled before new ones are issued (Graphite & Prometheus), closes [#5321](https://github.com/grafana/grafana/issues/5321)
### Breaking changes
* **Logging** : Changed default logging output format (now structured into message, and key value pairs, with logger key acting as component). You can also no change in config to json log ouput.
* **Logging** : Changed default logging output format (now structured into message, and key value pairs, with logger key acting as component). You can also no change in config to json log output.
* **Graphite** : The Graph panel no longer has a Graphite PNG option. closes [#5367](https://github.com/grafana/grafana/issues/5367)
### Bug fixes
@ -732,7 +807,7 @@ due to too many connections/file handles on the data source backend. This proble
* **Annotations**: Annotations can now use a template variable as data source, closes [#5054](https://github.com/grafana/grafana/issues/5054)
* **Time picker**: Fixed issue timepicker and UTC when reading time from URL, fixes [#5078](https://github.com/grafana/grafana/issues/5078)
* **CloudWatch**: Support for Multiple Account by AssumeRole, closes [#3522](https://github.com/grafana/grafana/issues/3522)
* **Singlestat**: Fixed alignment and minium height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
* **Singlestat**: Fixed alignment and minimum height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
* **Share modal**: Fixed link when using grafana under dashboard sub url, fixes [#5109](https://github.com/grafana/grafana/issues/5109)
* **Prometheus**: Fixed bug in query editor that caused it not to load when reloading page, fixes [#5107](https://github.com/grafana/grafana/issues/5107)
* **Elasticsearch**: Fixed bug when template variable query returns numeric values, fixes [#5097](https://github.com/grafana/grafana/issues/5097), fixes [#5088](https://github.com/grafana/grafana/issues/5088)
@ -749,7 +824,7 @@ due to too many connections/file handles on the data source backend. This proble
* **Graph**: Fixed broken PNG rendering in graph panel, fixes [#5025](https://github.com/grafana/grafana/issues/5025)
* **Graph**: Fixed broken xaxis on graph panel, fixes [#5024](https://github.com/grafana/grafana/issues/5024)
* **Influxdb**: Fixes crash when hiding middle serie, fixes [#5005](https://github.com/grafana/grafana/issues/5005)
* **Influxdb**: Fixes crash when hiding middle series, fixes [#5005](https://github.com/grafana/grafana/issues/5005)
# 3.0.1 Stable (2016-05-11)
@ -761,7 +836,7 @@ due to too many connections/file handles on the data source backend. This proble
### Bug fixes
* **Dashboard title**: Fixed max dashboard title width (media query) for large screens, fixes [#4859](https://github.com/grafana/grafana/issues/4859)
* **Annotations**: Fixed issue with entering annotation edit view, fixes [#4857](https://github.com/grafana/grafana/issues/4857)
* **Remove query**: Fixed issue with removing query for data sources without collapsable query editors, fixes [#4856](https://github.com/grafana/grafana/issues/4856)
* **Remove query**: Fixed issue with removing query for data sources without collapsible query editors, fixes [#4856](https://github.com/grafana/grafana/issues/4856)
* **Graphite PNG**: Fixed issue graphite png rendering option, fixes [#4864](https://github.com/grafana/grafana/issues/4864)
* **InfluxDB**: Fixed issue missing plus group by icon, fixes [#4862](https://github.com/grafana/grafana/issues/4862)
* **Graph**: Fixes missing line mode for thresholds, fixes [#4902](https://github.com/grafana/grafana/pull/4902)
@ -777,11 +852,11 @@ due to too many connections/file handles on the data source backend. This proble
### Bug fixes
* **InfluxDB 0.12**: Fixed issue templating and `show tag values` query only returning tags for first measurement, fixes [#4726](https://github.com/grafana/grafana/issues/4726)
* **Templating**: Fixed issue with regex formating when matching multiple values, fixes [#4755](https://github.com/grafana/grafana/issues/4755)
* **Templating**: Fixed issue with regex formatting when matching multiple values, fixes [#4755](https://github.com/grafana/grafana/issues/4755)
* **Templating**: Fixed issue with custom all value and escaping, fixes [#4736](https://github.com/grafana/grafana/issues/4736)
* **Dashlist**: Fixed issue dashboard list panel and caching tags, fixes [#4768](https://github.com/grafana/grafana/issues/4768)
* **Graph**: Fixed issue with unneeded scrollbar in legend for Firefox, fixes [#4760](https://github.com/grafana/grafana/issues/4760)
* **Table panel**: Fixed issue table panel formating string array properties, fixes [#4791](https://github.com/grafana/grafana/issues/4791)
* **Table panel**: Fixed issue table panel formatting string array properties, fixes [#4791](https://github.com/grafana/grafana/issues/4791)
* **grafana-cli**: Improve error message when failing to install plugins due to corrupt response, fixes [#4651](https://github.com/grafana/grafana/issues/4651)
* **Singlestat**: Fixes prefix and postfix for gauges, fixes [#4812](https://github.com/grafana/grafana/issues/4812)
* **Singlestat**: Fixes auto-refresh on change for some options, fixes [#4809](https://github.com/grafana/grafana/issues/4809)
@ -873,7 +948,7 @@ slack channel (link to slack channel in readme).
### Bug fixes
* **Playlist**: Fix for memory leak when running a playlist, closes [#3794](https://github.com/grafana/grafana/pull/3794)
* **InfluxDB**: Fix for InfluxDB and table panel when using Format As Table and having group by time, fixes [#3928](https://github.com/grafana/grafana/issues/3928)
* **Panel Time shift**: Fix for panel time range and using dashboard times liek `Today` and `This Week`, fixes [#3941](https://github.com/grafana/grafana/issues/3941)
* **Panel Time shift**: Fix for panel time range and using dashboard times like `Today` and `This Week`, fixes [#3941](https://github.com/grafana/grafana/issues/3941)
* **Row repeat**: Repeated rows will now appear next to each other and not by the bottom of the dashboard, fixes [#3942](https://github.com/grafana/grafana/issues/3942)
* **Png renderer**: Fix for phantomjs path on windows, fixes [#3657](https://github.com/grafana/grafana/issues/3657)
@ -897,7 +972,7 @@ slack channel (link to slack channel in readme).
### Bug Fixes
* **metric editors**: Fix for clicking typeahead auto dropdown option, fixes [#3428](https://github.com/grafana/grafana/issues/3428)
* **influxdb**: Fixed issue showing Group By label only on first query, fixes [#3453](https://github.com/grafana/grafana/issues/3453)
* **logging**: Add more verbose info logging for http reqeusts, closes [#3405](https://github.com/grafana/grafana/pull/3405)
* **logging**: Add more verbose info logging for http requests, closes [#3405](https://github.com/grafana/grafana/pull/3405)
# 2.6.0-Beta1 (2015-12-04)
@ -924,7 +999,7 @@ slack channel (link to slack channel in readme).
**New Feature: Mix data sources**
- A built in data source is now available named `-- Mixed --`, When picked in the metrics tab,
it allows you to add queries of differnet data source types & instances to the same graph/panel!
it allows you to add queries of different data source types & instances to the same graph/panel!
[Issue #436](https://github.com/grafana/grafana/issues/436)
**New Feature: Elasticsearch Metrics Query Editor and Viz Support**
@ -963,7 +1038,7 @@ it allows you to add queries of differnet data source types & instances to the s
- [Issue #2564](https://github.com/grafana/grafana/issues/2564). Templating: Another attempt at fixing #2534 (Init multi value template var used in repeat panel from url)
- [Issue #2620](https://github.com/grafana/grafana/issues/2620). Graph: multi series tooltip did not highlight correct point when stacking was enabled and series were of different resolution
- [Issue #2636](https://github.com/grafana/grafana/issues/2636). InfluxDB: Do not show template vars in dropdown for tag keys and group by keys
- [Issue #2604](https://github.com/grafana/grafana/issues/2604). InfluxDB: More alias options, can now use `$[0-9]` syntax to reference part of a measurement name (seperated by dots)
- [Issue #2604](https://github.com/grafana/grafana/issues/2604). InfluxDB: More alias options, can now use `$[0-9]` syntax to reference part of a measurement name (separated by dots)
**Breaking Changes**
- Notice to makers/users of custom data sources, there is a minor breaking change in 2.2 that
@ -1045,7 +1120,7 @@ Grunt & Watch tasks:
- [Issue #1826](https://github.com/grafana/grafana/issues/1826). User role 'Viewer' is now prohibited from entering edit mode (and doing other transient dashboard edits). A new role `Read Only Editor` will replace the old Viewer behavior
- [Issue #1928](https://github.com/grafana/grafana/issues/1928). HTTP API: GET /api/dashboards/db/:slug response changed property `model` to `dashboard` to match the POST request naming
- Backend render URL changed from `/render/dashboard/solo` to `render/dashboard-solo/` (in order to have consistent dashboard url `/dashboard/:type/:slug`)
- Search HTTP API response has changed (simplified), tags list moved to seperate HTTP resource URI
- Search HTTP API response has changed (simplified), tags list moved to separate HTTP resource URI
- Datasource HTTP api breaking change, ADD datasource is now POST /api/datasources/, update is now PUT /api/datasources/:id
**Fixes**
@ -1062,7 +1137,7 @@ Grunt & Watch tasks:
# 2.0.2 (2015-04-22)
**Fixes**
- [Issue #1832](https://github.com/grafana/grafana/issues/1832). Graph Panel + Legend Table mode: Many series casued zero height graph, now legend will never reduce the height of the graph below 50% of row height.
- [Issue #1832](https://github.com/grafana/grafana/issues/1832). Graph Panel + Legend Table mode: Many series caused zero height graph, now legend will never reduce the height of the graph below 50% of row height.
- [Issue #1846](https://github.com/grafana/grafana/issues/1846). Snapshots: Fixed issue with snapshotting dashboards with an interval template variable
- [Issue #1848](https://github.com/grafana/grafana/issues/1848). Panel timeshift: You can now use panel timeshift without a relative time override
@ -1104,7 +1179,7 @@ Grunt & Watch tasks:
**Fixes**
- [Issue #1649](https://github.com/grafana/grafana/issues/1649). HTTP API: grafana /render calls now work with api keys
- [Issue #1667](https://github.com/grafana/grafana/issues/1667). Datasource proxy & session timeout fix (casued 401 Unauthorized error after a while)
- [Issue #1667](https://github.com/grafana/grafana/issues/1667). Datasource proxy & session timeout fix (caused 401 Unauthorized error after a while)
- [Issue #1707](https://github.com/grafana/grafana/issues/1707). Unsaved changes: Do not show for snapshots, scripted and file based dashboards
- [Issue #1703](https://github.com/grafana/grafana/issues/1703). Unsaved changes: Do not show for users with role `Viewer`
- [Issue #1675](https://github.com/grafana/grafana/issues/1675). Data source proxy: Fixed issue with Gzip enabled and data source proxy
@ -1117,14 +1192,14 @@ Grunt & Watch tasks:
**Important Note**
Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated backend server. Please read the [Documentation](http://docs.grafana.org) for more detailed about this SIGNIFCANT change to Grafana
Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated backend server. Please read the [Documentation](http://docs.grafana.org) for more detailed about this SIGNIFICANT change to Grafana
**New features**
- [Issue #1623](https://github.com/grafana/grafana/issues/1623). Share Dashboard: Dashboard snapshot sharing (dash and data snapshot), save to local or save to public snapshot dashboard snapshots.raintank.io site
- [Issue #1622](https://github.com/grafana/grafana/issues/1622). Share Panel: The share modal now has an embed option, gives you an iframe that you can use to embed a single graph on another web site
- [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes inbetween the user is promted with a warning if he really wants to overwrite the other's changes
- [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes in between the user is promted with a warning if he really wants to overwrite the other's changes
- [Issue #1331](https://github.com/grafana/grafana/issues/1331). Graph & Singlestat: New axis/unit format selector and more units (kbytes, Joule, Watt, eV), and new design for graph axis & grid tab and single stat options tab views
- [Issue #1241](https://github.com/grafana/grafana/issues/1242). Timepicker: New option in timepicker (under dashboard settings), to change ``now`` to be for example ``now-1m``, usefull when you want to ignore last minute because it contains incomplete data
- [Issue #1241](https://github.com/grafana/grafana/issues/1242). Timepicker: New option in timepicker (under dashboard settings), to change ``now`` to be for example ``now-1m``, useful when you want to ignore last minute because it contains incomplete data
- [Issue #171](https://github.com/grafana/grafana/issues/171). Panel: Different time periods, panels can override dashboard relative time and/or add a time shift
- [Issue #1488](https://github.com/grafana/grafana/issues/1488). Dashboard: Clone dashboard / Save as
- [Issue #1458](https://github.com/grafana/grafana/issues/1458). User: persisted user option for dark or light theme (no longer an option on a dashboard)
@ -1155,7 +1230,7 @@ Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated
**OpenTSDB breaking change**
- [Issue #1438](https://github.com/grafana/grafana/issues/1438). OpenTSDB: Automatic downsample interval passed to OpenTSDB (depends on timespan and graph width)
- NOTICE, Downsampling is now enabled by default, so if you have not picked a downsample aggregator in your metric query do so or your graphs will be missleading
- NOTICE, Downsampling is now enabled by default, so if you have not picked a downsample aggregator in your metric query do so or your graphs will be misleading
- This will make Grafana a lot quicker for OpenTSDB users when viewing large time spans without having to change the downsample interval manually.
**Tech**
@ -1186,7 +1261,7 @@ Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated
- [Issue #1114](https://github.com/grafana/grafana/issues/1114). Graphite: Lexer fix, allow equal sign (=) in metric paths
- [Issue #1136](https://github.com/grafana/grafana/issues/1136). Graph: Fix to legend value Max and negative values
- [Issue #1150](https://github.com/grafana/grafana/issues/1150). SinglestatPanel: Fixed absolute drilldown link issue
- [Issue #1123](https://github.com/grafana/grafana/issues/1123). Firefox: Workaround for Firefox bug, casued input text fields to not be selectable and not have placeable cursor
- [Issue #1123](https://github.com/grafana/grafana/issues/1123). Firefox: Workaround for Firefox bug, caused input text fields to not be selectable and not have placeable cursor
- [Issue #1108](https://github.com/grafana/grafana/issues/1108). Graph: Fix for tooltip series order when series draw order was changed with zindex property
# 1.9.0-rc1 (2014-11-17)
@ -1263,7 +1338,7 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #234](https://github.com/grafana/grafana/issues/234). Templating: Interval variable type for time intervals summarize/group by parameter, included "auto" option, and auto step counts option.
- [Issue #262](https://github.com/grafana/grafana/issues/262). Templating: Ability to use template variables for function parameters via custom variable type, can be used as parameter for movingAverage or scaleToSeconds for example
- [Issue #312](https://github.com/grafana/grafana/issues/312). Templating: Can now use template variables in panel titles
- [Issue #613](https://github.com/grafana/grafana/issues/613). Templating: Full support for InfluxDB, filter by part of series names, extract series substrings, nested queries, multipe where clauses!
- [Issue #613](https://github.com/grafana/grafana/issues/613). Templating: Full support for InfluxDB, filter by part of series names, extract series substrings, nested queries, multiple where clauses!
- Template variables can be initialized from url, with var-my_varname=value, breaking change, before it was just my_varname.
- Templating and url state sync has some issues that are not solved for this release, see [Issue #772](https://github.com/grafana/grafana/issues/772) for more details.
@ -1352,7 +1427,7 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #136](https://github.com/grafana/grafana/issues/136). Graph: New legend display option "Align as table"
- [Issue #556](https://github.com/grafana/grafana/issues/556). Graph: New legend display option "Right side", will show legend to the right of the graph
- [Issue #604](https://github.com/grafana/grafana/issues/604). Graph: New axis format, 'bps' (SI unit in steps of 1000) useful for network gear metrics
- [Issue #626](https://github.com/grafana/grafana/issues/626). Graph: Downscale y axis to more precise unit, value of 0.1 for seconds format will be formated as 100 ms. Thanks @kamaradclimber
- [Issue #626](https://github.com/grafana/grafana/issues/626). Graph: Downscale y axis to more precise unit, value of 0.1 for seconds format will be formatted as 100 ms. Thanks @kamaradclimber
- [Issue #618](https://github.com/grafana/grafana/issues/618). OpenTSDB: Series alias option to override metric name returned from opentsdb. Thanks @heldr
**Documentation**
@ -1382,13 +1457,13 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #522](https://github.com/grafana/grafana/issues/522). Series names and column name typeahead cache fix
- [Issue #504](https://github.com/grafana/grafana/issues/504). Fixed influxdb issue with raw query that caused wrong value column detection
- [Issue #526](https://github.com/grafana/grafana/issues/526). Default property that marks which datasource is default in config.js is now optional
- [Issue #342](https://github.com/grafana/grafana/issues/342). Auto-refresh caused 2 refreshes (and hence mulitple queries) each time (at least in firefox)
- [Issue #342](https://github.com/grafana/grafana/issues/342). Auto-refresh caused 2 refreshes (and hence multiple queries) each time (at least in firefox)
# 1.6.0 (2014-06-16)
#### New features or improvements
- [Issue #427](https://github.com/grafana/grafana/issues/427). New Y-axis formatter for metric values that represent seconds, Thanks @jippi
- [Issue #390](https://github.com/grafana/grafana/issues/390). Allow special characters in serie names (influxdb datasource), Thanks @majst01
- [Issue #390](https://github.com/grafana/grafana/issues/390). Allow special characters in series names (influxdb datasource), Thanks @majst01
- [Issue #428](https://github.com/grafana/grafana/issues/428). Refactoring of filterSrv, Thanks @Tetha
- [Issue #445](https://github.com/grafana/grafana/issues/445). New config for playlist feature. Set playlist_timespan to set default playlist interval, Thanks @rmca
- [Issue #461](https://github.com/grafana/grafana/issues/461). New graphite function definition added isNonNull, Thanks @tmonk42
@ -1409,13 +1484,13 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #475](https://github.com/grafana/grafana/issues/475). Add panel icon and Row edit button is replaced by the Row edit menu
- New graphs now have a default empty query
- Add Row button now creates a row with default height of 250px (no longer opens dashboard settings modal)
- Clean up of config.sample.js, graphiteUrl removed (still works, but depricated, removed in future)
- Clean up of config.sample.js, graphiteUrl removed (still works, but deprecated, removed in future)
Use datasources config instead. panel_names removed from config.js. Use plugins.panels to add custom panels
- Graphite panel is now renamed graph (Existing dashboards will still work)
#### Fixes
- [Issue #126](https://github.com/grafana/grafana/issues/126). Graphite query lexer change, can now handle regex parameters for aliasSub function
- [Issue #447](https://github.com/grafana/grafana/issues/447). Filter option loading when having muliple nested filters now works better. Options are now reloaded correctly and there are no multiple renders/refresh inbetween.
- [Issue #447](https://github.com/grafana/grafana/issues/447). Filter option loading when having muliple nested filters now works better. Options are now reloaded correctly and there are no multiple renders/refresh in between.
- [Issue #412](https://github.com/grafana/grafana/issues/412). After a filter option is changed and a nested template param is reloaded, if the current value exists after the options are reloaded the current selected value is kept.
- [Issue #460](https://github.com/grafana/grafana/issues/460). Legend Current value did not display when value was zero
- [Issue #328](https://github.com/grafana/grafana/issues/328). Fix to series toggling bug that caused annotations to be hidden when toggling/hiding series.

Gopkg.lock (generated, 221 changed lines)

@ -27,37 +27,7 @@
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/shareddefaults",
"private/protocol",
"private/protocol/ec2query",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/cloudwatch",
"service/ec2",
"service/ec2/ec2iface",
"service/s3",
"service/sts"
]
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/ec2query","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/cloudwatch","service/ec2","service/ec2/ec2iface","service/s3","service/sts"]
revision = "decd990ddc5dcdf2f73309cbcab90d06b996ca28"
version = "v1.12.67"
@ -103,6 +73,11 @@
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/denisenkom/go-mssqldb"
packages = [".","internal/cp"]
revision = "270bc3860bb94dd3a3ffd047377d746c5e276726"
[[projects]]
name = "github.com/fatih/color"
packages = ["."]
@ -142,13 +117,7 @@
[[projects]]
branch = "master"
name = "github.com/go-macaron/session"
packages = [
".",
"memcache",
"mysql",
"postgres",
"redis"
]
packages = [".","memcache","postgres","redis"]
revision = "b8e286a0dba8f4999042d6b258daf51b31d08938"
[[projects]]
@ -171,23 +140,19 @@
[[projects]]
name = "github.com/go-xorm/core"
packages = ["."]
revision = "e8409d73255791843585964791443dbad877058c"
revision = "da1adaf7a28ca792961721a34e6e04945200c890"
version = "v0.5.7"
[[projects]]
name = "github.com/go-xorm/xorm"
packages = ["."]
revision = "6687a2b4e824f4d87f2d65060ec5cb0d896dff1e"
revision = "1933dd69e294c0a26c0266637067f24dbb25770c"
version = "v0.6.4"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
]
packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "c65a0412e71e8b9b3bfd22925720d23c0f054237"
[[projects]]
@ -256,10 +221,7 @@
[[projects]]
name = "github.com/klauspost/compress"
packages = [
"flate",
"gzip"
]
packages = ["flate","gzip"]
revision = "6c8db69c4b49dd4df1fff66996cf556176d0b9bf"
version = "v1.2.1"
@ -290,10 +252,7 @@
[[projects]]
branch = "master"
name = "github.com/lib/pq"
packages = [
".",
"oid"
]
packages = [".","oid"]
revision = "61fe37aa2ee24fabcdbe5c4ac1d4ac566f88f345"
[[projects]]
@ -328,11 +287,7 @@
[[projects]]
name = "github.com/opentracing/opentracing-go"
packages = [
".",
"ext",
"log"
]
packages = [".","ext","log"]
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
version = "v1.0.2"
@ -344,12 +299,7 @@
[[projects]]
name = "github.com/prometheus/client_golang"
packages = [
"api",
"api/prometheus/v1",
"prometheus",
"prometheus/promhttp"
]
packages = ["api","api/prometheus/v1","prometheus","prometheus/promhttp"]
revision = "967789050ba94deca04a5e84cce8ad472ce313c1"
version = "v0.9.0-pre1"
@ -362,22 +312,13 @@
[[projects]]
branch = "master"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model"
]
packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"]
revision = "89604d197083d4781071d3c65855d24ecfb0a563"
[[projects]]
branch = "master"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfsd",
"xfs"
]
packages = [".","internal/util","nfsd","xfs"]
revision = "85fadb6e89903ef7cca6f6a804474cd5ea85b6e1"
[[projects]]
@ -394,21 +335,13 @@
[[projects]]
name = "github.com/smartystreets/assertions"
packages = [
".",
"internal/go-render/render",
"internal/oglematchers"
]
packages = [".","internal/go-render/render","internal/oglematchers"]
revision = "0b37b35ec7434b77e77a4bb29b79677cced992ea"
version = "1.8.1"
[[projects]]
name = "github.com/smartystreets/goconvey"
packages = [
"convey",
"convey/gotest",
"convey/reporting"
]
packages = ["convey","convey/gotest","convey/reporting"]
revision = "9e8dc3f972df6c8fcc0375ef492c24d0bb204857"
version = "1.6.3"
@ -420,21 +353,7 @@
[[projects]]
name = "github.com/uber/jaeger-client-go"
packages = [
".",
"config",
"internal/baggage",
"internal/baggage/remote",
"internal/spanlog",
"log",
"rpcmetrics",
"thrift-gen/agent",
"thrift-gen/baggage",
"thrift-gen/jaeger",
"thrift-gen/sampling",
"thrift-gen/zipkincore",
"utils"
]
packages = [".","config","internal/baggage","internal/baggage/remote","internal/spanlog","log","rpcmetrics","thrift-gen/agent","thrift-gen/baggage","thrift-gen/jaeger","thrift-gen/sampling","thrift-gen/zipkincore","utils"]
revision = "3ac96c6e679cb60a74589b0d0aa7c70a906183f7"
version = "v2.11.2"
@ -446,10 +365,7 @@
[[projects]]
name = "github.com/yudai/gojsondiff"
packages = [
".",
"formatter"
]
packages = [".","formatter"]
revision = "7b1b7adf999dab73a6eb02669c3d82dbb27a3dd6"
version = "1.0.0"
@ -462,34 +378,19 @@
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["pbkdf2"]
packages = ["md4","pbkdf2"]
revision = "3d37316aaa6bd9929127ac9a527abf408178ea7b"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"lex/httplex",
"trace"
]
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
packages = [".","google","internal","jws","jwt"]
revision = "b28fcf2b08a19742b43084fb40ab78ac6c3d8067"
[[projects]]
@ -507,39 +408,12 @@
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
[[projects]]
name = "google.golang.org/appengine"
packages = [
".",
"cloudsql",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"urlfetch"
]
packages = [".","cloudsql","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
@ -551,32 +425,7 @@
[[projects]]
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"health",
"health/grpc_health_v1",
"internal",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport"
]
packages = [".","balancer","balancer/base","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","health","health/grpc_health_v1","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
revision = "6b51017f791ae1cfbec89c52efdf444b13b550ef"
version = "v1.9.2"
@ -598,12 +447,6 @@
revision = "567b2bfa514e796916c4747494d6ff5132a1dfce"
version = "v1"
[[projects]]
branch = "v2"
name = "gopkg.in/gomail.v2"
packages = ["."]
revision = "81ebce5c23dfd25c6c67194b37d3dd3f338c98b1"
[[projects]]
name = "gopkg.in/ini.v1"
packages = ["."]
@ -616,6 +459,12 @@
revision = "75f2e9b42e99652f0d82b28ccb73648f44615faa"
version = "v1.2.4"
[[projects]]
branch = "v2"
name = "gopkg.in/mail.v2"
packages = ["."]
revision = "5bc5c8bb07bd8d2803831fbaf8cbd630fcde2c68"
[[projects]]
name = "gopkg.in/redis.v2"
packages = ["."]
@ -631,6 +480,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "4de68f1342ba98a637ec8ca7496aeeae2021bf9e4c7c80db7924e14709151a62"
inputs-digest = "ad3c71fd3244369c313978e9e7464c7116faee764386439a17de0707a08103aa"
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -85,13 +85,11 @@ ignored = [
[[constraint]]
name = "github.com/go-xorm/core"
revision = "e8409d73255791843585964791443dbad877058c"
#version = "0.5.7" //keeping this since we would rather depend on version then commit
version = "0.5.7"
[[constraint]]
name = "github.com/go-xorm/xorm"
revision = "6687a2b4e824f4d87f2d65060ec5cb0d896dff1e"
#version = "0.6.4" //keeping this since we would rather depend on version then commit
version = "0.6.4"
[[constraint]]
name = "github.com/gorilla/websocket"
@ -174,7 +172,7 @@ ignored = [
name = "golang.org/x/sync"
[[constraint]]
name = "gopkg.in/gomail.v2"
name = "gopkg.in/mail.v2"
branch = "v2"
[[constraint]]
@ -197,3 +195,7 @@ ignored = [
[[constraint]]
branch = "master"
name = "github.com/teris-io/shortid"
[[constraint]]
name = "github.com/denisenkom/go-mssqldb"
revision = "270bc3860bb94dd3a3ffd047377d746c5e276726"

View File

@ -11,8 +11,14 @@ deps: deps-js
build-go:
go run build.go build
build-server:
go run build.go build-server
build-cli:
go run build.go build-cli
build-js:
npm run build
yarn run build
build: build-go build-js
@ -20,7 +26,7 @@ test-go:
go test -v ./pkg/...
test-js:
npm test
yarn test
test: test-go test-js

View File

@ -9,6 +9,7 @@ upgrading Grafana please check here before creating an issue.
- [Datasource plugin written in typescript](https://github.com/grafana/typescript-template-datasource)
- [Simple json datasource plugin](https://github.com/grafana/simple-json-datasource)
- [Plugin development guide](http://docs.grafana.org/plugins/developing/development/)
- [Webpack Grafana plugin template project](https://github.com/CorpGlory/grafana-plugin-template-webpack)
## Changes in v4.6

View File

@ -9,9 +9,6 @@ Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.
![](http://docs.grafana.org/assets/img/features/dashboard_ex1.png)
## Grafana v5 Alpha Preview
Grafana master is now v5.0 alpha. This is going to be the biggest and most foundational release Grafana has ever had, coming with a ton of UX improvements, a new dashboard grid engine, dashboard folders, user teams and permissions. Checkout out this [video preview](https://www.youtube.com/watch?v=BC_YRNpqj5k) of Grafana v5.
## Installation
Head to [docs.grafana.org](http://docs.grafana.org/installation/) and [download](https://grafana.com/get)
the latest release.
@ -27,13 +24,13 @@ the latest master builds [here](https://grafana.com/grafana/download)
### Dependencies
- Go 1.9
- Go 1.10
- NodeJS LTS
### Building the backend
```bash
go get github.com/grafana/grafana
cd ~/go/src/github.com/grafana/grafana
cd $GOPATH/src/github.com/grafana/grafana
go run build.go setup
go run build.go build
```

View File

@ -6,18 +6,21 @@ But it will give you an idea of our current vision and plan.
### Short term (1-2 months)
- v5.1
- Crossplatform builds & build speed improvements
- Build speed improvements & integration test execution
- Kubernetes friendly docker container
- Enterprise LDAP
- Provisioning workflow
- First login registration view
- IFQL Initial support
- MSSQL datasource
### Mid term (2-4 months)
- v5.2
- Azure monitor backend rewrite
- Elasticsearch alerting
- First login registration view
- Backend plugins? (alert notifiers, auth)
- Crossplatform builds
- IFQL Initial support
### Long term (4 - 8 months)

View File

@ -7,7 +7,7 @@ clone_folder: c:\gopath\src\github.com\grafana\grafana
environment:
nodejs_version: "6"
GOPATH: c:\gopath
GOVERSION: 1.9.2
GOVERSION: 1.10
install:
- rmdir c:\go /s /q

View File

@ -41,6 +41,7 @@ var (
includeBuildNumber bool = true
buildNumber int = 0
binaries []string = []string{"grafana-server", "grafana-cli"}
isDev bool = false
)
const minGoVersion = 1.8
@ -61,6 +62,7 @@ func main() {
flag.BoolVar(&race, "race", race, "Use race detector")
flag.BoolVar(&includeBuildNumber, "includeBuildNumber", includeBuildNumber, "IncludeBuildNumber in package name")
flag.IntVar(&buildNumber, "buildNumber", 0, "Build number from CI system")
flag.BoolVar(&isDev, "dev", isDev, "optimized for development, skips certain steps")
flag.Parse()
readVersionFromPackageJson()
@ -79,10 +81,18 @@ func main() {
case "setup":
setup()
case "build-srv":
clean()
build("grafana-server", "./pkg/cmd/grafana-server", []string{})
case "build-cli":
clean()
build("grafana-cli", "./pkg/cmd/grafana-cli", []string{})
case "build-server":
clean()
build("grafana-server", "./pkg/cmd/grafana-server", []string{})
case "build":
clean()
for _, binary := range binaries {
@ -386,7 +396,9 @@ func build(binaryName, pkg string, tags []string) {
binary += ".exe"
}
rmr(binary, binary+".md5")
if !isDev {
rmr(binary, binary+".md5")
}
args := []string{"build", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
@ -397,16 +409,21 @@ func build(binaryName, pkg string, tags []string) {
args = append(args, "-o", binary)
args = append(args, pkg)
setBuildEnv()
runPrint("go", "version")
if !isDev {
setBuildEnv()
runPrint("go", "version")
}
runPrint("go", args...)
// Create an md5 checksum of the binary, to be included in the archive for
// automatic upgrades.
err := md5File(binary)
if err != nil {
log.Fatal(err)
if !isDev {
// Create an md5 checksum of the binary, to be included in the archive for
// automatic upgrades.
err := md5File(binary)
if err != nil {
log.Fatal(err)
}
}
}
@ -427,6 +444,10 @@ func rmr(paths ...string) {
}
func clean() {
if isDev {
return
}
rmr("dist")
rmr("tmp")
rmr(filepath.Join(os.Getenv("GOPATH"), fmt.Sprintf("pkg/%s_%s/github.com/grafana", goos, goarch)))
@ -542,7 +563,7 @@ func shaFilesInDist() {
return nil
}
if strings.Contains(path, ".sha256") == false {
if !strings.Contains(path, ".sha256") {
err := shaFile(path)
if err != nil {
log.Printf("Failed to create sha file. error: %v\n", err)

View File

@ -1,57 +0,0 @@
machine:
node:
version: 6.11.4
python:
version: 2.7.3
services:
- docker
environment:
GOPATH: "/home/ubuntu/.go_workspace"
ORG_PATH: "github.com/grafana"
REPO_PATH: "${ORG_PATH}/grafana"
GODIST: "go1.9.3.linux-amd64.tar.gz"
post:
- mkdir -p ~/download
- mkdir -p ~/docker
- test -e download/$GODIST || curl -o download/$GODIST https://storage.googleapis.com/golang/$GODIST
- sudo rm -rf /usr/local/go
- sudo tar -C /usr/local -xzf download/$GODIST
dependencies:
cache_directories:
- "~/docker"
- "~/download"
override:
- rm -rf ${GOPATH}/src/${REPO_PATH}
- mkdir -p ${GOPATH}/src/${ORG_PATH}
- cp -r ~/grafana ${GOPATH}/src/${ORG_PATH}
pre:
- pip install awscli
- sudo apt-get update; sudo apt-get install rpm; sudo apt-get install expect
- ./scripts/build/build_container.sh
test:
override:
- bash scripts/circle-test-frontend.sh
- bash scripts/circle-test-backend.sh
deployment:
gh_branch:
branch: master
commands:
- ./scripts/build/deploy.sh
- ./scripts/build/sign_packages.sh
- go run build.go sha-dist
- aws s3 sync ./dist s3://$BUCKET_NAME/master
- ./scripts/trigger_windows_build.sh ${APPVEYOR_TOKEN} ${CIRCLE_SHA1} master
- ./scripts/trigger_docker_build.sh ${TRIGGER_GRAFANA_PACKER_CIRCLECI_TOKEN}
- go run ./scripts/build/publish.go -apiKey ${GRAFANA_COM_API_KEY}
gh_tag:
tag: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
commands:
- ./scripts/build/deploy.sh
- ./scripts/build/sign_packages.sh
- go run build.go sha-dist
- aws s3 sync ./dist s3://$BUCKET_NAME/release
- ./scripts/trigger_windows_build.sh ${APPVEYOR_TOKEN} ${CIRCLE_SHA1} release
- ./scripts/trigger_docker_build.sh ${TRIGGER_GRAFANA_PACKER_CIRCLECI_TOKEN} ${CIRCLE_TAG}

View File

@ -8,6 +8,4 @@ coverage:
patch: yes
changes: no
comment:
layout: "diff"
behavior: "once"
comment: off

View File

@ -82,6 +82,9 @@ max_idle_conn = 2
# Max conn setting default is 0 (mean not set)
max_open_conn =
# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
conn_max_lifetime = 14400
# Set to true to log the sql calls and execution times.
log_queries =
@ -125,6 +128,9 @@ cookie_secure = false
session_life_time = 86400
gc_interval_time = 86400
# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
conn_max_lifetime = 14400
#################################### Data proxy ###########################
[dataproxy]

View File

@ -4,10 +4,10 @@
# change
# possible values : production, development
; app_mode = production
;app_mode = production
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
; instance_name = ${HOSTNAME}
;instance_name = ${HOSTNAME}
#################################### Paths ####################################
[paths]
@ -21,7 +21,7 @@
;plugins = /var/lib/grafana/plugins
# folder that contains provisioning config files that grafana will apply on startup and while running.
; provisioning = conf/provisioning
;provisioning = conf/provisioning
#################################### Server ####################################
[server]
@ -64,7 +64,7 @@
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as seperate properties or as on string using the url propertie.
# as separate properties or as one string using the url property.
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
@ -90,6 +90,9 @@
# Max conn setting default is 0 (mean not set)
;max_open_conn =
# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
;conn_max_lifetime = 14400
# Set to true to log the sql calls and execution times.
log_queries =
@ -121,7 +124,6 @@ log_queries =
# This enables data proxy logging, default is false
;logging = false
#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
@ -323,7 +325,6 @@ log_queries =
# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
;filters =
# For "console" mode only
[log.console]
;level =
@ -369,7 +370,6 @@ log_queries =
# Syslog tag. By default, the process' argv[0] is used.
;tag =
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features

View File

@ -6,3 +6,10 @@
- "9300:9300"
volumes:
- ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
fake-elastic-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: elasticsearch
FD_PORT: 9200

View File

@ -6,3 +6,10 @@
ports:
- "10200:9200"
- "10300:9300"
fake-elastic5-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: elasticsearch
FD_PORT: 10200

View File

@ -38,7 +38,7 @@ CACHE_QUERY_PORT = 7002
LOG_UPDATES = False
# Enable AMQP if you want to receve metrics using an amqp broker
# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False
# Verbose means a line will be logged for every metric received

File diff suppressed because it is too large

View File

@ -41,7 +41,7 @@ PICKLE_RECEIVER_PORT = 2004
CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002
# Enable AMQP if you want to receve metrics using you amqp broker
# Enable AMQP if you want to receive metrics using your amqp broker
ENABLE_AMQP = True
# Verbose means a line will be logged for every metric received

View File

@ -265,7 +265,7 @@ WHISPER_FALLOCATE_CREATE = True
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
# Enable AMQP if you want to receve metrics using an amqp broker
# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False
# Verbose means a line will be logged for every metric received

View File

@ -30,7 +30,7 @@ give_completer_focus = shift-space
# pertain only to specific metric types.
#
# The dashboard presents only metrics that fall into specified naming schemes
# defined in this file. This creates a simpler, more targetted view of the
# defined in this file. This creates a simpler, more targeted view of the
# data. The general form for defining a naming scheme is as follows:
#
#[Metric Type]

File diff suppressed because it is too large

View File

@ -0,0 +1,18 @@
graphite11:
image: graphiteapp/graphite-statsd
ports:
- "8180:80"
- "2103-2104:2003-2004"
- "2123-2124:2023-2024"
- "8225:8125/udp"
- "8226:8126"
fake-graphite11-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: graphite
FD_PORT: 2103
FD_GRAPHITE_VERSION: 1.1
depends_on:
- graphite11

View File

@ -0,0 +1,5 @@
FROM microsoft/mssql-server-linux:2017-CU4
WORKDIR /usr/setup
COPY . /usr/setup
RUN chmod +x /usr/setup/setup.sh
CMD /bin/bash ./entrypoint.sh

View File

@ -0,0 +1,2 @@
#start SQL Server and run setup script
/usr/setup/setup.sh & /opt/mssql/bin/sqlservr

View File

@ -0,0 +1,12 @@
#!/bin/bash
#wait for the SQL Server to come up
sleep 15s
cat /usr/setup/setup.sql.template | awk '{
gsub(/%%DB%%/,"'$MSSQL_DATABASE'");
gsub(/%%USER%%/,"'$MSSQL_USER'");
gsub(/%%PWD%%/,"'$MSSQL_PASSWORD'")
}1' > /usr/setup/setup.sql
/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P $MSSQL_SA_PASSWORD -d master -i /usr/setup/setup.sql

View File

@ -0,0 +1,14 @@
CREATE LOGIN %%USER%% WITH PASSWORD = '%%PWD%%'
GO
CREATE DATABASE %%DB%%;
GO
USE %%DB%%;
GO
CREATE USER %%USER%% FOR LOGIN %%USER%%;
GO
EXEC sp_addrolemember 'db_owner', '%%USER%%';
GO

View File

@ -0,0 +1,539 @@
{
"__inputs": [
{
"name": "DS_MSSQL",
"label": "MSSQL",
"description": "",
"type": "datasource",
"pluginId": "mssql",
"pluginName": "MSSQL"
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "5.0.0"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": "5.0.0"
},
{
"type": "datasource",
"id": "mssql",
"name": "MSSQL",
"version": "1.0.0"
},
{
"type": "panel",
"id": "table",
"name": "Table",
"version": "5.0.0"
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "A dashboard visualizing data generated from grafana/fake-data-gen",
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1520976748896,
"links": [],
"panels": [
{
"aliasColors": {
"total avg": "#6ed0e0"
},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_MSSQL}",
"fill": 2,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 0
},
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
"alias": "total avg",
"fill": 0,
"pointradius": 3,
"points": true
}
],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "",
"format": "time_series",
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time,\n avg(value) as value,\n hostname as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'logins.count' AND\n hostname IN($host)\nGROUP BY $__timeGroup(createdAt,'$summarize'), hostname\nORDER BY 1",
"refId": "A"
},
{
"alias": "",
"format": "time_series",
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time,\n min(value) as value,\n 'total avg' as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'logins.count'\nGROUP BY $__timeGroup(createdAt,'$summarize')\nORDER BY 1",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Average logins / $summarize",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"decimals": null,
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_MSSQL}",
"fill": 2,
"gridPos": {
"h": 18,
"w": 12,
"x": 12,
"y": 0
},
"id": 8,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "",
"format": "time_series",
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time,\n avg(value) as value,\n 'started' as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'payment.started'\nGROUP BY $__timeGroup(createdAt,'$summarize')\nORDER BY 1",
"refId": "A"
},
{
"alias": "",
"format": "time_series",
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time,\n avg(value) as value,\n 'ended' as \"metric\"\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'payment.ended'\nGROUP BY $__timeGroup(createdAt,'$summarize')\nORDER BY 1",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Average payments started/ended / $summarize",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_MSSQL}",
"fill": 2,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 9
},
"id": 6,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "",
"format": "time_series",
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time,\n max(value) as value,\n hostname as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'cpu' AND\n hostname IN($host)\nGROUP BY $__timeGroup(createdAt,'$summarize'), hostname\nORDER BY 1",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Max CPU / $summarize",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"columns": [],
"datasource": "${DS_MSSQL}",
"fontSize": "100%",
"gridPos": {
"h": 10,
"w": 24,
"x": 0,
"y": 18
},
"id": 4,
"links": [],
"pageSize": null,
"scroll": true,
"showHeader": true,
"sort": {
"col": 0,
"desc": true
},
"styles": [
{
"alias": "Time",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "Time",
"type": "date"
},
{
"alias": "",
"colorMode": null,
"colors": [
"rgba(245, 54, 54, 0.9)",
"rgba(237, 129, 40, 0.89)",
"rgba(50, 172, 45, 0.97)"
],
"decimals": 2,
"pattern": "/.*/",
"thresholds": [],
"type": "number",
"unit": "short"
}
],
"targets": [
{
"alias": "",
"format": "table",
"rawSql": "SELECT createdAt as Time, source, datacenter, hostname, value FROM grafana_metric WHERE hostname in($host)",
"refId": "A"
}
],
"title": "Values",
"transform": "table",
"type": "table"
}
],
"schemaVersion": 16,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"allValue": null,
"current": {},
"datasource": "${DS_MSSQL}",
"hide": 0,
"includeAll": false,
"label": "Datacenter",
"multi": false,
"name": "datacenter",
"options": [],
"query": "SELECT DISTINCT datacenter FROM grafana_metric",
"refresh": 1,
"regex": "",
"sort": 1,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"allValue": null,
"current": {},
"datasource": "${DS_MSSQL}",
"hide": 0,
"includeAll": true,
"label": "Hostname",
"multi": true,
"name": "host",
"options": [],
"query": "SELECT DISTINCT hostname FROM grafana_metric WHERE datacenter='$datacenter'",
"refresh": 1,
"regex": "",
"sort": 1,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"auto": false,
"auto_count": 30,
"auto_min": "10s",
"current": {
"text": "1m",
"value": "1m"
},
"hide": 0,
"label": "Summarize",
"name": "summarize",
"options": [
{
"selected": false,
"text": "1s",
"value": "1s"
},
{
"selected": false,
"text": "10s",
"value": "10s"
},
{
"selected": false,
"text": "30s",
"value": "30s"
},
{
"selected": true,
"text": "1m",
"value": "1m"
},
{
"selected": false,
"text": "5m",
"value": "5m"
},
{
"selected": false,
"text": "10m",
"value": "10m"
},
{
"selected": false,
"text": "30m",
"value": "30m"
},
{
"selected": false,
"text": "1h",
"value": "1h"
},
{
"selected": false,
"text": "6h",
"value": "6h"
},
{
"selected": false,
"text": "12h",
"value": "12h"
},
{
"selected": false,
"text": "1d",
"value": "1d"
},
{
"selected": false,
"text": "7d",
"value": "7d"
},
{
"selected": false,
"text": "14d",
"value": "14d"
},
{
"selected": false,
"text": "30d",
"value": "30d"
}
],
"query": "1s,10s,30s,1m,5m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
"refresh": 2,
"type": "interval"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Grafana Fake Data Gen - MSSQL",
"uid": "86Js1xRmk",
"version": 11
}

View File

@ -0,0 +1,19 @@
mssql:
build:
context: blocks/mssql/build
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: Password!
MSSQL_PID: Express
MSSQL_DATABASE: grafana
MSSQL_USER: grafana
MSSQL_PASSWORD: Password!
ports:
- "1433:1433"
fake-mssql-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: mssql
FD_PORT: 1433

File diff suppressed because it is too large

View File

@ -0,0 +1,12 @@
mssqltests:
build:
context: blocks/mssql/build
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: Password!
MSSQL_PID: Express
MSSQL_DATABASE: grafanatest
MSSQL_USER: grafana
MSSQL_PASSWORD: Password!
ports:
- "1433:1433"

View File

@ -2,7 +2,7 @@
"__inputs": [
{
"name": "DS_MYSQL",
"label": "Mysql",
"label": "MySQL",
"description": "",
"type": "datasource",
"pluginId": "mysql",
@ -20,19 +20,19 @@
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
"version": "5.0.0"
},
{
"type": "datasource",
"id": "mysql",
"name": "MySQL",
"version": "1.0.0"
"version": "5.0.0"
},
{
"type": "panel",
"id": "table",
"name": "Table",
"version": ""
"version": "5.0.0"
}
],
"annotations": {
@ -53,7 +53,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1518602729468,
"iteration": 1523372133566,
"links": [],
"panels": [
{
@ -118,7 +118,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeShift": "1h",
"timeShift": null,
"title": "Average logins / $summarize",
"tooltip": {
"shared": true,
@ -150,7 +150,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -204,7 +208,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeShift": "1h",
"timeShift": null,
"title": "Average payments started/ended / $summarize",
"tooltip": {
"shared": true,
@ -236,7 +240,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -284,7 +292,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeShift": "1h",
"timeShift": null,
"title": "Max CPU / $summarize",
"tooltip": {
"shared": true,
@ -316,7 +324,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"columns": [],
@ -369,7 +381,7 @@
"target": ""
}
],
"timeShift": "1h",
"timeShift": null,
"title": "Values",
"transform": "table",
"type": "table"
@ -428,7 +440,6 @@
"auto_count": 5,
"auto_min": "10s",
"current": {
"selected": true,
"text": "1m",
"value": "1m"
},
@ -545,5 +556,5 @@
"timezone": "",
"title": "Grafana Fake Data Gen - MySQL",
"uid": "DGsCac3kz",
"version": 6
"version": 8
}

View File

@ -7,9 +7,6 @@
MYSQL_PASSWORD: password
ports:
- "3306:3306"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]
fake-mysql-data:

View File

@ -0,0 +1,3 @@
FROM mysql:latest
ADD setup.sql /docker-entrypoint-initdb.d
CMD ["mysqld"]

File diff suppressed because it is too large

View File

@ -1,5 +1,6 @@
mysqltests:
image: mysql:latest
build:
context: blocks/mysql_tests
environment:
MYSQL_ROOT_PASSWORD: rootpass
MYSQL_DATABASE: grafana_tests
@ -7,7 +8,4 @@
MYSQL_PASSWORD: password
ports:
- "3306:3306"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
tmpfs: /var/lib/mysql:rw

View File

@ -0,0 +1,2 @@
CREATE DATABASE grafana_ds_tests;
GRANT ALL PRIVILEGES ON grafana_ds_tests.* TO 'grafana';

View File

@ -17,6 +17,7 @@ EXPOSE 389
VOLUME ["/etc/ldap", "/var/lib/ldap"]
COPY modules/ /etc/ldap.dist/modules
COPY prepopulate/ /etc/ldap.dist/prepopulate
COPY entrypoint.sh /entrypoint.sh

View File

@ -65,7 +65,7 @@ EOF
fi
if [[ -n "$SLAPD_ADDITIONAL_SCHEMAS" ]]; then
IFS=","; declare -a schemas=($SLAPD_ADDITIONAL_SCHEMAS)
IFS=","; declare -a schemas=($SLAPD_ADDITIONAL_SCHEMAS); unset IFS
for schema in "${schemas[@]}"; do
slapadd -n0 -F /etc/ldap/slapd.d -l "/etc/ldap/schema/${schema}.ldif" >/dev/null 2>&1
@ -73,14 +73,18 @@ EOF
fi
if [[ -n "$SLAPD_ADDITIONAL_MODULES" ]]; then
IFS=","; declare -a modules=($SLAPD_ADDITIONAL_MODULES)
IFS=","; declare -a modules=($SLAPD_ADDITIONAL_MODULES); unset IFS
for module in "${modules[@]}"; do
slapadd -n0 -F /etc/ldap/slapd.d -l "/etc/ldap/modules/${module}.ldif" >/dev/null 2>&1
done
fi
chown -R openldap:openldap /etc/ldap/slapd.d/
for file in `ls /etc/ldap/prepopulate/*.ldif`; do
slapadd -F /etc/ldap/slapd.d -l "$file"
done
chown -R openldap:openldap /etc/ldap/slapd.d/ /var/lib/ldap/ /var/run/slapd/
else
slapd_configs_in_env=`env | grep 'SLAPD_'`

View File

@ -0,0 +1,13 @@
# Notes on OpenLdap Docker Block
Any ldif files added to the prepopulate subdirectory will be automatically imported into the OpenLdap database.
The ldif files add three users, `ldapviewer`, `ldapeditor` and `ldapadmin`. Two groups, `admins` and `users`, are added that correspond with the group mappings in the default conf/ldap.toml. `ldapadmin` is a member of `admins` and `ldapeditor` is a member of `users`.
Note that users that are added here need to specify a `memberOf` attribute manually as well as the `member` attribute for the group. The `memberOf` module usually does this automatically (if you add a group in Apache Directory Studio for example) but this does not work in the entrypoint script as it uses the `slapadd` command to add entries before the server has started and before the `memberOf` module is loaded.
After adding ldif files to `prepopulate`:
1. Remove your current docker image: `docker rm docker_openldap_1`
2. Build: `docker-compose build`
3. `docker-compose up`

View File

@ -0,0 +1,10 @@
dn: cn=ldapadmin,dc=grafana,dc=org
mail: ldapadmin@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldapadmin
cn: ldapadmin
memberOf: cn=admins,dc=grafana,dc=org

View File

@ -0,0 +1,5 @@
dn: cn=admins,dc=grafana,dc=org
cn: admins
member: cn=ldapadmin,dc=grafana,dc=org
objectClass: groupOfNames
objectClass: top

View File

@ -0,0 +1,10 @@
dn: cn=ldapeditor,dc=grafana,dc=org
mail: ldapeditor@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldapeditor
cn: ldapeditor
memberOf: cn=users,dc=grafana,dc=org

View File

@ -0,0 +1,5 @@
dn: cn=users,dc=grafana,dc=org
cn: users
member: cn=ldapeditor,dc=grafana,dc=org
objectClass: groupOfNames
objectClass: top

View File

@ -0,0 +1,9 @@
dn: cn=ldapviewer,dc=grafana,dc=org
mail: ldapviewer@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldapviewer
cn: ldapviewer

View File

@ -0,0 +1,3 @@
FROM postgres:latest
ADD setup.sql /docker-entrypoint-initdb.d
CMD ["postgres"]

File diff suppressed because it is too large

View File

@ -1,5 +1,6 @@
postgrestest:
image: postgres:latest
build:
context: blocks/postgres_tests
environment:
POSTGRES_USER: grafanatest
POSTGRES_PASSWORD: grafanatest

View File

@ -0,0 +1,3 @@
CREATE DATABASE grafanadstest;
REVOKE CONNECT ON DATABASE grafanadstest FROM PUBLIC;
GRANT CONNECT ON DATABASE grafanadstest TO grafanatest;

View File

@ -23,3 +23,9 @@
network_mode: host
ports:
- "9093:9093"
prometheus-random-data:
build: blocks/prometheus_random_data
network_mode: host
ports:
- "8081:8080"

View File

@ -25,11 +25,15 @@ scrape_configs:
- job_name: 'node_exporter'
static_configs:
- targets: ['127.0.0.1:9100']
- job_name: 'fake-data-gen'
static_configs:
- targets: ['127.0.0.1:9091']
- job_name: 'grafana'
static_configs:
- targets: ['127.0.0.1:3000']
- job_name: 'prometheus-random-data'
static_configs:
- targets: ['127.0.0.1:8081']

View File

@ -1,3 +1,3 @@
FROM prom/prometheus:v2.0.0
FROM prom/prometheus:v2.2.0
ADD prometheus.yml /etc/prometheus/
ADD alert.rules /etc/prometheus/

View File

@ -23,3 +23,9 @@
network_mode: host
ports:
- "9093:9093"
prometheus-random-data:
build: blocks/prometheus_random_data
network_mode: host
ports:
- "8081:8080"

View File

@ -25,11 +25,15 @@ scrape_configs:
- job_name: 'node_exporter'
static_configs:
- targets: ['127.0.0.1:9100']
- job_name: 'fake-data-gen'
static_configs:
- targets: ['127.0.0.1:9091']
- job_name: 'grafana'
static_configs:
- targets: ['127.0.0.1:3000']
- job_name: 'prometheus-random-data'
static_configs:
- targets: ['127.0.0.1:8081']

View File

@ -0,0 +1,18 @@
# This Dockerfile builds an image for a client_golang example.
# Builder image, where we build the example.
FROM golang:1.9.0 AS builder
# Download prometheus/client_golang/examples/random first
RUN go get github.com/prometheus/client_golang/examples/random
WORKDIR /go/src/github.com/prometheus/client_golang
WORKDIR /go/src/github.com/prometheus/client_golang/prometheus
RUN go get -d
WORKDIR /go/src/github.com/prometheus/client_golang/examples/random
RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w'
# Final image.
FROM scratch
LABEL maintainer "The Prometheus Authors <prometheus-developers@googlegroups.com>"
COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/random .
EXPOSE 8080
ENTRYPOINT ["/random"]

View File

@ -22,6 +22,6 @@ log() {
log $RUN_CMD
$RUN_CMD
# Exit immidiately in case of any errors or when we have interactive terminal
# Exit immediately in case of any errors or when we have an interactive terminal
if [[ $? != 0 ]] || test -t 0; then exit $?; fi
log

View File

@ -11,11 +11,13 @@ weight = 8
# Provisioning Grafana
## Config file
In previous versions of Grafana, you could only use the API for provisioning data sources and dashboards. But that required the service to be running before you started creating dashboards and you also needed to set up credentials for the HTTP API. In v5.0 we decided to improve this experience by adding a new active provisioning system that uses config files. This will make GitOps more natural as data sources and dashboards can be defined via files that can be version controlled. We hope to extend this system to later add support for users, orgs and alerts as well.
## Config File
Check out the [configuration](/installation/configuration) page for more information on what you can configure in `grafana.ini`.
### Config file locations
### Config File Locations
- Default configuration from `$WORKING_DIR/conf/defaults.ini`
- Custom configuration from `$WORKING_DIR/conf/custom.ini`
@ -26,7 +28,7 @@ Checkout the [configuration](/installation/configuration) page for more informat
> `/etc/grafana/grafana.ini`. This path is specified in the Grafana
> init.d script using `--config` file parameter.
### Using environment variables
### Using Environment Variables
All options in the configuration file (listed below) can be overridden
using environment variables using the syntax:
@ -59,7 +61,7 @@ export GF_AUTH_GOOGLE_CLIENT_SECRET=newS3cretKey
<hr />
## Configuration management tools
## Configuration Management Tools
Currently we do not provide any scripts/manifests for configuring Grafana. Rather than spending time learning and creating scripts/manifests for each tool, we think our time is better spent making Grafana easier to provision. Therefore, we heavily rely on the expertise of the community.
@ -76,10 +78,12 @@ Saltstack | [https://github.com/salt-formulas/salt-formula-grafana](https://gith
It's possible to manage datasources in Grafana by adding one or more yaml config files in the [`provisioning/datasources`](/installation/configuration/#provisioning) directory. Each config file can contain a list of `datasources` that will be added or updated during startup. If the datasource already exists, Grafana will update it to match the configuration file. The config file can also contain a list of datasources that should be deleted. That list is called `delete_datasources`. Grafana will delete datasources listed in `delete_datasources` before inserting/updating those in the `datasources` list.
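As a rough sketch of how the two lists described above could sit in one file (key names follow the prose here and should be checked against your Grafana version's provisioning reference):
```yaml
# sketch of a file under provisioning/datasources/
apiVersion: 1

# datasources listed here are deleted before the list below is inserted/updated
delete_datasources:
  - name: Old-Graphite

datasources:
  - name: Graphite
    type: graphite
    access: proxy
    url: http://localhost:8080
```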
### Running multiple Grafana instances.
### Running Multiple Grafana Instances
If you are running multiple instances of Grafana you might run into problems if they have different versions of the `datasource.yaml` configuration file. The best way to solve this problem is to add a version number to each datasource in the configuration and increase it when you update the config. Grafana will only update datasources with the same or lower version number than specified in the config. That way, old configs cannot overwrite newer configs if they restart at the same time.
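A minimal sketch of that versioning approach, assuming a per-datasource `version` field as described above:
```yaml
apiVersion: 1

datasources:
  - name: Graphite
    type: graphite
    access: proxy
    url: http://localhost:8080
    # increase this whenever you change the entry; Grafana only applies entries
    # whose version is the same as or higher than the one already stored
    version: 2
```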
### Example datasource config file
### Example Datasource Config File
```yaml
# config file version
apiVersion: 1
@ -133,14 +137,21 @@ datasources:
editable: false
```
#### Json data
#### Custom Settings per Datasource
Please refer to each datasource documentation for specific provisioning examples.
| Datasource | Misc |
| ---- | ---- |
| Elasticsearch | Elasticsearch uses the `database` property to configure the index for a datasource |
#### Json Data
Since not all datasources have the same configuration settings we only have the most common ones as fields. The rest should be stored as a json blob in the `json_data` field. Here are the most common settings that the core datasources use.
| Name | Type | Datasource |Description |
| ----| ---- | ---- | --- |
| Name | Type | Datasource | Description |
| ---- | ---- | ---- | ---- |
| tlsAuth | boolean | *All* | Enable TLS authentication using client cert configured in secure json data |
| tlsAuthWithCACert | boolean | *All* | Enable TLS authtication using CA cert |
| tlsAuthWithCACert | boolean | *All* | Enable TLS authentication using CA cert |
| tlsSkipVerify | boolean | *All* | Controls whether a client verifies the server's certificate chain and host name. |
| graphiteVersion | string | Graphite | Graphite version |
| timeInterval | string | Elastic, Influxdb & Prometheus | Lowest interval/step value that should be used for this data source |
@ -155,8 +166,7 @@ Since not all datasources have the same configuration settings we only have the
| tsdbResolution | string | OpenTsdb | Resolution |
| sslmode | string | Postgre | SSLmode. 'disable', 'require', 'verify-ca' or 'verify-full' |
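To make the table above concrete, a hypothetical Graphite entry might set a couple of these keys under `jsonData` (values are illustrative only):
```yaml
apiVersion: 1

datasources:
  - name: Graphite
    type: graphite
    access: proxy
    url: http://localhost:8080
    jsonData:
      graphiteVersion: "1.1"   # Graphite version (see table above)
      tlsSkipVerify: true      # skip verification of the server certificate chain
```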
#### Secure Json data
#### Secure Json Data
`{"authType":"keys","defaultRegion":"us-west-2","timeField":"@timestamp"}`
@ -194,9 +204,9 @@ providers:
When Grafana starts, it will update/insert all dashboards available in the configured path. It will then poll that path, look for updated json files and update/insert those into the database.
### Reuseable dashboard urls
### Reusable Dashboard Urls
If the dashboard in the json file contains an [uid](/reference/dashboard/#json-fields), Grafana will force insert/update on that uid. This allows you to migrate dashboards betweens Grafana instances and provisioning Grafana from configuration without breaking the urls given since the new dashboard url uses the uid as identifer.
If the dashboard in the json file contains an [uid](/reference/dashboard/#json-fields), Grafana will force insert/update on that uid. This allows you to migrate dashboards between Grafana instances and provision Grafana from configuration without breaking the given urls, since the new dashboard url uses the uid as identifier.
When Grafana starts, it will update/insert all dashboards available in the configured folders. If you modify the file, the dashboard will also be updated.
By default Grafana will delete dashboards in the database if the file is removed. You can disable this behavior using the `disableDeletion` setting.
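A minimal provider sketch showing where `disableDeletion` and the dashboard path fit (field names are assumptions based on the provider format outlined above and may differ between versions):
```yaml
apiVersion: 1

providers:
  - name: 'default'
    type: file
    # keep dashboards in the database even if the json file is removed
    disableDeletion: true
    options:
      path: /var/lib/grafana/dashboards
```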

View File

@ -41,6 +41,8 @@ Grafana ships with the following set of notification types:
To enable email notifications you have to setup [SMTP settings](/installation/configuration/#smtp)
in the Grafana config. Email notifications will upload an image of the alert graph to an
external image destination if available, or fall back to attaching the image to the email.
Be aware that if you use the `local` image storage, email servers and clients might not be
able to access the image.
### Slack
@ -58,6 +60,8 @@ Recipient | allows you to override the Slack recipient.
Mention | make it possible to include a mention in the Slack notification sent by Grafana. Ex @here or @channel
Token | If provided, Grafana will upload the generated image via Slack's file.upload API method, not the external image destination.
If you are using the token for a slack bot, then you have to invite the bot to the channel you want to send notifications to and add the channel to the recipient field.
### PagerDuty
To set up PagerDuty, all you have to do is to provide an API key.
@ -149,10 +153,10 @@ Prometheus Alertmanager | `prometheus-alertmanager` | no
# Enable images in notifications {#external-image-store}
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessable (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessible (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
Amazon S3, Webdav, Google Cloud Storage and Azure Blob Storage. So to set that up you need to configure the [external image uploader](/installation/configuration/#external-image-storage) in your grafana-server ini config file.
Be aware that some notifiers requires public access to the image to be able to include it in the notification. So make sure to enable public access to the images. If your using local image uploader, your Grafana instance need to be accessible by the internet.
Be aware that some notifiers require public access to the image to be able to include it in the notification. So make sure to enable public access to the images. If you're using the local image uploader, your Grafana instance needs to be accessible from the internet.
Currently only the Email Channel attaches images if no external image store is specified. To include images in alert notifications for other channels, you need to set up an external image store.

View File

@ -110,7 +110,7 @@ to `Keep Last State` in order to basically ignore them.
## Notifications
In alert tab you can also specify alert rule notifications along with a detailed messsage about the alert rule.
In alert tab you can also specify alert rule notifications along with a detailed message about the alert rule.
The message can contain anything, information about how you might solve the issue, link to runbook, etc.
The actual notifications are configured and shared between multiple alerts. Read the

View File

@ -1,6 +1,6 @@
+++
title = "Contributor Licence Agreement (CLA)"
description = "Contributer Licence Agreement (CLA)"
description = "Contributor Licence Agreement (CLA)"
type = "docs"
aliases = ["/project/cla", "docs/contributing/cla.html"]
[menu.docs]
@ -101,4 +101,4 @@ TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU [OR US]
<br>
<br>
<br>
This CLA aggreement is based on the [Harmony Contributor Aggrement Template (combined)](http://www.harmonyagreements.org/agreements.html), [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/)
This CLA agreement is based on the [Harmony Contributor Agreement Template (combined)](http://www.harmonyagreements.org/agreements.html), [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/)

View File

@ -43,6 +43,40 @@ server is running on AWS you can use IAM Roles and authentication will be handle
Checkout AWS docs on [IAM Roles](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
## IAM Policies
Grafana needs permissions granted via IAM to be able to read CloudWatch metrics
and EC2 tags/instances. You can attach these permissions to IAM roles and
utilize Grafana's built-in support for assuming roles.
Here is a minimal policy example:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowReadingMetricsFromCloudWatch",
"Effect": "Allow",
"Action": [
"cloudwatch:ListMetrics",
"cloudwatch:GetMetricStatistics"
],
"Resource": "*"
},
{
"Sid": "AllowReadingTagsFromEC2",
"Effect": "Allow",
"Action": [
"ec2:DescribeTags",
"ec2:DescribeInstances"
],
"Resource": "*"
}
]
}
```
### AWS credentials file
Create a file at `~/.aws/credentials`. That is the `HOME` path for the user running grafana-server.
@ -87,7 +121,7 @@ Name | Description
*namespaces()* | Returns a list of namespaces CloudWatch support.
*metrics(namespace, [region])* | Returns a list of metrics in the namespace. (specify region or use "default" for custom metrics)
*dimension_keys(namespace)* | Returns a list of dimension keys in the namespace.
*dimension_values(region, namespace, metric, dimension_key)* | Returns a list of dimension values matching the specified `region`, `namespace`, `metric` and `dimension_key`.
*dimension_values(region, namespace, metric, dimension_key, [filters])* | Returns a list of dimension values matching the specified `region`, `namespace`, `metric` and `dimension_key`, or you can use dimension `filters` to get more specific results as well.
*ebs_volume_ids(region, instance_id)* | Returns a list of volume ids matching the specified `region`, `instance_id`.
*ec2_instance_attribute(region, attribute_name, filters)* | Returns a list of attributes matching the specified `region`, `attribute_name`, `filters`.
@ -104,6 +138,7 @@ Query | Service
*dimension_values(us-east-1,AWS/Redshift,CPUUtilization,ClusterIdentifier)* | RedShift
*dimension_values(us-east-1,AWS/RDS,CPUUtilization,DBInstanceIdentifier)* | RDS
*dimension_values(us-east-1,AWS/S3,BucketSizeBytes,BucketName)* | S3
*dimension_values(us-east-1,CWAgent,disk_used_percent,device,{"InstanceId":"$instance_id"})* | CloudWatch Agent
## ec2_instance_attribute examples
@ -172,3 +207,37 @@ Amazon provides 1 million CloudWatch API requests each month at no additional ch
it costs $0.01 per 1,000 GetMetricStatistics or ListMetrics requests. For each query Grafana will
issue a GetMetricStatistics request and every time you pick a dimension in the query editor
Grafana will issue a ListMetrics request.
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
Using a credentials file
```yaml
apiVersion: 1
datasources:
- name: Cloudwatch
type: cloudwatch
jsonData:
authType: credentials
defaultRegion: eu-west-2
```
Using `accessKey` and `secretKey`
```yaml
apiVersion: 1
datasources:
- name: Cloudwatch
type: cloudwatch
jsonData:
authType: keys
defaultRegion: eu-west-2
secureJsonData:
accessKey: "<your access key>"
secretKey: "<your secret key>"
```

View File

@ -55,6 +55,22 @@ a time pattern for the index name or a wildcard.
Be sure to specify your Elasticsearch version in the version selection dropdown. This is very important as there are differences in how queries are composed. Currently only 2.x and 5.x
are supported.
### Min time interval
A lower limit for the auto group by time interval. Recommended to be set to the write frequency, for example `1m` if your data is written every minute.
This option can also be overridden/configured in a dashboard panel under data source options. It's important to note that this value **needs** to be formatted as a
number followed by a valid time identifier, e.g. `1m` (1 minute) or `30s` (30 seconds). The following time identifiers are supported:
Identifier | Description
------------ | -------------
`y` | year
`M` | month
`w` | week
`d` | day
`h` | hour
`m` | minute
`s` | second
`ms` | millisecond
## Metric Query editor
![](/img/docs/elasticsearch/query_editor.png)
@ -137,3 +153,23 @@ Query | You can leave the search query blank or specify a lucene query
Time | The name of the time field, needs to be date field.
Text | Event description field.
Tags | Optional field name to use for event tags (can be an array or a CSV string).
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Elastic
type: elasticsearch
access: proxy
database: "[metrics-]YYYY.MM.DD"
url: http://localhost:9200
jsonData:
interval: Daily
timeField: "@timestamp"
```

View File

@ -75,7 +75,7 @@ You can reference queries by the row “letter” that they're on (similar to
## Point consolidation
All Graphite metrics are consolidated so that Graphite doesn't return more data points than there are pixels in the graph. By default,
this consolidation is done using `avg` function. You can how Graphite consolidates metrics by adding the Graphite consolidateBy function.
this consolidation is done using `avg` function. You can control how Graphite consolidates metrics by adding the Graphite consolidateBy function.
> *Notice* This means that legend summary values (max, min, total) cannot be all correct at the same time. They are calculated
> client side by Grafana. And depending on your consolidation function only one or two can be correct at the same time.
@ -120,3 +120,21 @@ queries via the Dashboard menu / Annotations view.
Graphite supports two ways to query annotations. A regular metric query, for this you use the `Graphite query` textbox. A Graphite events query, use the `Graphite event tags` textbox,
specify a tag or wildcard (leave empty should also work)
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Graphite
type: graphite
access: proxy
url: http://localhost:8080
jsonData:
graphiteVersion: "1.1"
```

View File

@ -39,6 +39,22 @@ Proxy access means that the Grafana backend will proxy all requests from the bro
`grafana-server`. This means that the URL you specify needs to be accessible from the server you are running Grafana on. Proxy access
mode is also more secure as the username & password will never reach the browser.
### Min time interval
A lower limit for the auto group by time interval. Recommended to be set to the write frequency, for example `1m` if your data is written every minute.
This option can also be overridden/configured in a dashboard panel under data source options. It's important to note that this value **needs** to be formatted as a
number followed by a valid time identifier, e.g. `1m` (1 minute) or `30s` (30 seconds). The following time identifiers are supported:
Identifier | Description
------------ | -------------
`y` | year
`M` | month
`w` | week
`d` | day
`h` | hour
`m` | minute
`s` | second
`ms` | millisecond
## Query Editor
{{< docs-imagebox img="/img/docs/v45/influxdb_query_still.png" class="docs-image--no-shadow" animated-gif="/img/docs/v45/influxdb_query.gif" >}}
@ -174,3 +190,22 @@ SELECT title, description from events WHERE $timeFilter order asc
For InfluxDB you need to enter a query like in the above example. You need to have the ```where $timeFilter```
part. If you only select one column you will not need to enter anything in the column mapping fields. The
Tags field can be a comma separated string.
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: InfluxDB
type: influxdb
access: proxy
database: site
user: grafana
password: grafana
url: http://localhost:8086
```

View File

@ -225,3 +225,21 @@ tags | Optional field name to use for event tags as a comma separated string.
## Alerting
Time series queries should work in alerting conditions. Table formatted queries are not yet supported in alert rule conditions.
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: MySQL
type: mysql
url: localhost:3306
database: grafana
user: grafana
password: password
```

View File

@ -78,7 +78,7 @@ the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` o
### Nested Templating
One template variable can be used to filter tag values for another template varible. First parameter is the metric name,
One template variable can be used to filter tag values for another template variable. First parameter is the metric name,
second parameter is the tag key for which you need to find tag values, and after that all other dependent template variables.
Some examples are mentioned below to make nested template queries work successfully.
@ -88,3 +88,22 @@ Query | Description
*tag_values(cpu, hostname, env=$env, region=$region)* | Return tag values for cpu metric, selected env tag value, selected region tag value and tag key hostname
For details on OpenTSDB metric queries checkout the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: OpenTsdb
type: opentsdb
access: proxy
url: http://localhost:4242
jsonData:
tsdbResolution: 1
tsdbVersion: 1
```

View File

@ -217,3 +217,25 @@ tags | Optional field name to use for event tags as a comma separated string.
Time series queries should work in alerting conditions. Table formatted queries are not yet supported in alert rule
conditions.
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Postgres
type: postgres
url: localhost:5432
database: grafana
user: grafana
secureJsonData:
password: "Password!"
jsonData:
sslmode: "disable" # disable/require/verify-ca/verify-full
```

View File

@ -34,7 +34,7 @@ Name | Description
*Basic Auth* | Enable basic authentication to the Prometheus data source.
*User* | Name of your Prometheus user
*Password* | Database user's password
*Scrape interval* | This will be used as a lower limit for the Prometheus step query parameter. Default value is 15s.
*Scrape interval* | This will be used as a lower limit for the Prometheus step query parameter. Default value is 15s.
## Query editor
@ -93,10 +93,26 @@ queries via the Dashboard menu / Annotations view.
Prometheus supports two ways to query annotations.
- A regular metric query
- A Prometheus query for pending and firing alerts (for details see [Inspecting alerts during runtime](https://prometheus.io/docs/alerting/rules/#inspecting-alerts-during-runtime))
- A Prometheus query for pending and firing alerts (for details see [Inspecting alerts during runtime](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/#inspecting-alerts-during-runtime))
The step option is useful to limit the number of events returned from your query.
## Getting Grafana metrics into Prometheus
Since 4.6.0 Grafana exposes metrics for Prometheus on the `/metrics` endpoint. We also bundle a dashboard within Grafana so you can get started viewing your metrics faster. You can import the bundled dashboard by going to the data source edit page and clicking the dashboard tab. There you can find a dashboard for Grafana and one for Prometheus. Import and start viewing all the metrics!
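A scrape job for that endpoint could look roughly like the Prometheus snippets used elsewhere in this changeset (host and port are assumptions for a local setup):
```yaml
scrape_configs:
  - job_name: 'grafana'
    static_configs:
      # Grafana serves its Prometheus metrics on /metrics, default port 3000
      - targets: ['127.0.0.1:3000']
```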
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://localhost:9090
```

View File

@ -14,7 +14,7 @@ weight = 4
{{< docs-imagebox img="/img/docs/v45/alert-list-panel.png" max-width="850px" >}}
The alert list panel allows you to display your dashbords alerts. The list can be configured to show current state or recent state changes. You can read more about alerts [here](http://docs.grafana.org/alerting/rules).
The alert list panel allows you to display your dashboard's alerts. The list can be configured to show current state or recent state changes. You can read more about alerts [here](http://docs.grafana.org/alerting/rules).
## Alert List Options

View File

@ -25,7 +25,7 @@ The dashboard list panel allows you to display dynamic links to other dashboards
1. **Starred**: The starred dashboard selection displays starred dashboards in alphabetical order.
2. **Recently Viewed**: The recently viewed dashboard selection displays recently viewed dashboards in alphabetical order.
3. **Search**: The search dashboard selection displays dashboards by search query or tag(s).
4. **Show Headings**: When show headings is ticked the choosen list selection(Starred, Recently Viewed, Search) is shown as a heading.
4. **Show Headings**: When show headings is ticked the chosen list selection(Starred, Recently Viewed, Search) is shown as a heading.
5. **Max Items**: Max items sets the maximum number of items shown in the list.
6. **Query**: Here is where you enter your query you want to search by. Queries are case-insensitive, and partial values are accepted.
7. **Tags**: Here is where you enter your tag(s) you want to search by. Note that existing tags will not appear as you type, and *are* case sensitive. To see a list of existing tags, you can always return to the dashboard, open the Dashboard Picker at the top and click `tags` link in the search bar.

View File

@ -30,7 +30,7 @@ The singlestat panel has a normal query editor to allow you define your exact me
* **total** - The sum of all the non-null values in the series
* **first** - The first value in the series
* **delta** - The total incremental increase (of a counter) in the series. An attempt is made to account for counter resets, but this will only be accurate for single instance metrics. Used to show total counter increase in time series.
* **diff** - The difference betwen 'current' (last value) and 'first'.
* **diff** - The difference between 'current' (last value) and 'first'.
* **range** - The difference between 'min' and 'max'. Useful to show the range of change for a gauge.
2. **Prefix/Postfix**: The Prefix/Postfix fields let you define a custom label to appear *before/after* the value. The `$__name` variable can be used here to use the series name or alias from the metric query.
3. **Units**: Units are appended to the Singlestat within the panel, and will respect the color and threshold settings for the value.
@ -70,7 +70,7 @@ Gauges gives a clear picture of how high a value is in it's context. It's a grea
{{< docs-imagebox img="/img/docs/v45/singlestat-gauge-options.png" max-width="500px" class="docs-image--right docs-image--no-shadow">}}
1. **Show**: The show checkbox will toggle wether the gauge is shown in the panel. When unselected, only the Singlestat value will appear.
1. **Show**: The show checkbox will toggle whether the gauge is shown in the panel. When unselected, only the Singlestat value will appear.
2. **Min/Max**: This sets the start and end point for the gauge.
3. **Threshold Labels**: Check if you want to show the threshold labels. Thresholds are set in the color options.
4. **Threshold Markers**: Check if you want to have a second meter showing the thresholds.

View File

@ -42,6 +42,7 @@ Hit `?` on your keyboard to open the shortcuts help modal.
- `e` Toggle panel edit view
- `v` Toggle panel fullscreen view
- `p` `s` Open Panel Share Modal
- `p` `d` Duplicate Panel
- `p` `r` Remove Panel
### Time Range

View File

@ -15,7 +15,7 @@ support for multiple Cloudwatch credentials.
<img src="/assets/img/features/table-panel.png">
The new table panel is very flexible, supporting both multiple modes for time series as well as for
table, annotation and raw JSON data. It also provides date formating and value formating and coloring options.
table, annotation and raw JSON data. It also provides date formatting and value formatting and coloring options.
### Time series to rows

View File

@ -33,7 +33,7 @@ You can enable/disable the shared tooltip from the dashboard settings menu or cy
{{< imgbox max-width="60%" img="/img/docs/v41/helptext_for_panel_settings.png" caption="Hovering help text" >}}
You can set a help text in the general tab on any panel. The help text is using Markdown to enable better formating and linking to other sites that can provide more information.
You can set a help text in the general tab on any panel. The help text is using Markdown to enable better formatting and linking to other sites that can provide more information.
<div class="clearfix"></div>

View File

@ -12,7 +12,7 @@ weight = -4
# What's New in Grafana v4.5
## Hightlights
## Highlights
### New prometheus query editor
@ -62,7 +62,7 @@ Datas source selection & options & help are now above your metric queries.
### Minor Changes
* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
* **InfluxDB**: Added paranthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
* **InfluxDB**: Added parenthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
## Bug Fixes

View File

@ -45,7 +45,7 @@ This makes exploring and filtering Prometheus data much easier.
* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
* **Graph**: Add support for local formating in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Jaeger**: Add support for open tracing using jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
* **CLI**: Make it possible to install plugins from any url [#5873](https://github.com/grafana/grafana/issues/5873)

View File

@ -180,14 +180,14 @@ Content-Type: application/json
## Delete Annotation By Id
`DELETE /api/annotation/:id`
`DELETE /api/annotations/:id`
Deletes the annotation that matches the specified id.
**Example Request**:
```http
DELETE /api/annotation/1 HTTP/1.1
DELETE /api/annotations/1 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
@ -204,14 +204,14 @@ Content-Type: application/json
## Delete Annotation By RegionId
`DELETE /api/annotation/region/:id`
`DELETE /api/annotations/region/:id`
Deletes the annotation that matches the specified region id. A region is an annotation that covers a timerange and has a start and end time. In the Grafana database, this is stored as two annotations connected by a region id.
**Example Request**:
```http
DELETE /api/annotation/region/1 HTTP/1.1
DELETE /api/annotations/region/1 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk

View File

@ -43,11 +43,7 @@ Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
"title": "Production Overview",
"tags": [ "templated" ],
"timezone": "browser",
"rows": [
{
}
],
"schemaVersion": 6,
"schemaVersion": 16,
"version": 0
},
"folderId": 0,
@ -140,11 +136,7 @@ Content-Type: application/json
"title": "Production Overview",
"tags": [ "templated" ],
"timezone": "browser",
"rows": [
{
}
],
"schemaVersion": 6,
"schemaVersion": 16,
"version": 0
},
"meta": {
@ -219,14 +211,9 @@ Content-Type: application/json
"editable":false,
"hideControls":true,
"nav":[
{
"enable":false,
"type":"timepicker"
}
],
"rows": [
{
"enable":false,
"type":"timepicker"
}
],
"style":"dark",
@ -322,11 +309,7 @@ Content-Type: application/json
"title": "Production Overview",
"tags": [ "templated" ],
"timezone": "browser",
"rows": [
{
}
],
"schemaVersion": 6,
"schemaVersion": 16,
"version": 0
},
"meta": {

View File

@ -188,8 +188,8 @@ Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
"defaultRegion": "us-west-1"
},
"secureJsonData": {
"accessKey": "Ol4pIDpeKSA6XikgOl4p",
"secretKey": "dGVzdCBrZXkgYmxlYXNlIGRvbid0IHN0ZWFs"
"accessKey": "Ol4pIDpeKSA6XikgOl4p", //should not be encoded
"secretKey": "dGVzdCBrZXkgYmxlYXNlIGRvbid0IHN0ZWFs" //should be Base-64 encoded
}
}
```
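For illustration only (the key below is a placeholder), a secret key can be Base-64 encoded on the command line before it is placed in `secureJsonData`:

```bash
echo -n 'YOUR_SECRET_KEY' | base64
```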

View File

@ -31,6 +31,7 @@ dashboards, creating users and updating data sources.
* [Annotations API]({{< relref "http_api/annotations.md" >}})
* [Alerting API]({{< relref "http_api/alerting.md" >}})
* [User API]({{< relref "http_api/user.md" >}})
* [Team API]({{< relref "http_api/team.md" >}})
* [Admin API]({{< relref "http_api/admin.md" >}})
* [Preferences API]({{< relref "http_api/preferences.md" >}})
* [Other API]({{< relref "http_api/other.md" >}})

View File

@ -307,7 +307,7 @@ Content-Type: application/json
`PUT /api/orgs/:orgId`
Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented yet.
Update Organisation, fields *Address 1*, *Address 2*, *City* are not implemented yet.
**Example Request**:
@ -436,4 +436,4 @@ HTTP/1.1 200
Content-Type: application/json
{"message":"User removed from organization"}
```

View File

@ -234,7 +234,12 @@ The maximum number of connections in the idle connection pool.
### max_open_conn
The maximum number of open connections to the database.
### conn_max_lifetime
Sets the maximum amount of time a connection may be reused. The default is 14400 (which means 14400 seconds or 4 hours). For MySQL, this setting should be shorter than the [`wait_timeout`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_wait_timeout) variable.
### log_queries
Set to `true` to log the SQL calls and execution times.
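A hypothetical `grafana.ini` snippet combining these database settings (the values below are illustrative, not recommended defaults):

```bash
[database]
max_idle_conn = 2
max_open_conn = 100
conn_max_lifetime = 14400
log_queries = false
```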
<hr />
@ -477,7 +482,7 @@ Set api_url to the resource that returns [OpenID UserInfo](https://connect2id.co
First set up Grafana as an OpenId client "webapplication" in Okta. Then set the Base URIs to `https://<grafana domain>/` and set the Login redirect URIs to `https://<grafana domain>/login/generic_oauth`.
Finaly set up the generic oauth module like this:
Finally set up the generic oauth module like this:
```bash
[auth.generic_oauth]
name = Okta

View File

@ -15,7 +15,7 @@ weight = 1
Description | Download
------------ | -------------
Stable for Debian-based Linux | [grafana_5.0.0_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.0_amd64.deb)
Stable for Debian-based Linux | [grafana_5.0.4_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.4_amd64.deb)
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@ -24,9 +24,9 @@ installation.
```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.0_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.4_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_5.0.0_amd64.deb
sudo dpkg -i grafana_5.0.4_amd64.deb
```
## APT Repository
@ -34,7 +34,7 @@ sudo dpkg -i grafana_5.0.0_amd64.deb
Add the following line to your `/etc/apt/sources.list` file.
```bash
deb https://packagecloud.io/grafana/stable/debian/ jessie main
deb https://packagecloud.io/grafana/stable/debian/ stretch main
```
Use the above line even if you are on Ubuntu or another Debian version.
@ -42,7 +42,7 @@ There is also a testing repository if you want beta or release
candidates.
```bash
deb https://packagecloud.io/grafana/testing/debian/ jessie main
deb https://packagecloud.io/grafana/testing/debian/ stretch main
```
Then add the [Package Cloud](https://packagecloud.io/grafana) key. This

View File

@ -12,7 +12,7 @@ weight = 4
# Installing using Docker
Grafana is very easy to install and run using the offical docker container.
Grafana is very easy to install and run using the official docker container.
```bash
$ docker run -d -p 3000:3000 grafana/grafana
@ -83,7 +83,7 @@ $ docker run \
-d \
-p 3000:3000 \
--name grafana \
grafana/grafana:4.5.2
grafana/grafana:5.0.2
```
## Configuring AWS Credentials for CloudWatch Support

View File

@ -15,7 +15,7 @@ weight = 2
Description | Download
------------ | -------------
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [5.0.0 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.0-1.x86_64.rpm)
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [5.0.4 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.4-1.x86_64.rpm)
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
@ -26,7 +26,7 @@ installation.
You can install Grafana using Yum directly.
```bash
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.0-1.x86_64.rpm
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.4-1.x86_64.rpm
```
Or install manually using `rpm`.
@ -34,15 +34,15 @@ Or install manually using `rpm`.
#### On CentOS / Fedora / Redhat:
```bash
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.0-1.x86_64.rpm
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.4-1.x86_64.rpm
$ sudo yum install initscripts fontconfig
$ sudo rpm -Uvh grafana-5.0.0-1.x86_64.rpm
$ sudo rpm -Uvh grafana-5.0.4-1.x86_64.rpm
```
#### On OpenSuse:
```bash
$ sudo rpm -i --nodeps grafana-5.0.0-1.x86_64.rpm
$ sudo rpm -i --nodeps grafana-5.0.4-1.x86_64.rpm
```
## Install via YUM Repository
@ -52,7 +52,7 @@ Add the following to a new file at `/etc/yum.repos.d/grafana.repo`
```bash
[grafana]
name=grafana
baseurl=https://packagecloud.io/grafana/stable/el/6/$basearch
baseurl=https://packagecloud.io/grafana/stable/el/7/$basearch
repo_gpgcheck=1
enabled=1
gpgcheck=1
@ -64,7 +64,7 @@ sslcacert=/etc/pki/tls/certs/ca-bundle.crt
There is also a testing repository if you want beta or release candidates.
```bash
baseurl=https://packagecloud.io/grafana/testing/el/6/$basearch
baseurl=https://packagecloud.io/grafana/testing/el/7/$basearch
```
Then install Grafana via the `yum` command.
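With the repository above enabled, that command is simply:

```bash
$ sudo yum install grafana
```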

View File

@ -23,9 +23,9 @@ Before upgrading it can be a good idea to backup your Grafana database. This wil
#### sqlite
If you use sqlite you only need to make a backup of you `grafana.db` file. This is usually located at `/var/lib/grafana/grafana.db` on unix system.
If you use sqlite you only need to make a backup of your `grafana.db` file. This is usually located at `/var/lib/grafana/grafana.db` on unix system.
If you are unsure what database you use and where it is stored, check your Grafana configuration file. If you
installed grafana to custom location using a binary tar/zip it is usally in `<grafana_install_dir>/data`.
installed grafana to custom location using a binary tar/zip it is usually in `<grafana_install_dir>/data`.
#### mysql

View File

@ -8,12 +8,11 @@ parent = "installation"
weight = 3
+++
# Installing on Windows
Description | Download
------------ | -------------
Latest stable package for Windows | [grafana-5.0.0.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.0.windows-x64.zip)
Latest stable package for Windows | [grafana-5.0.4.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.4.windows-x64.zip)
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.

View File

@ -71,13 +71,13 @@ Each field in the dashboard JSON is explained below with its usage:
| **timepicker** | timepicker metadata, see [timepicker section](#timepicker) for details |
| **templating** | templating metadata, see [templating section](#templating) for details |
| **annotations** | annotations metadata, see [annotations section](#annotations) for details |
| **schemaVersion** | version of the JSON schema (integer), incremented each time a Grafana update brings changes to the said schema |
| **schemaVersion** | version of the JSON schema (integer), incremented each time a Grafana update brings changes to said schema |
| **version** | version of the dashboard (integer), incremented each time the dashboard is updated |
| **panels** | panels array, see below for detail. |
## Panels
Panels are the building blocks a dashboard. It consists of datasource queries, type of graphs, aliases, etc. Panel JSON consists of an array of JSON objects, each representing a different panel. Most of the fields are common for all panels but some fields depends on the panel type. Following is an example of panel JSON of a text panel.
Panels are the building blocks of a dashboard. A panel consists of datasource queries, the type of graph, aliases, etc. Panel JSON consists of an array of JSON objects, each representing a different panel. Most of the fields are common for all panels but some fields depend on the panel type. Following is an example of the panel JSON of a text panel.
```json
"panels": [
@ -105,7 +105,7 @@ The gridPos property describes the panel size and position in grid coordinates.
- `x` The x position, in same unit as `w`.
- `y` The y position, in same unit as `h`.
The grid has a negative gravity that moves panels up if there i empty space above a panel.
The grid has a negative gravity that moves panels up if there is empty space above a panel.
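For illustration, a panel's `gridPos` might look like this (the values are arbitrary):

```json
"gridPos": {
  "h": 3,
  "w": 12,
  "x": 0,
  "y": 10
}
```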
### timepicker
@ -161,7 +161,7 @@ Usage of the fields is explained below:
### templating
`templating` fields contains array of template variables with their saved values along with some other metadata, for example:
The `templating` field contains an array of template variables with their saved values along with some other metadata, for example:
```json
"templating": {
@ -236,7 +236,7 @@ Usage of the above mentioned fields in the templating section is explained below
| Name | Usage |
| ---- | ----- |
| **enable** | whether templating is enabled or not |
| **list** | an array of objects representing, each representing one template variable |
| **list** | an array of objects each representing one template variable |
| **allFormat** | format to use while fetching all values from datasource, eg: `wildcard`, `glob`, `regex`, `pipe`, etc. |
| **current** | shows current selected variable text/value on the dashboard |
| **datasource** | shows datasource for the variables |

View File

@ -49,7 +49,7 @@ Click the back button to rewind to the previous Dashboard in the Playlist.
In TV mode the top navbar, row & panel controls will all fade to transparent.
This happens automatically after one minute of user inactivity but can also be toggled manually
with the `d v` sequence shortcut. Any mouse movement or keyboard action will
with the `d v` sequence shortcut, or by appending the parameter `?inactive` to the dashboard URL. Any mouse movement or keyboard action will
restore navbar & controls.
Another feature is the kiosk mode - in kiosk mode the navbar is completely hidden/removed from view. This can be enabled with the `d k`

View File

@ -1,6 +1,6 @@
+++
title = "Variables"
keywords = ["grafana", "templating", "documentation", "guide"]
keywords = ["grafana", "templating", "documentation", "guide", "template", "variable"]
type = "docs"
[menu.docs]
name = "Variables"
@ -80,6 +80,73 @@ Option | Description
*Regex* | Regex to filter or capture specific parts of the names returned by your data source query. Optional.
*Sort* | Define sort order for options in dropdown. **Disabled** means that the order of options returned by your data source query will be used.
#### Using regex to filter/modify values in the Variable dropdown
Using the Regex Query Option, you filter the list of options returned by the Variable query or modify the options returned.
Examples of filtering on the following list of options:
```text
backend_01
backend_02
backend_03
backend_04
```
##### Filter so that only the options that end with `01` or `02` are returned:
Regex:
```regex
/.*[01|02]/
```
Result:
```text
backend_01
backend_02
```
##### Filter and modify the options using a regex capture group to return part of the text:
Regex:
```regex
/.*(01|02)/
```
Result:
```text
01
02
```
#### Filter and modify - Prometheus Example
List of options:
```text
up{instance="demo.robustperception.io:9090",job="prometheus"} 1 1521630638000
up{instance="demo.robustperception.io:9093",job="alertmanager"} 1 1521630638000
up{instance="demo.robustperception.io:9100",job="node"} 1 1521630638000
```
Regex:
```regex
/.*instance="([^"]*).*/
```
Result:
```text
demo.robustperception.io:9090
demo.robustperception.io:9093
demo.robustperception.io:9100
```
### Query expressions
The query expressions are different for each data source.
@ -101,12 +168,14 @@ Option | Description
*Include All option* | Add a special `All` option whose value includes all options.
*Custom all value* | By default the `All` value will include all options in a combined expression. This can become very long and can have performance problems. Many times it can be better to specify a custom all value, like a wildcard regex. To make it possible to have custom regex, globs or lucene syntax in the **Custom all value** option it is never escaped so you will have to think about what is a valid value for your data source.
### Formating multiple values
### Formatting multiple values
Interpolating a variable with multiple values selected is tricky as it is not straightforward how to format the multiple values into a string that
is valid in the given context where the variable is used. Grafana tries to solve this by allowing each data source plugin to
inform the templating interpolation engine what format to use for multiple values.
Note that the *Custom all value* option on the variable will have to be left blank for Grafana to format all values into a single string.
**Graphite**, for example, uses glob expressions. A variable with multiple values would, in this case, be interpolated as `{host1,host2,host3}` if
the current variable value was *host1*, *host2* and *host3*.
@ -117,7 +186,7 @@ break the regex expression.
**Elasticsearch** uses lucene query syntax, so the same variable would, in this case, be formatted as `("host1" OR "host2" OR "host3")`. In this case every value
needs to be escaped so that the value can contain lucene control words and quotation marks.
#### Formating troubles
#### Formatting troubles
Automatic escaping & formatting can cause problems and it can be tricky to grasp the logic behind it.
Especially for InfluxDB and Prometheus where the use of regex syntax requires that the variable is used in regex operator context.

View File

@ -108,7 +108,7 @@ In this example we use Apache as a reverseProxy in front of Grafana. Apache hand
* The next part of the configuration is the tricky part. We use Apache's rewrite engine to create our **X-WEBAUTH-USER header**, populated with the authenticated user.
* **RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER}, NS]**: This line is a little bit of magic. What it does, is for every request use the rewriteEngines look-ahead (LA-U) feature to determine what the REMOTE_USER variable would be set to after processing the request. Then assign the result to the variable PROXY_USER. This is neccessary as the REMOTE_USER variable is not available to the RequestHeader function.
* **RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER}, NS]**: This line is a little bit of magic. What it does, is for every request use the rewriteEngines look-ahead (LA-U) feature to determine what the REMOTE_USER variable would be set to after processing the request. Then assign the result to the variable PROXY_USER. This is necessary as the REMOTE_USER variable is not available to the RequestHeader function.
* **RequestHeader set X-WEBAUTH-USER “%{PROXY_USER}e”**: With the authenticated username now stored in the PROXY_USER variable, we create a new HTTP request header that will be sent to our backend Grafana containing the username.
@ -149,7 +149,7 @@ auto_sign_up = true
##### Grafana Container
For this example, we use the offical Grafana docker image available at [Docker Hub](https://hub.docker.com/r/grafana/grafana/)
For this example, we use the official Grafana docker image available at [Docker Hub](https://hub.docker.com/r/grafana/grafana/)
* Create a file `grafana.ini` with the following contents
@ -166,7 +166,7 @@ header_property = username
auto_sign_up = true
```
* Launch the Grafana container, using our custom grafana.ini to replace `/etc/grafana/grafana.ini`. We dont expose any ports for this container as it will only be connected to by our Apache container.
* Launch the Grafana container, using our custom grafana.ini to replace `/etc/grafana/grafana.ini`. We don't expose any ports for this container as it will only be connected to by our Apache container.
```bash
docker run -i -v $(pwd)/grafana.ini:/etc/grafana/grafana.ini --name grafana grafana/grafana
@ -174,7 +174,7 @@ docker run -i -v $(pwd)/grafana.ini:/etc/grafana/grafana.ini --name grafana graf
### Apache Container
For this example we use the offical Apache docker image available at [Docker Hub](https://hub.docker.com/_/httpd/)
For this example we use the official Apache docker image available at [Docker Hub](https://hub.docker.com/_/httpd/)
* Create a file `httpd.conf` with the following contents
@ -244,4 +244,4 @@ ProxyPassReverse / http://grafana:3000/
### Use grafana.
With our Grafana and Apache containers running, you can now connect to http://localhost/ and log in using the username/password we created in the htpasswd file.

View File

@ -9,30 +9,38 @@ weight = 10
# How to setup Grafana for high availability
> Alerting does not support high availability yet.
Setting up Grafana for high availability is fairly simple. It comes down to two things:
* Use a shared database for multiple grafana instances.
* Consider how user sessions are stored.
1. Use a shared database for storing dashboards, users, and other persistent data
2. Decide how to store session data.
<div class="text-center">
<img src="/img/docs/tutorials/grafana-high-availability.png" max-width= "800px" class="center"></img>
</div>
## Configure multiple servers to use the same database
First you need to do is to setup mysql or postgres on another server and configure Grafana to use that database.
First, you need to set up MySQL or Postgres on another server and configure Grafana to use that database.
You can find the configuration for doing that in the [[database]]({{< relref "configuration.md" >}}#database) section in the grafana config.
Grafana will now persist all long term data in the database.
It also worth considering how to setup the database for high availability but thats outside the scope of this guide.
Grafana will now persist all long-term data in the database. How to configure the database for high availability is out of scope for this guide. We recommend finding an expert on the database you're using.
## User sessions
The second thing to consider is how to deal with user sessions and how to balance the load between servers.
By default Grafana stores user sessions on disk which works fine if you use `sticky sessions` in your load balancer.
Grafana also supports storing the session data in the database, redis or memcache which makes it possible to use round robin in your load balancer.
If you use mysql/postgres for session storage you first need a table to store the session data in. More details about that in [[sessions]]({{< relref "configuration.md" >}}#session)
The second thing to consider is how to deal with user sessions and how to configure your load balancer in front of Grafana.
Grafana supports two ways of storing session data: locally on disk or in a database/cache-server.
If you want to store sessions on disk you can use `sticky sessions` in your load balancer. If you prefer to store session data in a database/cache-server
you can use any stateless routing strategy in your load balancer (e.g. round robin or least connections).
For Grafana itself it doesn't really matter if you store your sessions on disk or in a database/redis/memcache.
But we suggest that you store the session in redis/memcache since it makes it easier to add/remove instances from the group.
### Sticky sessions
Using sticky sessions, all traffic for one user will always be sent to the same server, which means that session-related data can be
stored on disk rather than in a shared database. This is the default behavior for Grafana, and if you only want multiple servers for failover this is a good solution since it requires the least amount of work.
### Stateless sessions
You can also choose to store session data in Redis/Memcache/Postgres/MySQL, which means that the load balancer can send a user to any Grafana server without having to log in on each server. This requires a little bit more work from the operator but enables you to remove/add Grafana servers without impacting the user experience.
If you use MySQL/Postgres for session storage, you first need a table to store the session data in. More details about that in [[sessions]]({{< relref "configuration.md" >}}#session)
For Grafana itself it doesn't really matter if you store the session data on disk or in a database/redis/memcache. But we recommend using a database/redis/memcache since it makes it easier to manage the Grafana servers.
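As a rough sketch of the database/cache-server option (assuming a local Redis instance; adjust the address, pool size and database name to your setup), the session section of `grafana.ini` could look like:

```bash
[session]
provider = redis
provider_config = addr=127.0.0.1:6379,pool_size=100,db=grafana
```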
## Alerting
Currently alerting supports a limited form of high availability. Since v4.2.0 of Grafana, alert notifications are deduped when running multiple servers. This means all alerts are executed on every server but no duplicate alert notifications are sent due to the deduping logic. Proper load balancing of alerts will be introduced in the future.
Currently alerting supports a limited form of high availability. Since v4.2.0, alert notifications are deduped when running multiple servers. This means all alerts are executed on every server but alert notifications are only sent once per alert. Grafana does not support distributing the alert rule execution between servers. That might be added in the future, but for now we prefer to keep it simple.

View File

@ -143,7 +143,7 @@ td[class="stack-column-center"] {
<center>
<p style="text-align: center; font-size: 12px; color: #999999;">
Sent by <a href="[[.AppUrl]]">Grafana v[[.BuildVersion]]</a>
<br />&copy; 2016 Grafana and raintank
<br />&copy; 2018 Grafana Labs
</p>
</center>
</td>

View File

@ -4,7 +4,7 @@
"company": "Grafana Labs"
},
"name": "grafana",
"version": "5.0.1-pre1",
"version": "5.1.0-pre1",
"repository": {
"type": "git",
"url": "http://github.com/grafana/grafana.git"
@ -104,10 +104,10 @@
"test": "grunt test",
"test:coverage": "grunt test --coverage=true",
"lint": "tslint -c tslint.json --project tsconfig.json --type-check",
"karma": "node ./node_modules/grunt-cli/bin/grunt karma:dev",
"jest": "node ./node_modules/jest-cli/bin/jest.js --notify --watch",
"api-tests": "node ./node_modules/jest-cli/bin/jest.js --notify --watch --config=tests/api/jest.js",
"precommit": "lint-staged && node ./node_modules/grunt-cli/bin/grunt precommit"
"karma": "grunt karma:dev",
"jest": "jest --notify --watch",
"api-tests": "jest --notify --watch --config=tests/api/jest.js",
"precommit": "lint-staged && grunt precommit"
},
"lint-staged": {
"*.{ts,tsx}": [
@ -118,7 +118,7 @@
"prettier --write",
"git add"
],
"*.go": [
"*pkg/**/*.go": [
"gofmt -w -s",
"git add"
]
@ -136,6 +136,7 @@
"angular-route": "^1.6.6",
"angular-sanitize": "^1.6.6",
"babel-polyfill": "^6.26.0",
"baron": "^3.0.3",
"brace": "^0.10.0",
"classnames": "^2.2.5",
"clipboard": "^1.7.1",
@ -151,7 +152,6 @@
"moment": "^2.18.1",
"mousetrap": "^1.6.0",
"mousetrap-global-bind": "^1.1.0",
"perfect-scrollbar": "^1.2.0",
"prop-types": "^15.6.0",
"react": "^16.2.0",
"react-dom": "^16.2.0",

View File

@ -1,5 +1,5 @@
#! /usr/bin/env bash
version=4.6.3
version=5.0.2
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb

View File

@ -1,15 +1,15 @@
package api
import (
"regexp"
"strings"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/middleware"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/setting"
)
func AdminGetSettings(c *middleware.Context) {
func AdminGetSettings(c *m.ReqContext) {
settings := make(map[string]interface{})
for _, section := range setting.Cfg.Sections() {
@ -22,6 +22,14 @@ func AdminGetSettings(c *middleware.Context) {
if strings.Contains(keyName, "secret") || strings.Contains(keyName, "password") || (strings.Contains(keyName, "provider_config")) {
value = "************"
}
if strings.Contains(keyName, "url") {
var rgx = regexp.MustCompile(`.*:\/\/([^:]*):([^@]*)@.*?$`)
var subs = rgx.FindAllSubmatch([]byte(value), -1)
if subs != nil && len(subs[0]) == 3 {
value = strings.Replace(value, string(subs[0][1]), "******", 1)
value = strings.Replace(value, string(subs[0][2]), "******", 1)
}
}
jsonSec[keyName] = value
}
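As a rough, self-contained sketch (not part of the diff) of what the URL masking above does to a DSN-style setting value:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Illustrative value only; any setting key containing "url" is checked the same way.
	value := "mysql://grafana:s3cret@localhost:3306/grafana"

	// Same pattern as in AdminGetSettings: capture the user and password embedded in the URL.
	rgx := regexp.MustCompile(`.*:\/\/([^:]*):([^@]*)@.*?$`)
	subs := rgx.FindAllSubmatch([]byte(value), -1)
	if subs != nil && len(subs[0]) == 3 {
		value = strings.Replace(value, string(subs[0][1]), "******", 1)
		value = strings.Replace(value, string(subs[0][2]), "******", 1)
	}

	fmt.Println(value) // mysql://******:******@localhost:3306/grafana
}
```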
@ -30,7 +38,7 @@ func AdminGetSettings(c *middleware.Context) {
c.JSON(200, settings)
}
func AdminGetStats(c *middleware.Context) {
func AdminGetStats(c *m.ReqContext) {
statsQuery := m.GetAdminStatsQuery{}

View File

@ -4,12 +4,11 @@ import (
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/middleware"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/util"
)
func AdminCreateUser(c *middleware.Context, form dtos.AdminCreateUserForm) {
func AdminCreateUser(c *m.ReqContext, form dtos.AdminCreateUserForm) {
cmd := m.CreateUserCommand{
Login: form.Login,
Email: form.Email,
@ -47,15 +46,15 @@ func AdminCreateUser(c *middleware.Context, form dtos.AdminCreateUserForm) {
c.JSON(200, result)
}
func AdminUpdateUserPassword(c *middleware.Context, form dtos.AdminUpdateUserPasswordForm) {
userId := c.ParamsInt64(":id")
func AdminUpdateUserPassword(c *m.ReqContext, form dtos.AdminUpdateUserPasswordForm) {
userID := c.ParamsInt64(":id")
if len(form.Password) < 4 {
c.JsonApiErr(400, "New password too short", nil)
return
}
userQuery := m.GetUserByIdQuery{Id: userId}
userQuery := m.GetUserByIdQuery{Id: userID}
if err := bus.Dispatch(&userQuery); err != nil {
c.JsonApiErr(500, "Could not read user from database", err)
@ -65,7 +64,7 @@ func AdminUpdateUserPassword(c *middleware.Context, form dtos.AdminUpdateUserPas
passwordHashed := util.EncodePassword(form.Password, userQuery.Result.Salt)
cmd := m.ChangeUserPasswordCommand{
UserId: userId,
UserId: userID,
NewPassword: passwordHashed,
}
@ -77,11 +76,11 @@ func AdminUpdateUserPassword(c *middleware.Context, form dtos.AdminUpdateUserPas
c.JsonOK("User password updated")
}
func AdminUpdateUserPermissions(c *middleware.Context, form dtos.AdminUpdateUserPermissionsForm) {
userId := c.ParamsInt64(":id")
func AdminUpdateUserPermissions(c *m.ReqContext, form dtos.AdminUpdateUserPermissionsForm) {
userID := c.ParamsInt64(":id")
cmd := m.UpdateUserPermissionsCommand{
UserId: userId,
UserId: userID,
IsGrafanaAdmin: form.IsGrafanaAdmin,
}
@ -93,10 +92,10 @@ func AdminUpdateUserPermissions(c *middleware.Context, form dtos.AdminUpdateUser
c.JsonOK("User permissions updated")
}
func AdminDeleteUser(c *middleware.Context) {
userId := c.ParamsInt64(":id")
func AdminDeleteUser(c *m.ReqContext) {
userID := c.ParamsInt64(":id")
cmd := m.DeleteUserCommand{UserId: userId}
cmd := m.DeleteUserCommand{UserId: userID}
if err := bus.Dispatch(&cmd); err != nil {
c.JsonApiErr(500, "Failed to delete user", err)

View File

@ -5,15 +5,14 @@ import (
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/models"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/alerting"
"github.com/grafana/grafana/pkg/services/guardian"
)
func ValidateOrgAlert(c *middleware.Context) {
func ValidateOrgAlert(c *m.ReqContext) {
id := c.ParamsInt64(":alertId")
query := models.GetAlertByIdQuery{Id: id}
query := m.GetAlertByIdQuery{Id: id}
if err := bus.Dispatch(&query); err != nil {
c.JsonApiErr(404, "Alert not found", nil)
@ -26,28 +25,28 @@ func ValidateOrgAlert(c *middleware.Context) {
}
}
func GetAlertStatesForDashboard(c *middleware.Context) Response {
dashboardId := c.QueryInt64("dashboardId")
func GetAlertStatesForDashboard(c *m.ReqContext) Response {
dashboardID := c.QueryInt64("dashboardId")
if dashboardId == 0 {
return ApiError(400, "Missing query parameter dashboardId", nil)
if dashboardID == 0 {
return Error(400, "Missing query parameter dashboardId", nil)
}
query := models.GetAlertStatesForDashboardQuery{
query := m.GetAlertStatesForDashboardQuery{
OrgId: c.OrgId,
DashboardId: c.QueryInt64("dashboardId"),
}
if err := bus.Dispatch(&query); err != nil {
return ApiError(500, "Failed to fetch alert states", err)
return Error(500, "Failed to fetch alert states", err)
}
return Json(200, query.Result)
return JSON(200, query.Result)
}
// GET /api/alerts
func GetAlerts(c *middleware.Context) Response {
query := models.GetAlertsQuery{
func GetAlerts(c *m.ReqContext) Response {
query := m.GetAlertsQuery{
OrgId: c.OrgId,
DashboardId: c.QueryInt64("dashboardId"),
PanelId: c.QueryInt64("panelId"),
@ -61,20 +60,20 @@ func GetAlerts(c *middleware.Context) Response {
}
if err := bus.Dispatch(&query); err != nil {
return ApiError(500, "List alerts failed", err)
return Error(500, "List alerts failed", err)
}
for _, alert := range query.Result {
alert.Url = models.GetDashboardUrl(alert.DashboardUid, alert.DashboardSlug)
alert.Url = m.GetDashboardUrl(alert.DashboardUid, alert.DashboardSlug)
}
return Json(200, query.Result)
return JSON(200, query.Result)
}
// POST /api/alerts/test
func AlertTest(c *middleware.Context, dto dtos.AlertTestCommand) Response {
func AlertTest(c *m.ReqContext, dto dtos.AlertTestCommand) Response {
if _, idErr := dto.Dashboard.Get("id").Int64(); idErr != nil {
return ApiError(400, "The dashboard needs to be saved at least once before you can test an alert rule", nil)
return Error(400, "The dashboard needs to be saved at least once before you can test an alert rule", nil)
}
backendCmd := alerting.AlertTestCommand{
@ -85,9 +84,9 @@ func AlertTest(c *middleware.Context, dto dtos.AlertTestCommand) Response {
if err := bus.Dispatch(&backendCmd); err != nil {
if validationErr, ok := err.(alerting.ValidationError); ok {
return ApiError(422, validationErr.Error(), nil)
return Error(422, validationErr.Error(), nil)
}
return ApiError(500, "Failed to test rule", err)
return Error(500, "Failed to test rule", err)
}
res := backendCmd.Result
@ -110,30 +109,30 @@ func AlertTest(c *middleware.Context, dto dtos.AlertTestCommand) Response {
dtoRes.TimeMs = fmt.Sprintf("%1.3fms", res.GetDurationMs())
return Json(200, dtoRes)
return JSON(200, dtoRes)
}
// GET /api/alerts/:id
func GetAlert(c *middleware.Context) Response {
func GetAlert(c *m.ReqContext) Response {
id := c.ParamsInt64(":alertId")
query := models.GetAlertByIdQuery{Id: id}
query := m.GetAlertByIdQuery{Id: id}
if err := bus.Dispatch(&query); err != nil {
return ApiError(500, "List alerts failed", err)
return Error(500, "List alerts failed", err)
}
return Json(200, &query.Result)
return JSON(200, &query.Result)
}
func GetAlertNotifiers(c *middleware.Context) Response {
return Json(200, alerting.GetNotifiers())
func GetAlertNotifiers(c *m.ReqContext) Response {
return JSON(200, alerting.GetNotifiers())
}
func GetAlertNotifications(c *middleware.Context) Response {
query := &models.GetAllAlertNotificationsQuery{OrgId: c.OrgId}
func GetAlertNotifications(c *m.ReqContext) Response {
query := &m.GetAllAlertNotificationsQuery{OrgId: c.OrgId}
if err := bus.Dispatch(query); err != nil {
return ApiError(500, "Failed to get alert notifications", err)
return Error(500, "Failed to get alert notifications", err)
}
result := make([]*dtos.AlertNotification, 0)
@ -149,57 +148,57 @@ func GetAlertNotifications(c *middleware.Context) Response {
})
}
return Json(200, result)
return JSON(200, result)
}
func GetAlertNotificationById(c *middleware.Context) Response {
query := &models.GetAlertNotificationsQuery{
func GetAlertNotificationByID(c *m.ReqContext) Response {
query := &m.GetAlertNotificationsQuery{
OrgId: c.OrgId,
Id: c.ParamsInt64("notificationId"),
}
if err := bus.Dispatch(query); err != nil {
return ApiError(500, "Failed to get alert notifications", err)
return Error(500, "Failed to get alert notifications", err)
}
return Json(200, query.Result)
return JSON(200, query.Result)
}
func CreateAlertNotification(c *middleware.Context, cmd models.CreateAlertNotificationCommand) Response {
func CreateAlertNotification(c *m.ReqContext, cmd m.CreateAlertNotificationCommand) Response {
cmd.OrgId = c.OrgId
if err := bus.Dispatch(&cmd); err != nil {
return ApiError(500, "Failed to create alert notification", err)
return Error(500, "Failed to create alert notification", err)
}
return Json(200, cmd.Result)
return JSON(200, cmd.Result)
}
func UpdateAlertNotification(c *middleware.Context, cmd models.UpdateAlertNotificationCommand) Response {
func UpdateAlertNotification(c *m.ReqContext, cmd m.UpdateAlertNotificationCommand) Response {
cmd.OrgId = c.OrgId
if err := bus.Dispatch(&cmd); err != nil {
return ApiError(500, "Failed to update alert notification", err)
return Error(500, "Failed to update alert notification", err)
}
return Json(200, cmd.Result)
return JSON(200, cmd.Result)
}
func DeleteAlertNotification(c *middleware.Context) Response {
cmd := models.DeleteAlertNotificationCommand{
func DeleteAlertNotification(c *m.ReqContext) Response {
cmd := m.DeleteAlertNotificationCommand{
OrgId: c.OrgId,
Id: c.ParamsInt64("notificationId"),
}
if err := bus.Dispatch(&cmd); err != nil {
return ApiError(500, "Failed to delete alert notification", err)
return Error(500, "Failed to delete alert notification", err)
}
return ApiSuccess("Notification deleted")
return Success("Notification deleted")
}
//POST /api/alert-notifications/test
func NotificationTest(c *middleware.Context, dto dtos.NotificationTestCommand) Response {
func NotificationTest(c *m.ReqContext, dto dtos.NotificationTestCommand) Response {
cmd := &alerting.NotificationTestCommand{
Name: dto.Name,
Type: dto.Type,
@ -207,74 +206,74 @@ func NotificationTest(c *middleware.Context, dto dtos.NotificationTestCommand) R
}
if err := bus.Dispatch(cmd); err != nil {
if err == models.ErrSmtpNotEnabled {
return ApiError(412, err.Error(), err)
if err == m.ErrSmtpNotEnabled {
return Error(412, err.Error(), err)
}
return ApiError(500, "Failed to send alert notifications", err)
return Error(500, "Failed to send alert notifications", err)
}
return ApiSuccess("Test notification sent")
return Success("Test notification sent")
}
//POST /api/alerts/:alertId/pause
func PauseAlert(c *middleware.Context, dto dtos.PauseAlertCommand) Response {
alertId := c.ParamsInt64("alertId")
func PauseAlert(c *m.ReqContext, dto dtos.PauseAlertCommand) Response {
alertID := c.ParamsInt64("alertId")
query := models.GetAlertByIdQuery{Id: alertId}
query := m.GetAlertByIdQuery{Id: alertID}
if err := bus.Dispatch(&query); err != nil {
return ApiError(500, "Get Alert failed", err)
return Error(500, "Get Alert failed", err)
}
guardian := guardian.New(query.Result.DashboardId, c.OrgId, c.SignedInUser)
if canEdit, err := guardian.CanEdit(); err != nil || !canEdit {
if err != nil {
return ApiError(500, "Error while checking permissions for Alert", err)
return Error(500, "Error while checking permissions for Alert", err)
}
return ApiError(403, "Access denied to this dashboard and alert", nil)
return Error(403, "Access denied to this dashboard and alert", nil)
}
cmd := models.PauseAlertCommand{
cmd := m.PauseAlertCommand{
OrgId: c.OrgId,
AlertIds: []int64{alertId},
AlertIds: []int64{alertID},
Paused: dto.Paused,
}
if err := bus.Dispatch(&cmd); err != nil {
return ApiError(500, "", err)
return Error(500, "", err)
}
var response models.AlertStateType = models.AlertStatePending
var response m.AlertStateType = m.AlertStatePending
pausedState := "un-paused"
if cmd.Paused {
response = models.AlertStatePaused
response = m.AlertStatePaused
pausedState = "paused"
}
result := map[string]interface{}{
"alertId": alertId,
"alertId": alertID,
"state": response,
"message": "Alert " + pausedState,
}
return Json(200, result)
return JSON(200, result)
}
//POST /api/admin/pause-all-alerts
func PauseAllAlerts(c *middleware.Context, dto dtos.PauseAllAlertsCommand) Response {
updateCmd := models.PauseAllAlertCommand{
func PauseAllAlerts(c *m.ReqContext, dto dtos.PauseAllAlertsCommand) Response {
updateCmd := m.PauseAllAlertCommand{
Paused: dto.Paused,
}
if err := bus.Dispatch(&updateCmd); err != nil {
return ApiError(500, "Failed to pause alerts", err)
return Error(500, "Failed to pause alerts", err)
}
var response models.AlertStateType = models.AlertStatePending
var response m.AlertStateType = m.AlertStatePending
pausedState := "un paused"
if updateCmd.Paused {
response = models.AlertStatePaused
response = m.AlertStatePaused
pausedState = "paused"
}
@ -284,5 +283,5 @@ func PauseAllAlerts(c *middleware.Context, dto dtos.PauseAllAlertsCommand) Respo
"alertsAffected": updateCmd.ResultCount,
}
return Json(200, result)
return JSON(200, result)
}

Some files were not shown because too many files have changed in this diff Show More