Merge branch 'master' into dashboard-acl-ux2

This commit is contained in:
Marcus Efraimsson 2018-04-16 21:19:01 +02:00
commit bc8353ae14
No known key found for this signature in database
GPG Key ID: EBFE0FB04612DD4A
215 changed files with 4447 additions and 2180 deletions

View File

@ -13,8 +13,13 @@
* **Prometheus**: Show template variable candidate in query editor [#9210](https://github.com/grafana/grafana/issues/9210), thx [@mtanda](https://github.com/mtanda)
* **Prometheus**: Support POST for query and query_range [#9859](https://github.com/grafana/grafana/pull/9859), thx [@mtanda](https://github.com/mtanda)
* **Alerting**: Add support for retries on alert queries [#5855](https://github.com/grafana/grafana/issues/5855), thx [@Thib17](https://github.com/Thib17)
* **Table**: Table plugin value mappings [#7119](https://github.com/grafana/grafana/issues/7119), thx [infernix](https://github.com/infernix)
* **IE11**: IE 11 compatibility [#11165](https://github.com/grafana/grafana/issues/11165)
* **Scrolling**: Better scrolling experience [#11053](https://github.com/grafana/grafana/issues/11053), [#11252](https://github.com/grafana/grafana/issues/11252), [#10836](https://github.com/grafana/grafana/issues/10836), [#11185](https://github.com/grafana/grafana/issues/11185), [#11168](https://github.com/grafana/grafana/issues/11168)
* **Docker**: Improved docker image (breaking changes regarding file ownership) [grafana-docker #141](https://github.com/grafana/grafana-docker/issues/141), thx [@Spindel](https://github.com/Spindel), [@ChristianKniep](https://github.com/ChristianKniep), [@brancz](https://github.com/brancz) and [@jangaraj](https://github.com/jangaraj)
### Minor
* **OpsGenie**: Add triggered alerts as description [#11046](https://github.com/grafana/grafana/pull/11046), thx [@llamashoes](https://github.com/llamashoes)
* **Cloudwatch**: Support high resolution metrics [#10925](https://github.com/grafana/grafana/pull/10925), thx [@mtanda](https://github.com/mtanda)
* **Cloudwatch**: Add dimension filtering to CloudWatch `dimension_values()` [#10029](https://github.com/grafana/grafana/issues/10029), thx [@willyhutw](https://github.com/willyhutw)
@ -25,6 +30,28 @@
* **Shortcuts**: Add shortcut for duplicate panel [#11102](https://github.com/grafana/grafana/issues/11102)
* **AuthProxy**: Support IPv6 in Auth proxy white list [#11330](https://github.com/grafana/grafana/pull/11330), thx [@corny](https://github.com/corny)
* **SMTP**: Don't connect to SMTP server using TLS unless configured. [#7189](https://github.com/grafana/grafana/issues/7189)
* **Prometheus**: Escape backslash in labels correctly. [#10555](https://github.com/grafana/grafana/issues/10555), thx [@roidelapluie](https://github.com/roidelapluie)
* **Variables**: Case-insensitive sorting for template values [#11128](https://github.com/grafana/grafana/issues/11128) thx [@cross](https://github.com/cross)
* **Annotations (native)**: Change default limit from 10 to 100 when querying the api (see the example query after this list) [#11569](https://github.com/grafana/grafana/issues/11569), thx [@flopp999](https://github.com/flopp999)
* **MySQL/Postgres/MSSQL**: PostgreSQL datasource generates invalid query with dates before 1970 [#11530](https://github.com/grafana/grafana/issues/11530) thx [@ryantxu](https://github.com/ryantxu)
* **Kiosk**: Adds url parameter for starting a dashboard in inactive mode [#11228](https://github.com/grafana/grafana/issues/11228), thx [@towolf](https://github.com/towolf)
* **Dashboard**: Enable closing timepicker using escape key [#11332](https://github.com/grafana/grafana/issues/11332)
* **Datasources**: Rename direct access mode in the data source settings [#11391](https://github.com/grafana/grafana/issues/11391)
* **Search**: Display dashboards in folder indented [#11073](https://github.com/grafana/grafana/issues/11073)
* **Units**: Use B/s instead of Bps for Bytes per second [#9342](https://github.com/grafana/grafana/pull/9342), thx [@mayli](https://github.com/mayli)
* **Units**: Radiation units [#11001](https://github.com/grafana/grafana/issues/11001), thx [@victorclaessen](https://github.com/victorclaessen)
* **Units**: Timeticks unit [#11183](https://github.com/grafana/grafana/pull/11183), thx [@jtyr](https://github.com/jtyr)
* **Units**: Concentration units and "Normal cubic metre" [#11211](https://github.com/grafana/grafana/issues/11211), thx [@flopp999](https://github.com/flopp999)
* **Units**: New currency - Czech koruna [#11384](https://github.com/grafana/grafana/pull/11384), thx [@Rohlik](https://github.com/Rohlik)
* **Avatar**: Fix DISABLE_GRAVATAR option [#11095](https://github.com/grafana/grafana/issues/11095)
* **Heatmap**: Disable log scale when using time series buckets [#10792](https://github.com/grafana/grafana/issues/10792)
* **Provisioning**: Remove `id` from json when provisioning dashboards, [#11138](https://github.com/grafana/grafana/issues/11138)
* **Prometheus**: tooltip for legend format not showing properly [#11516](https://github.com/grafana/grafana/issues/11516), thx [@svenklemm](https://github.com/svenklemm)
* **Playlist**: Empty playlists cannot be deleted [#11133](https://github.com/grafana/grafana/issues/11133), thx [@kichristensen](https://github.com/kichristensen)
* **Switch Orgs**: Alphabetic order in Switch Organization modal [#11556](https://github.com/grafana/grafana/issues/11556)
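For reference, a hedged sketch of the annotations query affected by the limit change above — the endpoint and parameters follow the HTTP API, while the host, time range and key are illustrative:

```bash
# Fetch annotations in a time window (from/to are epoch milliseconds);
# limit now defaults to 100 (was 10), pass a smaller value to restore the old behavior.
curl -s -H "Authorization: Bearer $GRAFANA_API_KEY" \
  "http://localhost:3000/api/annotations?from=1523300400000&to=1523304000000&limit=100"
```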
### Tech
* Migrated JavaScript files to TypeScript
# 5.0.4 (2018-03-28)
@ -52,7 +79,7 @@
* **Dashboards**: Changing templated value from dropdown is causing unsaved changes [#11063](https://github.com/grafana/grafana/issues/11063)
* **Prometheus**: Fixes bundled Prometheus 2.0 dashboard [#11016](https://github.com/grafana/grafana/issues/11016), thx [@roidelapluie](https://github.com/roidelapluie)
* **Sidemenu**: Profile menu "invisible" when gravatar is disabled [#11097](https://github.com/grafana/grafana/issues/11097)
* **Dashboard**: Fixes a bug with resizeable handles for panels [#11103](https://github.com/grafana/grafana/issues/11103)
* **Dashboard**: Fixes a bug with resizable handles for panels [#11103](https://github.com/grafana/grafana/issues/11103)
* **Alerting**: Telegram inline image mode fails when caption too long [#10975](https://github.com/grafana/grafana/issues/10975)
* **Alerting**: Fixes silent failing validation [#11145](https://github.com/grafana/grafana/pull/11145)
* **OAuth**: Only use jwt token if it contains an email address [#11127](https://github.com/grafana/grafana/pull/11127)
@ -116,7 +143,7 @@ Grafana v5.0 is going to be the biggest and most foundational release Grafana ha
### New Major Features
- **Dashboards** Dashboard folders, [#1611](https://github.com/grafana/grafana/issues/1611)
- **Teams** User groups (teams) implemented. Can be used in folder & dashboard permission list.
- **Dashboard grid**: Panels are now layed out in a two dimensional grid (with x, y, w, h). [#9093](https://github.com/grafana/grafana/issues/9093).
- **Dashboard grid**: Panels are now laid out in a two dimensional grid (with x, y, w, h). [#9093](https://github.com/grafana/grafana/issues/9093).
- **Templating**: Vertical repeat direction for panel repeats.
- **UX**: Major update to page header and navigation
- **Dashboard settings**: Combine dashboard settings views into one with side menu, [#9750](https://github.com/grafana/grafana/issues/9750)
@ -150,7 +177,7 @@ Dashboard panels and rows are positioned using a gridPos object `{x: 0, y: 0, w:
* **Dashboard history**: New config file option versions_to_keep sets how many versions per dashboard to store, [#9671](https://github.com/grafana/grafana/issues/9671)
* **Dashboard as cfg**: Load dashboards from file into Grafana on startup/change [#9654](https://github.com/grafana/grafana/issues/9654) [#5269](https://github.com/grafana/grafana/issues/5269)
* **Prometheus**: Grafana can now send alerts to Prometheus Alertmanager while firing [#7481](https://github.com/grafana/grafana/issues/7481), thx [@Thib17](https://github.com/Thib17) and [@mtanda](https://github.com/mtanda)
* **Table**: Support multiple table formated queries in table panel [#9170](https://github.com/grafana/grafana/issues/9170), thx [@davkal](https://github.com/davkal)
* **Table**: Support multiple table formatted queries in table panel [#9170](https://github.com/grafana/grafana/issues/9170), thx [@davkal](https://github.com/davkal)
* **Security**: Protect against brute force (frequent) login attempts [#7616](https://github.com/grafana/grafana/issues/7616)
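For reference, a minimal sketch of the `gridPos` layout described above — two panels side by side across the 24-unit-wide grid implied by the `w: 24` example (panel fields trimmed, values illustrative):

```json
{
  "panels": [
    { "id": 1, "type": "graph", "title": "Left",  "gridPos": { "x": 0,  "y": 0, "w": 12, "h": 8 } },
    { "id": 2, "type": "graph", "title": "Right", "gridPos": { "x": 12, "y": 0, "w": 12, "h": 8 } }
  ]
}
```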
## Minor
@ -172,7 +199,7 @@ Dashboard panels and rows are positioned using a gridPos object `{x: 0, y: 0, w:
* **Sensu**: Send alert message to sensu output [#9551](https://github.com/grafana/grafana/issues/9551), thx [@cjchand](https://github.com/cjchand)
* **Singlestat**: suppress error when result contains no datapoints [#9636](https://github.com/grafana/grafana/issues/9636), thx [@utkarshcmu](https://github.com/utkarshcmu)
* **Postgres/MySQL**: Control quoting in SQL-queries when using template variables [#9030](https://github.com/grafana/grafana/issues/9030), thanks [@svenklemm](https://github.com/svenklemm)
* **Pagerduty**: Pagerduty dont auto resolve incidents by default anymore. [#10222](https://github.com/grafana/grafana/issues/10222)
* **Pagerduty**: Pagerduty doesn't auto resolve incidents by default anymore. [#10222](https://github.com/grafana/grafana/issues/10222)
* **Cloudwatch**: Fix for multi-valued templated queries. [#9903](https://github.com/grafana/grafana/issues/9903)
## Tech
@ -250,7 +277,7 @@ The following properties have been deprecated and will be removed in a future re
* **Annotations**: Add support for creating annotations from graph panel [#8197](https://github.com/grafana/grafana/pull/8197)
* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
* **Graph**: Add support for local formating in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Jaeger**: Add support for open tracing using jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
* **CLI**: Make it possible to install plugins from any url [#5873](https://github.com/grafana/grafana/issues/5873)
@ -287,7 +314,7 @@ The following properties have been deprecated and will be removed in a future re
* **Graphite**: Fix for Grafana internal metrics to Graphite sending NaN values [#9279](https://github.com/grafana/grafana/issues/9279)
* **HTTP API**: Fix for HEAD method requests [#9307](https://github.com/grafana/grafana/issues/9307)
* **Templating**: Fix for duplicate template variable queries when refresh is set to time range change [#9185](https://github.com/grafana/grafana/issues/9185)
* **Metrics**: dont write NaN values to graphite [#9279](https://github.com/grafana/grafana/issues/9279)
* **Metrics**: don't write NaN values to graphite [#9279](https://github.com/grafana/grafana/issues/9279)
# 4.5.1 (2017-09-15)
@ -324,12 +351,12 @@ The following properties have been deprecated and will be removed in a future re
### Breaking change
* **InfluxDB/Elasticsearch**: The panel & data source option named "Group by time interval" is now named "Min time interval" and now always defines a lower limit for the auto group by time, without having to use the `>` prefix (that prefix still works). This should in theory have close to zero actual impact on existing dashboards. It does mean that if you used this setting to define a hard group by time interval of, say "1d", and you zoomed to a wide enough time range, the group by interval could increase above "1d", as the setting is now always considered a lower limit.
* **Elasticsearch**: Elasticsearch metric queries without date histogram now return table formated data making table panel much easier to use for this use case. Should not break/change existing dashboards with stock panels but external panel plugins can be affected.
* **Elasticsearch**: Elasticsearch metric queries without date histogram now return table formatted data making table panel much easier to use for this use case. Should not break/change existing dashboards with stock panels but external panel plugins can be affected.
## Changes
* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
* **InfluxDB**: Added paranthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
* **InfluxDB**: Added parentheses around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
## Bug Fixes
@ -341,7 +368,7 @@ The following properties have been deprecated and will be removed in a future re
## Bug Fixes
* **Search**: Fix for issue that casued search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
* **Search**: Fix for issue that caused search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
* **Modals**: ESC key now closes modal again, fixes [#8988](https://github.com/grafana/grafana/issues/8988), thx [@j-white](https://github.com/j-white)
# 4.4.2 (2017-08-01)
@ -680,12 +707,12 @@ due to too many connections/file handles on the data source backend. This proble
### Enhancements
* **Login**: Adds option to disable username/password logins, closes [#4674](https://github.com/grafana/grafana/issues/4674)
* **SingleStat**: Add series name as an option in singlestat panel, closes [#4740](https://github.com/grafana/grafana/issues/4740)
* **Localization**: Week start day now dependant on browser locale setting, closes [#3003](https://github.com/grafana/grafana/issues/3003)
* **Localization**: Week start day now dependent on browser locale setting, closes [#3003](https://github.com/grafana/grafana/issues/3003)
* **Templating**: Update panel repeats for variables that change on time refresh, closes [#5021](https://github.com/grafana/grafana/issues/5021)
* **Templating**: Add support for numeric and alphabetical sorting of variable values, closes [#2839](https://github.com/grafana/grafana/issues/2839)
* **Elasticsearch**: Support to set Precision Threshold for Unique Count metric, closes [#4689](https://github.com/grafana/grafana/issues/4689)
* **Navigation**: Add search to org switcher, closes [#2609](https://github.com/grafana/grafana/issues/2609)
* **Database**: Allow database config using one propertie, closes [#5456](https://github.com/grafana/grafana/pull/5456)
* **Database**: Allow database config using one property, closes [#5456](https://github.com/grafana/grafana/pull/5456)
* **Graphite**: Add support for groupByNodes, closes [#5613](https://github.com/grafana/grafana/pull/5613)
* **Influxdb**: Add support for elapsed(), closes [#5827](https://github.com/grafana/grafana/pull/5827)
* **OpenTSDB**: Add support for explicitTags for OpenTSDB>=2.3, closes [#6360](https://github.com/grafana/grafana/pull/6361)
@ -752,7 +779,7 @@ due to too many connections/file handles on the data source backend. This proble
* **Datasource**: Pending data source requests are cancelled before new ones are issued (Graphite & Prometheus), closes [#5321](https://github.com/grafana/grafana/issues/5321)
### Breaking changes
* **Logging** : Changed default logging output format (now structured into message, and key value pairs, with logger key acting as component). You can also no change in config to json log ouput.
* **Logging**: Changed default logging output format (now structured into message, and key value pairs, with logger key acting as component). You can also now change the config to json log output.
* **Graphite**: The Graph panel no longer has a Graphite PNG option, closes [#5367](https://github.com/grafana/grafana/issues/5367)
### Bug fixes
@ -770,7 +797,7 @@ due to too many connections/file handles on the data source backend. This proble
* **Annotations**: Annotations can now use a template variable as data source, closes [#5054](https://github.com/grafana/grafana/issues/5054)
* **Time picker**: Fixed issue with timepicker and UTC when reading time from URL, fixes [#5078](https://github.com/grafana/grafana/issues/5078)
* **CloudWatch**: Support for Multiple Account by AssumeRole, closes [#3522](https://github.com/grafana/grafana/issues/3522)
* **Singlestat**: Fixed alignment and minium height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
* **Singlestat**: Fixed alignment and minimum height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
* **Share modal**: Fixed link when using grafana under dashboard sub url, fixes [#5109](https://github.com/grafana/grafana/issues/5109)
* **Prometheus**: Fixed bug in query editor that caused it not to load when reloading page, fixes [#5107](https://github.com/grafana/grafana/issues/5107)
* **Elasticsearch**: Fixed bug when template variable query returns numeric values, fixes [#5097](https://github.com/grafana/grafana/issues/5097), fixes [#5088](https://github.com/grafana/grafana/issues/5088)
@ -787,7 +814,7 @@ due to too many connections/file handles on the data source backend. This proble
* **Graph**: Fixed broken PNG rendering in graph panel, fixes [#5025](https://github.com/grafana/grafana/issues/5025)
* **Graph**: Fixed broken xaxis on graph panel, fixes [#5024](https://github.com/grafana/grafana/issues/5024)
* **Influxdb**: Fixes crash when hiding middle serie, fixes [#5005](https://github.com/grafana/grafana/issues/5005)
* **Influxdb**: Fixes crash when hiding middle series, fixes [#5005](https://github.com/grafana/grafana/issues/5005)
# 3.0.1 Stable (2016-05-11)
@ -799,7 +826,7 @@ due to too many connections/file handles on the data source backend. This proble
### Bug fixes
* **Dashboard title**: Fixed max dashboard title width (media query) for large screens, fixes [#4859](https://github.com/grafana/grafana/issues/4859)
* **Annotations**: Fixed issue with entering annotation edit view, fixes [#4857](https://github.com/grafana/grafana/issues/4857)
* **Remove query**: Fixed issue with removing query for data sources without collapsable query editors, fixes [#4856](https://github.com/grafana/grafana/issues/4856)
* **Remove query**: Fixed issue with removing query for data sources without collapsible query editors, fixes [#4856](https://github.com/grafana/grafana/issues/4856)
* **Graphite PNG**: Fixed issue with graphite png rendering option, fixes [#4864](https://github.com/grafana/grafana/issues/4864)
* **InfluxDB**: Fixed issue with missing plus group by icon, fixes [#4862](https://github.com/grafana/grafana/issues/4862)
* **Graph**: Fixes missing line mode for thresholds, fixes [#4902](https://github.com/grafana/grafana/pull/4902)
@ -815,11 +842,11 @@ due to too many connections/file handles on the data source backend. This proble
### Bug fixes
* **InfluxDB 0.12**: Fixed issue with templating and `show tag values` query only returning tags for the first measurement, fixes [#4726](https://github.com/grafana/grafana/issues/4726)
* **Templating**: Fixed issue with regex formating when matching multiple values, fixes [#4755](https://github.com/grafana/grafana/issues/4755)
* **Templating**: Fixed issue with regex formatting when matching multiple values, fixes [#4755](https://github.com/grafana/grafana/issues/4755)
* **Templating**: Fixed issue with custom all value and escaping, fixes [#4736](https://github.com/grafana/grafana/issues/4736)
* **Dashlist**: Fixed issue with dashboard list panel and caching tags, fixes [#4768](https://github.com/grafana/grafana/issues/4768)
* **Graph**: Fixed issue with unneeded scrollbar in legend for Firefox, fixes [#4760](https://github.com/grafana/grafana/issues/4760)
* **Table panel**: Fixed issue table panel formating string array properties, fixes [#4791](https://github.com/grafana/grafana/issues/4791)
* **Table panel**: Fixed issue table panel formatting string array properties, fixes [#4791](https://github.com/grafana/grafana/issues/4791)
* **grafana-cli**: Improve error message when failing to install plugins due to corrupt response, fixes [#4651](https://github.com/grafana/grafana/issues/4651)
* **Singlestat**: Fixes prefix and postfix for gauges, fixes [#4812](https://github.com/grafana/grafana/issues/4812)
* **Singlestat**: Fixes auto-refresh on change for some options, fixes [#4809](https://github.com/grafana/grafana/issues/4809)
@ -911,7 +938,7 @@ slack channel (link to slack channel in readme).
### Bug fixes
* **Playlist**: Fix for memory leak when running a playlist, closes [#3794](https://github.com/grafana/grafana/pull/3794)
* **InfluxDB**: Fix for InfluxDB and table panel when using Format As Table and having group by time, fixes [#3928](https://github.com/grafana/grafana/issues/3928)
* **Panel Time shift**: Fix for panel time range and using dashboard times liek `Today` and `This Week`, fixes [#3941](https://github.com/grafana/grafana/issues/3941)
* **Panel Time shift**: Fix for panel time range and using dashboard times like `Today` and `This Week`, fixes [#3941](https://github.com/grafana/grafana/issues/3941)
* **Row repeat**: Repeated rows will now appear next to each other and not by the bottom of the dashboard, fixes [#3942](https://github.com/grafana/grafana/issues/3942)
* **Png renderer**: Fix for phantomjs path on windows, fixes [#3657](https://github.com/grafana/grafana/issues/3657)
@ -935,7 +962,7 @@ slack channel (link to slack channel in readme).
### Bug Fixes
* **metric editors**: Fix for clicking typeahead auto dropdown option, fixes [#3428](https://github.com/grafana/grafana/issues/3428)
* **influxdb**: Fixed issue showing Group By label only on first query, fixes [#3453](https://github.com/grafana/grafana/issues/3453)
* **logging**: Add more verbose info logging for http reqeusts, closes [#3405](https://github.com/grafana/grafana/pull/3405)
* **logging**: Add more verbose info logging for http requests, closes [#3405](https://github.com/grafana/grafana/pull/3405)
# 2.6.0-Beta1 (2015-12-04)
@ -962,7 +989,7 @@ slack channel (link to slack channel in readme).
**New Feature: Mix data sources**
- A built-in data source is now available named `-- Mixed --`. When picked in the metrics tab,
it allows you to add queries of differnet data source types & instances to the same graph/panel!
it allows you to add queries of different data source types & instances to the same graph/panel!
[Issue #436](https://github.com/grafana/grafana/issues/436)
**New Feature: Elasticsearch Metrics Query Editor and Viz Support**
@ -1001,7 +1028,7 @@ it allows you to add queries of differnet data source types & instances to the s
- [Issue #2564](https://github.com/grafana/grafana/issues/2564). Templating: Another attempt at fixing #2534 (Init multi value template var used in repeat panel from url)
- [Issue #2620](https://github.com/grafana/grafana/issues/2620). Graph: multi series tooltip did not highlight the correct point when stacking was enabled and series were of different resolution
- [Issue #2636](https://github.com/grafana/grafana/issues/2636). InfluxDB: Do not show template vars in dropdown for tag keys and group by keys
- [Issue #2604](https://github.com/grafana/grafana/issues/2604). InfluxDB: More alias options, can now use `$[0-9]` syntax to reference part of a measurement name (seperated by dots)
- [Issue #2604](https://github.com/grafana/grafana/issues/2604). InfluxDB: More alias options, can now use `$[0-9]` syntax to reference part of a measurement name (separated by dots)
**Breaking Changes**
- Notice to makers/users of custom data sources, there is a minor breaking change in 2.2 that
@ -1083,7 +1110,7 @@ Grunt & Watch tasks:
- [Issue #1826](https://github.com/grafana/grafana/issues/1826). User role 'Viewer' is now prohibited from entering edit mode (and doing other transient dashboard edits). A new role `Read Only Editor` will replace the old Viewer behavior
- [Issue #1928](https://github.com/grafana/grafana/issues/1928). HTTP API: GET /api/dashboards/db/:slug response changed property `model` to `dashboard` to match the POST request naming
- Backend render URL changed from `/render/dashboard/solo` to `/render/dashboard-solo/` (in order to have consistent dashboard url `/dashboard/:type/:slug`)
- Search HTTP API response has changed (simplified), tags list moved to seperate HTTP resource URI
- Search HTTP API response has changed (simplified), tags list moved to separate HTTP resource URI
- Datasource HTTP api breaking change, ADD datasource is now POST /api/datasources/, update is now PUT /api/datasources/:id
**Fixes**
@ -1100,7 +1127,7 @@ Grunt & Watch tasks:
# 2.0.2 (2015-04-22)
**Fixes**
- [Issue #1832](https://github.com/grafana/grafana/issues/1832). Graph Panel + Legend Table mode: Many series casued zero height graph, now legend will never reduce the height of the graph below 50% of row height.
- [Issue #1832](https://github.com/grafana/grafana/issues/1832). Graph Panel + Legend Table mode: Many series caused zero height graph, now legend will never reduce the height of the graph below 50% of row height.
- [Issue #1846](https://github.com/grafana/grafana/issues/1846). Snapshots: Fixed issue with snapshotting dashboards with an interval template variable
- [Issue #1848](https://github.com/grafana/grafana/issues/1848). Panel timeshift: You can now use panel timeshift without a relative time override
@ -1142,7 +1169,7 @@ Grunt & Watch tasks:
**Fixes**
- [Issue #1649](https://github.com/grafana/grafana/issues/1649). HTTP API: grafana /render calls now work with api keys
- [Issue #1667](https://github.com/grafana/grafana/issues/1667). Datasource proxy & session timeout fix (casued 401 Unauthorized error after a while)
- [Issue #1667](https://github.com/grafana/grafana/issues/1667). Datasource proxy & session timeout fix (caused 401 Unauthorized error after a while)
- [Issue #1707](https://github.com/grafana/grafana/issues/1707). Unsaved changes: Do not show for snapshots, scripted and file based dashboards
- [Issue #1703](https://github.com/grafana/grafana/issues/1703). Unsaved changes: Do not show for users with role `Viewer`
- [Issue #1675](https://github.com/grafana/grafana/issues/1675). Data source proxy: Fixed issue with Gzip enabled and data source proxy
@ -1155,14 +1182,14 @@ Grunt & Watch tasks:
**Important Note**
Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated backend server. Please read the [Documentation](http://docs.grafana.org) for more detailed about this SIGNIFCANT change to Grafana
Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated backend server. Please read the [Documentation](http://docs.grafana.org) for more details about this SIGNIFICANT change to Grafana
**New features**
- [Issue #1623](https://github.com/grafana/grafana/issues/1623). Share Dashboard: Dashboard snapshot sharing (dash and data snapshot), save locally or save to the public snapshot site snapshots.raintank.io
- [Issue #1622](https://github.com/grafana/grafana/issues/1622). Share Panel: The share modal now has an embed option, gives you an iframe that you can use to embed a single graph on another web site
- [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes inbetween the user is promted with a warning if he really wants to overwrite the other's changes
- [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes in between, the user is prompted with a warning asking whether they really want to overwrite the other's changes
- [Issue #1331](https://github.com/grafana/grafana/issues/1331). Graph & Singlestat: New axis/unit format selector and more units (kbytes, Joule, Watt, eV), and new design for graph axis & grid tab and single stat options tab views
- [Issue #1241](https://github.com/grafana/grafana/issues/1242). Timepicker: New option in timepicker (under dashboard settings), to change ``now`` to be for example ``now-1m``, usefull when you want to ignore last minute because it contains incomplete data
- [Issue #1241](https://github.com/grafana/grafana/issues/1242). Timepicker: New option in timepicker (under dashboard settings), to change ``now`` to be for example ``now-1m``, useful when you want to ignore last minute because it contains incomplete data
- [Issue #171](https://github.com/grafana/grafana/issues/171). Panel: Different time periods, panels can override dashboard relative time and/or add a time shift
- [Issue #1488](https://github.com/grafana/grafana/issues/1488). Dashboard: Clone dashboard / Save as
- [Issue #1458](https://github.com/grafana/grafana/issues/1458). User: persisted user option for dark or light theme (no longer an option on a dashboard)
@ -1193,7 +1220,7 @@ Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated
**OpenTSDB breaking change**
- [Issue #1438](https://github.com/grafana/grafana/issues/1438). OpenTSDB: Automatic downsample interval passed to OpenTSDB (depends on timespan and graph width)
- NOTICE, Downsampling is now enabled by default, so if you have not picked a downsample aggregator in your metric query do so or your graphs will be missleading
- NOTICE, Downsampling is now enabled by default, so if you have not picked a downsample aggregator in your metric query do so or your graphs will be misleading
- This will make Grafana a lot quicker for OpenTSDB users when viewing large time spans without having to change the downsample interval manually.
**Tech**
@ -1224,7 +1251,7 @@ Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated
- [Issue #1114](https://github.com/grafana/grafana/issues/1114). Graphite: Lexer fix, allow equal sign (=) in metric paths
- [Issue #1136](https://github.com/grafana/grafana/issues/1136). Graph: Fix to legend value Max and negative values
- [Issue #1150](https://github.com/grafana/grafana/issues/1150). SinglestatPanel: Fixed absolute drilldown link issue
- [Issue #1123](https://github.com/grafana/grafana/issues/1123). Firefox: Workaround for Firefox bug, casued input text fields to not be selectable and not have placeable cursor
- [Issue #1123](https://github.com/grafana/grafana/issues/1123). Firefox: Workaround for Firefox bug, caused input text fields to not be selectable and not have placeable cursor
- [Issue #1108](https://github.com/grafana/grafana/issues/1108). Graph: Fix for tooltip series order when series draw order was changed with zindex property
# 1.9.0-rc1 (2014-11-17)
@ -1301,7 +1328,7 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #234](https://github.com/grafana/grafana/issues/234). Templating: Interval variable type for time intervals summarize/group by parameter, included "auto" option, and auto step counts option.
- [Issue #262](https://github.com/grafana/grafana/issues/262). Templating: Ability to use template variables for function parameters via custom variable type, can be used as parameter for movingAverage or scaleToSeconds for example
- [Issue #312](https://github.com/grafana/grafana/issues/312). Templating: Can now use template variables in panel titles
- [Issue #613](https://github.com/grafana/grafana/issues/613). Templating: Full support for InfluxDB, filter by part of series names, extract series substrings, nested queries, multipe where clauses!
- [Issue #613](https://github.com/grafana/grafana/issues/613). Templating: Full support for InfluxDB, filter by part of series names, extract series substrings, nested queries, multiple where clauses!
- Template variables can be initialized from the url with `var-my_varname=value` (breaking change; before it was just `my_varname`), see the example url after this list.
- Templating and url state sync has some issues that are not solved for this release, see [Issue #772](https://github.com/grafana/grafana/issues/772) for more details.
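As an illustration of the `var-` prefix (dashboard path and variable values hypothetical), a multi value variable repeats the parameter:

```
/dashboard/db/my-dashboard?var-host=server01&var-host=server02
```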
@ -1390,7 +1417,7 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #136](https://github.com/grafana/grafana/issues/136). Graph: New legend display option "Align as table"
- [Issue #556](https://github.com/grafana/grafana/issues/556). Graph: New legend display option "Right side", will show legend to the right of the graph
- [Issue #604](https://github.com/grafana/grafana/issues/604). Graph: New axis format, 'bps' (SI unit in steps of 1000) useful for network gear metrics
- [Issue #626](https://github.com/grafana/grafana/issues/626). Graph: Downscale y axis to more precise unit, value of 0.1 for seconds format will be formated as 100 ms. Thanks @kamaradclimber
- [Issue #626](https://github.com/grafana/grafana/issues/626). Graph: Downscale y axis to more precise unit, value of 0.1 for seconds format will be formatted as 100 ms. Thanks @kamaradclimber
- [Issue #618](https://github.com/grafana/grafana/issues/618). OpenTSDB: Series alias option to override metric name returned from opentsdb. Thanks @heldr
**Documentation**
@ -1420,13 +1447,13 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #522](https://github.com/grafana/grafana/issues/522). Series names and column name typeahead cache fix
- [Issue #504](https://github.com/grafana/grafana/issues/504). Fixed influxdb issue with raw query that caused wrong value column detection
- [Issue #526](https://github.com/grafana/grafana/issues/526). Default property that marks which datasource is default in config.js is now optional
- [Issue #342](https://github.com/grafana/grafana/issues/342). Auto-refresh caused 2 refreshes (and hence mulitple queries) each time (at least in firefox)
- [Issue #342](https://github.com/grafana/grafana/issues/342). Auto-refresh caused 2 refreshes (and hence multiple queries) each time (at least in firefox)
# 1.6.0 (2014-06-16)
#### New features or improvements
- [Issue #427](https://github.com/grafana/grafana/issues/427). New Y-axis formatter for metric values that represent seconds, Thanks @jippi
- [Issue #390](https://github.com/grafana/grafana/issues/390). Allow special characters in serie names (influxdb datasource), Thanks @majst01
- [Issue #390](https://github.com/grafana/grafana/issues/390). Allow special characters in series names (influxdb datasource), Thanks @majst01
- [Issue #428](https://github.com/grafana/grafana/issues/428). Refactoring of filterSrv, Thanks @Tetha
- [Issue #445](https://github.com/grafana/grafana/issues/445). New config for playlist feature. Set playlist_timespan to set default playlist interval, Thanks @rmca
- [Issue #461](https://github.com/grafana/grafana/issues/461). New graphite function definition added isNonNull, Thanks @tmonk42
@ -1447,13 +1474,13 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
- [Issue #475](https://github.com/grafana/grafana/issues/475). Add panel icon and Row edit button is replaced by the Row edit menu
- New graphs now have a default empty query
- Add Row button now creates a row with default height of 250px (no longer opens dashboard settings modal)
- Clean up of config.sample.js, graphiteUrl removed (still works, but depricated, removed in future)
- Clean up of config.sample.js, graphiteUrl removed (still works, but deprecated, removed in future)
Use datasources config instead. panel_names removed from config.js. Use plugins.panels to add custom panels
- Graphite panel is now renamed graph (Existing dashboards will still work)
#### Fixes
- [Issue #126](https://github.com/grafana/grafana/issues/126). Graphite query lexer change, can now handle regex parameters for aliasSub function
- [Issue #447](https://github.com/grafana/grafana/issues/447). Filter option loading when having muliple nested filters now works better. Options are now reloaded correctly and there are no multiple renders/refresh inbetween.
- [Issue #447](https://github.com/grafana/grafana/issues/447). Filter option loading when having multiple nested filters now works better. Options are now reloaded correctly and there are no multiple renders/refresh in between.
- [Issue #412](https://github.com/grafana/grafana/issues/412). After a filter option is changed and a nested template param is reloaded, if the current value exists after the options are reloaded the currently selected value is kept.
- [Issue #460](https://github.com/grafana/grafana/issues/460). Legend Current value did not display when value was zero
- [Issue #328](https://github.com/grafana/grafana/issues/328). Fix to series toggling bug that caused annotations to be hidden when toggling/hiding series.

View File

@ -22,7 +22,6 @@ module.exports = function (grunt) {
}
}
config.coverage = grunt.option('coverage');
config.phjs = grunt.option('phjsToRelease');
config.pkg.version = grunt.option('pkgVer') || config.pkg.version;

View File

@ -9,6 +9,7 @@ upgrading Grafana please check here before creating an issue.
- [Datasource plugin written in typescript](https://github.com/grafana/typescript-template-datasource)
- [Simple json datasource plugin](https://github.com/grafana/simple-json-datasource)
- [Plugin development guide](http://docs.grafana.org/plugins/developing/development/)
- [Webpack Grafana plugin template project](https://github.com/CorpGlory/grafana-plugin-template-webpack)
## Changes in v4.6

View File

@ -7,7 +7,7 @@ clone_folder: c:\gopath\src\github.com\grafana\grafana
environment:
nodejs_version: "6"
GOPATH: c:\gopath
GOVERSION: 1.9.2
GOVERSION: 1.10
install:
- rmdir c:\go /s /q

View File

@ -1,13 +0,0 @@
coverage:
precision: 2
round: down
range: "50...100"
status:
project: yes
patch: yes
changes: no
comment:
layout: "diff"
behavior: "once"

View File

@ -64,7 +64,7 @@
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as seperate properties or as on string using the url propertie.
# as separate properties or as one string using the url property.
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
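A hedged sketch of the two styles this comment describes, following the same commented-out convention as the rest of the file (values illustrative):

```ini
# as separate properties:
;type = mysql
;host = 127.0.0.1:3306
;name = grafana
;user = grafana
;password = secret

# or as one string via the url property:
;url = mysql://grafana:secret@127.0.0.1:3306/grafana
```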

View File

@ -38,7 +38,7 @@ CACHE_QUERY_PORT = 7002
LOG_UPDATES = False
# Enable AMQP if you want to receve metrics using an amqp broker
# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False
# Verbose means a line will be logged for every metric received

View File

@ -41,7 +41,7 @@ PICKLE_RECEIVER_PORT = 2004
CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002
# Enable AMQP if you want to receve metrics using you amqp broker
# Enable AMQP if you want to receive metrics using your amqp broker
ENABLE_AMQP = True
# Verbose means a line will be logged for every metric received

View File

@ -265,7 +265,7 @@ WHISPER_FALLOCATE_CREATE = True
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
# Enable AMQP if you want to receve metrics using an amqp broker
# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False
# Verbose means a line will be logged for every metric received

View File

@ -30,7 +30,7 @@ give_completer_focus = shift-space
# pertain only to specific metric types.
#
# The dashboard presents only metrics that fall into specified naming schemes
# defined in this file. This creates a simpler, more targetted view of the
# defined in this file. This creates a simpler, more targeted view of the
# data. The general form for defining a naming scheme is as follows:
#
#[Metric Type]

View File

@ -100,7 +100,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1521715844826,
"iteration": 1523320861623,
"links": [],
"panels": [
{
@ -443,7 +443,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -522,7 +526,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -601,7 +609,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -680,7 +692,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -759,7 +775,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -838,7 +858,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -927,7 +951,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1026,7 +1054,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1115,7 +1147,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1196,7 +1232,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1285,7 +1325,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1366,7 +1410,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1455,7 +1503,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1536,7 +1588,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1619,7 +1675,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1702,7 +1762,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1792,7 +1856,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1875,7 +1943,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1965,7 +2037,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2048,7 +2124,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2138,7 +2218,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2221,7 +2305,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2311,7 +2399,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2394,7 +2486,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": false,
@ -2504,5 +2600,5 @@
"timezone": "",
"title": "Microsoft SQL Server Data Source Test",
"uid": "GlAqcPgmz",
"version": 57
"version": 58
}

View File

@ -2,7 +2,7 @@
"__inputs": [
{
"name": "DS_MYSQL",
"label": "Mysql",
"label": "MySQL",
"description": "",
"type": "datasource",
"pluginId": "mysql",
@ -20,19 +20,19 @@
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
"version": "5.0.0"
},
{
"type": "datasource",
"id": "mysql",
"name": "MySQL",
"version": "1.0.0"
"version": "5.0.0"
},
{
"type": "panel",
"id": "table",
"name": "Table",
"version": ""
"version": "5.0.0"
}
],
"annotations": {
@ -53,7 +53,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1518602729468,
"iteration": 1523372133566,
"links": [],
"panels": [
{
@ -118,7 +118,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeShift": "1h",
"timeShift": null,
"title": "Average logins / $summarize",
"tooltip": {
"shared": true,
@ -150,7 +150,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -204,7 +208,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeShift": "1h",
"timeShift": null,
"title": "Average payments started/ended / $summarize",
"tooltip": {
"shared": true,
@ -236,7 +240,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -284,7 +292,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeShift": "1h",
"timeShift": null,
"title": "Max CPU / $summarize",
"tooltip": {
"shared": true,
@ -316,7 +324,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"columns": [],
@ -369,7 +381,7 @@
"target": ""
}
],
"timeShift": "1h",
"timeShift": null,
"title": "Values",
"transform": "table",
"type": "table"
@ -428,7 +440,6 @@
"auto_count": 5,
"auto_min": "10s",
"current": {
"selected": true,
"text": "1m",
"value": "1m"
},
@ -545,5 +556,5 @@
"timezone": "",
"title": "Grafana Fake Data Gen - MySQL",
"uid": "DGsCac3kz",
"version": 6
"version": 8
}

View File

@ -7,9 +7,6 @@
MYSQL_PASSWORD: password
ports:
- "3306:3306"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]
fake-mysql-data:

View File

@ -0,0 +1,3 @@
FROM mysql:latest
ADD setup.sql /docker-entrypoint-initdb.d
CMD ["mysqld"]

View File

@ -7,14 +7,6 @@
"type": "datasource",
"pluginId": "mysql",
"pluginName": "MySQL"
},
{
"name": "DS_MSSQL_TEST",
"label": "MSSQL Test",
"description": "",
"type": "datasource",
"pluginId": "mssql",
"pluginName": "Microsoft SQL Server"
}
],
"__requires": [
@ -30,12 +22,6 @@
"name": "Graph",
"version": "5.0.0"
},
{
"type": "datasource",
"id": "mssql",
"name": "Microsoft SQL Server",
"version": "1.0.0"
},
{
"type": "datasource",
"id": "mysql",
@ -114,7 +100,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1521715720483,
"iteration": 1523320712115,
"links": [],
"panels": [
{
@ -349,7 +335,7 @@
{
"alias": "Time",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "time_sec",
"pattern": "time",
"type": "date"
},
{
@ -457,7 +443,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -536,7 +526,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -615,7 +609,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -694,7 +692,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -773,7 +775,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -852,7 +858,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -941,7 +951,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1034,7 +1048,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1123,7 +1141,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1204,7 +1226,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1293,7 +1319,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1374,7 +1404,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1463,7 +1497,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1544,7 +1582,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1634,14 +1676,18 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_MSSQL_TEST}",
"datasource": "${DS_MYSQL_TEST}",
"fill": 1,
"gridPos": {
"h": 8,
@ -1717,7 +1763,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1807,7 +1857,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1890,7 +1944,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1980,7 +2038,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2063,7 +2125,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2153,7 +2219,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2236,7 +2306,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": false,
@ -2315,8 +2389,8 @@
]
},
"time": {
"from": "2018-03-15T11:30:00.000Z",
"to": "2018-03-15T12:55:01.000Z"
"from": "2018-03-15T12:30:00.000Z",
"to": "2018-03-15T13:55:01.000Z"
},
"timepicker": {
"refresh_intervals": [
@ -2346,5 +2420,5 @@
"timezone": "",
"title": "MySQL Data Source Test",
"uid": "Hmf8FDkmz",
"version": 9
"version": 12
}

View File

@ -1,5 +1,6 @@
mysqltests:
image: mysql:latest
build:
context: blocks/mysql_tests
environment:
MYSQL_ROOT_PASSWORD: rootpass
MYSQL_DATABASE: grafana_tests
@ -7,7 +8,4 @@
MYSQL_PASSWORD: password
ports:
- "3306:3306"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
tmpfs: /var/lib/mysql:rw

View File

@ -0,0 +1,2 @@
CREATE DATABASE grafana_ds_tests;
GRANT ALL PRIVILEGES ON grafana_ds_tests.* TO 'grafana';

View File

@ -17,6 +17,7 @@ EXPOSE 389
VOLUME ["/etc/ldap", "/var/lib/ldap"]
COPY modules/ /etc/ldap.dist/modules
COPY prepopulate/ /etc/ldap.dist/prepopulate
COPY entrypoint.sh /entrypoint.sh

View File

@ -65,7 +65,7 @@ EOF
fi
if [[ -n "$SLAPD_ADDITIONAL_SCHEMAS" ]]; then
IFS=","; declare -a schemas=($SLAPD_ADDITIONAL_SCHEMAS)
IFS=","; declare -a schemas=($SLAPD_ADDITIONAL_SCHEMAS); unset IFS
for schema in "${schemas[@]}"; do
slapadd -n0 -F /etc/ldap/slapd.d -l "/etc/ldap/schema/${schema}.ldif" >/dev/null 2>&1
@ -73,14 +73,18 @@ EOF
fi
if [[ -n "$SLAPD_ADDITIONAL_MODULES" ]]; then
IFS=","; declare -a modules=($SLAPD_ADDITIONAL_MODULES)
IFS=","; declare -a modules=($SLAPD_ADDITIONAL_MODULES); unset IFS
for module in "${modules[@]}"; do
slapadd -n0 -F /etc/ldap/slapd.d -l "/etc/ldap/modules/${module}.ldif" >/dev/null 2>&1
done
fi
chown -R openldap:openldap /etc/ldap/slapd.d/
for file in `ls /etc/ldap/prepopulate/*.ldif`; do
slapadd -F /etc/ldap/slapd.d -l "$file"
done
chown -R openldap:openldap /etc/ldap/slapd.d/ /var/lib/ldap/ /var/run/slapd/
else
slapd_configs_in_env=`env | grep 'SLAPD_'`

View File

@ -0,0 +1,13 @@
# Notes on OpenLdap Docker Block
Any ldif files added to the prepopulate subdirectory will be automatically imported into the OpenLdap database.
The ldif files add three users, `ldapviewer`, `ldapeditor` and `ldapadmin`. Two groups, `admins` and `users`, are added that correspond with the group mappings in the default conf/ldap.toml. `ldapadmin` is a member of `admins` and `ldapeditor` is a member of `users`.
Note that users that are added here need to specify a `memberOf` attribute manually as well as the `member` attribute for the group. The `memberOf` module usually does this automatically (if you add a group in Apache Directory Studio for example) but this does not work in the entrypoint script as it uses the `slapadd` command to add entries before the server has started and before the `memberOf` module is loaded.
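A minimal sketch of that pattern with a hypothetical `exampleuser` (the real entries live in the `prepopulate/` files) — both sides of the membership are written out explicitly:

```ldif
# user entry: memberOf must be added manually
dn: cn=exampleuser,dc=grafana,dc=org
objectClass: inetOrgPerson
cn: exampleuser
sn: exampleuser
memberOf: cn=users,dc=grafana,dc=org

# matching group entry: member must list the user
dn: cn=users,dc=grafana,dc=org
objectClass: groupOfNames
cn: users
member: cn=exampleuser,dc=grafana,dc=org
```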
After adding ldif files to `prepopulate`:
1. Remove your current docker container: `docker rm docker_openldap_1`
2. Build: `docker-compose build`
3. `docker-compose up`

View File

@ -0,0 +1,10 @@
dn: cn=ldapadmin,dc=grafana,dc=org
mail: ldapadmin@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldapadmin
cn: ldapadmin
memberOf: cn=admins,dc=grafana,dc=org

View File

@ -0,0 +1,5 @@
dn: cn=admins,dc=grafana,dc=org
cn: admins
member: cn=ldapadmin,dc=grafana,dc=org
objectClass: groupOfNames
objectClass: top

View File

@ -0,0 +1,10 @@
dn: cn=ldapeditor,dc=grafana,dc=org
mail: ldapeditor@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldapeditor
cn: ldapeditor
memberOf: cn=users,dc=grafana,dc=org

View File

@ -0,0 +1,5 @@
dn: cn=users,dc=grafana,dc=org
cn: users
member: cn=ldapeditor,dc=grafana,dc=org
objectClass: groupOfNames
objectClass: top

View File

@ -0,0 +1,9 @@
dn: cn=ldapviewer,dc=grafana,dc=org
mail: ldapviewer@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldapviewer
cn: ldapviewer

View File

@ -0,0 +1,3 @@
FROM postgres:latest
ADD setup.sql /docker-entrypoint-initdb.d
CMD ["postgres"]

View File

@ -100,7 +100,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1521725946837,
"iteration": 1523320929325,
"links": [],
"panels": [
{
@ -443,7 +443,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -522,7 +526,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -601,7 +609,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -680,7 +692,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -759,7 +775,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -838,7 +858,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -927,7 +951,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1008,7 +1036,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1097,7 +1129,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1178,7 +1214,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1267,7 +1307,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1348,7 +1392,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1437,7 +1485,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1518,7 +1570,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1608,7 +1664,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1691,7 +1751,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1781,7 +1845,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1864,7 +1932,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1954,7 +2026,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2037,7 +2113,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2127,7 +2207,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2210,7 +2294,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": false,

View File

@ -1,5 +1,6 @@
postgrestest:
image: postgres:latest
build:
context: blocks/postgres_tests
environment:
POSTGRES_USER: grafanatest
POSTGRES_PASSWORD: grafanatest

View File

@ -0,0 +1,3 @@
CREATE DATABASE grafanadstest;
REVOKE CONNECT ON DATABASE grafanadstest FROM PUBLIC;
GRANT CONNECT ON DATABASE grafanadstest TO grafanatest;

View File

@ -22,6 +22,6 @@ log() {
log $RUN_CMD
$RUN_CMD
# Exit immidiately in case of any errors or when we have interactive terminal
# Exit immediately in case of any errors or when we have an interactive terminal
if [[ $? != 0 ]] || test -t 0; then exit $?; fi
log

View File

@ -138,6 +138,7 @@ datasources:
```
#### Custom Settings per Datasource
Please refer to each datasource's documentation for specific provisioning examples.
| Datasource | Misc |
| ---- | ---- |
@ -205,7 +206,7 @@ When Grafana starts, it will update/insert all dashboards available in the confi
### Reusable Dashboard Urls
If the dashboard in the json file contains an [uid](/reference/dashboard/#json-fields), Grafana will force insert/update on that uid. This allows you to migrate dashboards betweens Grafana instances and provisioning Grafana from configuration without breaking the urls given since the new dashboard url uses the uid as identifer.
If the dashboard in the json file contains an [uid](/reference/dashboard/#json-fields), Grafana will force insert/update on that uid. This allows you to migrate dashboards between Grafana instances and provision Grafana from configuration without breaking the urls, since the new dashboard url uses the uid as identifier.
When Grafana starts, it will update/insert all dashboards available in the configured folders. If you modify the file, the dashboard will also be updated.
By default Grafana will delete dashboards in the database if the file is removed. You can disable this behavior using the `disableDeletion` setting.
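As a sketch, a dashboard provider config with deletion disabled might look like the following (the provider name and path below are placeholders):
```yaml
apiVersion: 1

providers:
  - name: 'default'
    orgId: 1
    folder: ''
    type: file
    disableDeletion: true
    options:
      path: /var/lib/grafana/dashboards
```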

View File

@ -153,10 +153,10 @@ Prometheus Alertmanager | `prometheus-alertmanager` | no
# Enable images in notifications {#external-image-store}
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessable (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessible (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
Amazon S3, Webdav, Google Cloud Storage and Azure Blob Storage. So to set that up you need to configure the [external image uploader](/installation/configuration/#external-image-storage) in your grafana-server ini config file.
Be aware that some notifiers requires public access to the image to be able to include it in the notification. So make sure to enable public access to the images. If your using local image uploader, your Grafana instance need to be accessible by the internet.
Be aware that some notifiers require public access to the image to be able to include it in the notification. So make sure to enable public access to the images. If you're using the local image uploader, your Grafana instance needs to be accessible from the internet.
Currently only the Email Channel attaches images if no external image store is specified. To include images in alert notifications for other channels, you need to set up an external image store.
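As a sketch, an S3 setup in your grafana-server ini config file might look like this (the bucket url and key values below are placeholders; see the configuration docs linked above for the full set of options):
```bash
[external_image_storage]
provider = s3

[external_image_storage.s3]
bucket_url = https://my-grafana-images.s3.amazonaws.com/
access_key = <your access key>
secret_key = <your secret key>
```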

View File

@ -110,7 +110,7 @@ to `Keep Last State` in order to basically ignore them.
## Notifications
In alert tab you can also specify alert rule notifications along with a detailed messsage about the alert rule.
In the alert tab you can also specify alert rule notifications along with a detailed message about the alert rule.
The message can contain anything: information about how you might solve the issue, a link to a runbook, etc.
The actual notifications are configured and shared between multiple alerts. Read the

View File

@ -1,6 +1,6 @@
+++
title = "Contributor Licence Agreement (CLA)"
description = "Contributer Licence Agreement (CLA)"
description = "Contributor Licence Agreement (CLA)"
type = "docs"
aliases = ["/project/cla", "docs/contributing/cla.html"]
[menu.docs]
@ -101,4 +101,4 @@ TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU [OR US]
<br>
<br>
<br>
This CLA aggreement is based on the [Harmony Contributor Aggrement Template (combined)](http://www.harmonyagreements.org/agreements.html), [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/)
This CLA agreement is based on the [Harmony Contributor Agreement Template (combined)](http://www.harmonyagreements.org/agreements.html), [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/)

View File

@ -43,6 +43,40 @@ server is running on AWS you can use IAM Roles and authentication will be handle
Checkout AWS docs on [IAM Roles](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
## IAM Policies
Grafana needs permissions granted via IAM to be able to read CloudWatch metrics
and EC2 tags/instances. You can attach these permissions to IAM roles and
utilize Grafana's built-in support for assuming roles.
Here is a minimal policy example:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowReadingMetricsFromCloudWatch",
"Effect": "Allow",
"Action": [
"cloudwatch:ListMetrics",
"cloudwatch:GetMetricStatistics"
],
"Resource": "*"
},
{
"Sid": "AllowReadingTagsFromEC2",
"Effect": "Allow",
"Action": [
"ec2:DescribeTags",
"ec2:DescribeInstances"
],
"Resource": "*"
}
]
}
```
### AWS credentials file
Create a file at `~/.aws/credentials`. That is the `HOME` path for the user running grafana-server.
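A minimal credentials file sketch (the profile name and key values are placeholders):
```bash
[default]
aws_access_key_id = <your access key>
aws_secret_access_key = <your secret key>
```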
@ -173,3 +207,37 @@ Amazon provides 1 million CloudWatch API requests each month at no additional ch
it costs $0.01 per 1,000 GetMetricStatistics or ListMetrics requests. For each query Grafana will
issue a GetMetricStatistics request and every time you pick a dimension in the query editor
Grafana will issue a ListMetrics request.
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
Using a credentials file
```yaml
apiVersion: 1
datasources:
- name: Cloudwatch
type: cloudwatch
jsonData:
authType: credentials
defaultRegion: eu-west-2
```
Using `accessKey` and `secretKey`
```yaml
apiVersion: 1
datasources:
- name: Cloudwatch
type: cloudwatch
jsonData:
authType: keys
defaultRegion: eu-west-2
secureJsonData:
accessKey: "<your access key>"
secretKey: "<your secret key>"
```

View File

@ -55,6 +55,22 @@ a time pattern for the index name or a wildcard.
Be sure to specify your Elasticsearch version in the version selection dropdown. This is very important as there are differences in how queries are composed. Currently only 2.x and 5.x
are supported.
### Min time interval
A lower limit for the auto group by time interval. Recommended to be set to write frequency, for example `1m` if your data is written every minute.
This option can also be overridden/configured in a dashboard panel under data source options. It's important to note that this value **needs** to be formatted as a
number followed by a valid time identifier, e.g. `1m` (1 minute) or `30s` (30 seconds). The following time identifiers are supported:
Identifier | Description
------------ | -------------
`y` | year
`M` | month
`w` | week
`d` | day
`h` | hour
`m` | minute
`s` | second
`ms` | millisecond
## Metric Query editor
![](/img/docs/elasticsearch/query_editor.png)
@ -137,3 +153,23 @@ Query | You can leave the search query blank or specify a lucene query
Time | The name of the time field, needs to be a date field.
Text | Event description field.
Tags | Optional field name to use for event tags (can be an array or a CSV string).
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Elastic
type: elasticsearch
access: proxy
database: "[metrics-]YYYY.MM.DD"
url: http://localhost:9200
jsonData:
interval: Daily
timeField: "@timestamp"
```

View File

@ -120,3 +120,21 @@ queries via the Dashboard menu / Annotations view.
Graphite supports two ways to query annotations: a regular metric query, for which you use the `Graphite query` textbox, or a Graphite events query, for which you use the `Graphite event tags` textbox to
specify a tag or wildcard (leaving it empty should also work).
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Graphite
type: graphite
access: proxy
url: http://localhost:8080
jsonData:
graphiteVersion: "1.1"
```

View File

@ -39,6 +39,22 @@ Proxy access means that the Grafana backend will proxy all requests from the bro
`grafana-server`. This means that the URL you specify needs to be accessible from the server you are running Grafana on. Proxy access
mode is also more secure as the username & password will never reach the browser.
### Min time interval
A lower limit for the auto group by time interval. Recommended to be set to write frequency, for example `1m` if your data is written every minute.
This option can also be overridden/configured in a dashboard panel under data source options. It's important to note that this value **needs** to be formatted as a
number followed by a valid time identifier, e.g. `1m` (1 minute) or `30s` (30 seconds). The following time identifiers are supported:
Identifier | Description
------------ | -------------
`y` | year
`M` | month
`w` | week
`d` | day
`h` | hour
`m` | minute
`s` | second
`ms` | millisecond
## Query Editor
{{< docs-imagebox img="/img/docs/v45/influxdb_query_still.png" class="docs-image--no-shadow" animated-gif="/img/docs/v45/influxdb_query.gif" >}}
@ -174,3 +190,22 @@ SELECT title, description from events WHERE $timeFilter order asc
For InfluxDB you need to enter a query like in the above example. You need to have the ```where $timeFilter```
part. If you only select one column you will not need to enter anything in the column mapping fields. The
Tags field can be a comma separated string.
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: InfluxDB
type: influxdb
access: proxy
database: site
user: grafana
password: grafana
url: http://localhost:8086
```

View File

@ -225,3 +225,21 @@ tags | Optional field name to use for event tags as a comma separated string.
## Alerting
Time series queries should work in alerting conditions. Table formatted queries are not yet supported in alert rule conditions.
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: MySQL
type: mysql
url: localhost:3306
database: grafana
user: grafana
password: password
```

View File

@ -78,7 +78,7 @@ the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` o
### Nested Templating
One template variable can be used to filter tag values for another template varible. First parameter is the metric name,
One template variable can be used to filter tag values for another template variable. First parameter is the metric name,
second parameter is the tag key for which you need to find tag values, and after that all other dependent template variables.
Some examples are mentioned below to make nested template queries work successfully.
@ -88,3 +88,22 @@ Query | Description
*tag_values(cpu, hostname, env=$env, region=$region)* | Return tag values for cpu metric, selected env tag value, selected region tag value and tag key hostname
For details on OpenTSDB metric queries check out the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: OpenTsdb
type: opentsdb
access: proxy
url: http://localhost:4242
jsonData:
tsdbResolution: 1
tsdbVersion: 1
```

View File

@ -217,3 +217,25 @@ tags | Optional field name to use for event tags as a comma separated string.
Time series queries should work in alerting conditions. Table formatted queries are not yet supported in alert rule
conditions.
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Postgres
type: postgres
url: localhost:5432
database: grafana
user: grafana
secureJsonData:
password: "Password!"
jsonData:
sslmode: "disable" # disable/require/verify-ca/verify-full
```

View File

@ -34,7 +34,7 @@ Name | Description
*Basic Auth* | Enable basic authentication to the Prometheus data source.
*User* | Name of your Prometheus user
*Password* | Database user's password
*Scrape interval* | This will be used as a lower limit for the Prometheus step query parameter. Default value is 15s.
*Scrape interval* | This will be used as a lower limit for the Prometheus step query parameter. Default value is 15s.
## Query editor
@ -100,3 +100,19 @@ The step option is useful to limit the number of events returned from your query
## Getting Grafana metrics into Prometheus
Since 4.6.0 Grafana exposes metrics for Prometheus on the `/metrics` endpoint. We also bundle a dashboard within Grafana so you can get started viewing your metrics faster. You can import the bundled dashboard by going to the data source edit page and clicking the dashboard tab. There you can find a dashboard for Grafana and one for Prometheus. Import and start viewing all the metrics!
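If you want Prometheus to scrape these metrics, a minimal scrape config sketch could look like this (assuming Grafana is reachable at `localhost:3000`; `/metrics` is the default metrics path):
```yaml
scrape_configs:
  - job_name: 'grafana'
    static_configs:
      - targets: ['localhost:3000']
```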
## Configure datasource with provisioning
It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources).
Here are some provisioning examples for this datasource.
```yaml
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://localhost:9090
```

View File

@ -14,7 +14,7 @@ weight = 4
{{< docs-imagebox img="/img/docs/v45/alert-list-panel.png" max-width="850px" >}}
The alert list panel allows you to display your dashbords alerts. The list can be configured to show current state or recent state changes. You can read more about alerts [here](http://docs.grafana.org/alerting/rules).
The alert list panel allows you to display your dashboards' alerts. The list can be configured to show current state or recent state changes. You can read more about alerts [here](http://docs.grafana.org/alerting/rules).
## Alert List Options

View File

@ -25,7 +25,7 @@ The dashboard list panel allows you to display dynamic links to other dashboards
1. **Starred**: The starred dashboard selection displays starred dashboards in alphabetical order.
2. **Recently Viewed**: The recently viewed dashboard selection displays recently viewed dashboards in alphabetical order.
3. **Search**: The search dashboard selection displays dashboards by search query or tag(s).
4. **Show Headings**: When show headings is ticked the choosen list selection(Starred, Recently Viewed, Search) is shown as a heading.
4. **Show Headings**: When show headings is ticked the chosen list selection (Starred, Recently Viewed, Search) is shown as a heading.
5. **Max Items**: Max items sets the maximum number of items in the list.
6. **Query**: Here is where you enter the query you want to search by. Queries are case-insensitive, and partial values are accepted.
7. **Tags**: Here is where you enter the tag(s) you want to search by. Note that existing tags will not appear as you type, and *are* case sensitive. To see a list of existing tags, you can always return to the dashboard, open the Dashboard Picker at the top and click the `tags` link in the search bar.

View File

@ -30,7 +30,7 @@ The singlestat panel has a normal query editor to allow you define your exact me
* **total** - The sum of all the non-null values in the series
* **first** - The first value in the series
* **delta** - The total incremental increase (of a counter) in the series. An attempt is made to account for counter resets, but this will only be accurate for single instance metrics. Used to show total counter increase in time series.
* **diff** - The difference betwen 'current' (last value) and 'first'.
* **diff** - The difference between 'current' (last value) and 'first'.
* **range** - The difference between 'min' and 'max'. Useful to show the range of change for a gauge.
2. **Prefix/Postfix**: The Prefix/Postfix fields let you define a custom label to appear *before/after* the value. The `$__name` variable can be used here to use the series name or alias from the metric query.
3. **Units**: Units are appended to the Singlestat within the panel, and will respect the color and threshold settings for the value.
@ -70,7 +70,7 @@ Gauges gives a clear picture of how high a value is in it's context. It's a grea
{{< docs-imagebox img="/img/docs/v45/singlestat-gauge-options.png" max-width="500px" class="docs-image--right docs-image--no-shadow">}}
1. **Show**: The show checkbox will toggle wether the gauge is shown in the panel. When unselected, only the Singlestat value will appear.
1. **Show**: The show checkbox will toggle whether the gauge is shown in the panel. When unselected, only the Singlestat value will appear.
2. **Min/Max**: This sets the start and end point for the gauge.
3. **Threshold Labels**: Check if you want to show the threshold labels. Thresholds are set in the color options.
4. **Threshold Markers**: Check if you want to have a second meter showing the thresholds.

View File

@ -15,7 +15,7 @@ support for multiple Cloudwatch credentials.
<img src="/assets/img/features/table-panel.png">
The new table panel is very flexible, supporting both multiple modes for time series as well as for
table, annotation and raw JSON data. It also provides date formating and value formating and coloring options.
table, annotation and raw JSON data. It also provides date formatting and value formatting and coloring options.
### Time series to rows

View File

@ -33,7 +33,7 @@ You can enable/disable the shared tooltip from the dashboard settings menu or cy
{{< imgbox max-width="60%" img="/img/docs/v41/helptext_for_panel_settings.png" caption="Hovering help text" >}}
You can set a help text in the general tab on any panel. The help text is using Markdown to enable better formating and linking to other sites that can provide more information.
You can set a help text in the general tab on any panel. The help text is using Markdown to enable better formatting and linking to other sites that can provide more information.
<div class="clearfix"></div>

View File

@ -12,7 +12,7 @@ weight = -4
# What's New in Grafana v4.5
## Hightlights
## Highlights
### New prometheus query editor
@ -62,7 +62,7 @@ Datas source selection & options & help are now above your metric queries.
### Minor Changes
* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
* **InfluxDB**: Added paranthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
* **InfluxDB**: Added parentheses around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
## Bug Fixes

View File

@ -45,7 +45,7 @@ This makes exploring and filtering Prometheus data much easier.
* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
* **Graph**: Add support for local formating in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Jaeger**: Add support for open tracing using jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
* **CLI**: Make it possible to install plugins from any url [#5873](https://github.com/grafana/grafana/issues/5873)

View File

@ -180,14 +180,14 @@ Content-Type: application/json
## Delete Annotation By Id
`DELETE /api/annotation/:id`
`DELETE /api/annotations/:id`
Deletes the annotation that matches the specified id.
**Example Request**:
```http
DELETE /api/annotation/1 HTTP/1.1
DELETE /api/annotations/1 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
@ -204,14 +204,14 @@ Content-Type: application/json
## Delete Annotation By RegionId
`DELETE /api/annotation/region/:id`
`DELETE /api/annotations/region/:id`
Deletes the annotation that matches the specified region id. A region is an annotation that covers a timerange and has a start and end time. In the Grafana database, this is stored as two annotations connected by a region id.
**Example Request**:
```http
DELETE /api/annotation/region/1 HTTP/1.1
DELETE /api/annotations/region/1 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk

View File

@ -307,7 +307,7 @@ Content-Type: application/json
`PUT /api/orgs/:orgId`
Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented yet.
Update Organisation, fields *Address 1*, *Address 2*, *City* are not implemented yet.
**Example Request**:
@ -436,4 +436,4 @@ HTTP/1.1 200
Content-Type: application/json
{"message":"User removed from organization"}
```
```

View File

@ -482,7 +482,7 @@ Set api_url to the resource that returns [OpenID UserInfo](https://connect2id.co
First set up Grafana as an OpenId client "webapplication" in Okta. Then set the Base URIs to `https://<grafana domain>/` and set the Login redirect URIs to `https://<grafana domain>/login/generic_oauth`.
Finaly set up the generic oauth module like this:
Finally set up the generic oauth module like this:
```bash
[auth.generic_oauth]
name = Okta

View File

@ -34,7 +34,7 @@ sudo dpkg -i grafana_5.0.4_amd64.deb
Add the following line to your `/etc/apt/sources.list` file.
```bash
deb https://packagecloud.io/grafana/stable/debian/ jessie main
deb https://packagecloud.io/grafana/stable/debian/ stretch main
```
Use the above line even if you are on Ubuntu or another Debian version.
@ -42,7 +42,7 @@ There is also a testing repository if you want beta or release
candidates.
```bash
deb https://packagecloud.io/grafana/testing/debian/ jessie main
deb https://packagecloud.io/grafana/testing/debian/ stretch main
```
Then add the [Package Cloud](https://packagecloud.io/grafana) key. This

View File

@ -12,7 +12,7 @@ weight = 4
# Installing using Docker
Grafana is very easy to install and run using the offical docker container.
Grafana is very easy to install and run using the official docker container.
```bash
$ docker run -d -p 3000:3000 grafana/grafana

View File

@ -25,7 +25,7 @@ Before upgrading it can be a good idea to backup your Grafana database. This wil
If you use sqlite you only need to make a backup of your `grafana.db` file. This is usually located at `/var/lib/grafana/grafana.db` on unix systems.
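A minimal backup sketch, assuming a systemd-managed install and the default database path:
```bash
sudo systemctl stop grafana-server
sudo cp /var/lib/grafana/grafana.db /var/lib/grafana/grafana.db.backup
sudo systemctl start grafana-server
```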
If you are unsure what database you use and where it is stored check your grafana configuration file. If you
installed grafana to custom location using a binary tar/zip it is usally in `<grafana_install_dir>/data`.
installed grafana to a custom location using a binary tar/zip it is usually in `<grafana_install_dir>/data`.
#### mysql

View File

@ -71,13 +71,13 @@ Each field in the dashboard JSON is explained below with its usage:
| **timepicker** | timepicker metadata, see [timepicker section](#timepicker) for details |
| **templating** | templating metadata, see [templating section](#templating) for details |
| **annotations** | annotations metadata, see [annotations section](#annotations) for details |
| **schemaVersion** | version of the JSON schema (integer), incremented each time a Grafana update brings changes to the said schema |
| **schemaVersion** | version of the JSON schema (integer), incremented each time a Grafana update brings changes to said schema |
| **version** | version of the dashboard (integer), incremented each time the dashboard is updated |
| **panels** | panels array, see below for detail. |
## Panels
Panels are the building blocks a dashboard. It consists of datasource queries, type of graphs, aliases, etc. Panel JSON consists of an array of JSON objects, each representing a different panel. Most of the fields are common for all panels but some fields depends on the panel type. Following is an example of panel JSON of a text panel.
Panels are the building blocks of a dashboard. They consist of datasource queries, type of graphs, aliases, etc. Panel JSON consists of an array of JSON objects, each representing a different panel. Most of the fields are common for all panels but some fields depend on the panel type. Following is an example of panel JSON of a text panel.
```json
"panels": [
@ -105,7 +105,7 @@ The gridPos property describes the panel size and position in grid coordinates.
- `x` The x position, in same unit as `w`.
- `y` The y position, in same unit as `h`.
The grid has a negative gravity that moves panels up if there i empty space above a panel.
The grid has a negative gravity that moves panels up if there is empty space above a panel.
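For illustration, a half-width panel placed at the top-left corner could have a gridPos like this (values are hypothetical):
```json
"gridPos": {
  "h": 8,
  "w": 12,
  "x": 0,
  "y": 0
}
```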
### timepicker
@ -161,7 +161,7 @@ Usage of the fields is explained below:
### templating
`templating` fields contains array of template variables with their saved values along with some other metadata, for example:
The `templating` field contains an array of template variables with their saved values along with some other metadata, for example:
```json
"templating": {
@ -236,7 +236,7 @@ Usage of the above mentioned fields in the templating section is explained below
| Name | Usage |
| ---- | ----- |
| **enable** | whether templating is enabled or not |
| **list** | an array of objects representing, each representing one template variable |
| **list** | an array of objects each representing one template variable |
| **allFormat** | format to use while fetching all values from datasource, eg: `wildcard`, `glob`, `regex`, `pipe`, etc. |
| **current** | shows current selected variable text/value on the dashboard |
| **datasource** | shows datasource for the variables |

View File

@ -49,7 +49,7 @@ Click the back button to rewind to the previous Dashboard in the Playlist.
In TV mode the top navbar, row & panel controls will all fade to transparent.
This happens automatically after one minute of user inactivity but can also be toggled manually
with the `d v` sequence shortcut. Any mouse movement or keyboard action will
with the `d v` sequence shortcut, or by appending the parameter `?inactive` to the dashboard URL. Any mouse movement or keyboard action will
restore navbar & controls.
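For example, to open a dashboard with the controls already faded (hypothetical host and dashboard path):
```
http://grafana.example.com/dashboard/db/my-dashboard?inactive
```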
Another feature is the kiosk mode - in kiosk mode the navbar is completely hidden/removed from view. This can be enabled with the `d k`

View File

@ -168,7 +168,7 @@ Option | Description
*Include All option* | Add a special `All` option whose value includes all options.
*Custom all value* | By default the `All` value will include all options in a combined expression. This can become very long and can cause performance problems. Many times it can be better to specify a custom all value, like a wildcard regex. To make it possible to have custom regex, globs or lucene syntax in the **Custom all value** option it is never escaped, so you will have to think about what is a valid value for your data source.
### Formating multiple values
### Formatting multiple values
Interpolating a variable with multiple values selected is tricky as it is not straightforward how to format the multiple values into a string that
is valid in the given context where the variable is used. Grafana tries to solve this by allowing each data source plugin to
@ -186,7 +186,7 @@ break the regex expression.
**Elasticsearch** uses lucene query syntax, so the same variable would, in this case, be formatted as `("host1" OR "host2" OR "host3")`. In this case every value
needs to be escaped so that the value can contain lucene control words and quotation marks.
#### Formating troubles
#### Formatting troubles
Automatic escaping & formatting can cause problems and it can be tricky to grasp the logic behind it.
Especially for InfluxDB and Prometheus where the use of regex syntax requires that the variable is used in regex operator context.

View File

@ -108,7 +108,7 @@ In this example we use Apache as a reverseProxy in front of Grafana. Apache hand
* The next part of the configuration is the tricky part. We use Apache's rewrite engine to create our **X-WEBAUTH-USER header**, populated with the authenticated user.
* **RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER}, NS]**: This line is a little bit of magic. What it does, is for every request use the rewriteEngines look-ahead (LA-U) feature to determine what the REMOTE_USER variable would be set to after processing the request. Then assign the result to the variable PROXY_USER. This is neccessary as the REMOTE_USER variable is not available to the RequestHeader function.
* **RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER}, NS]**: This line is a little bit of magic. What it does is, for every request, use the rewrite engine's look-ahead (LA-U) feature to determine what the REMOTE_USER variable would be set to after processing the request. Then assign the result to the variable PROXY_USER. This is necessary as the REMOTE_USER variable is not available to the RequestHeader function.
* **RequestHeader set X-WEBAUTH-USER “%{PROXY_USER}e”**: With the authenticated username now stored in the PROXY_USER variable, we create a new HTTP request header that will be sent to our backend Grafana containing the username.
@ -149,7 +149,7 @@ auto_sign_up = true
##### Grafana Container
For this example, we use the offical Grafana docker image available at [Docker Hub](https://hub.docker.com/r/grafana/grafana/)
For this example, we use the official Grafana docker image available at [Docker Hub](https://hub.docker.com/r/grafana/grafana/)
* Create a file `grafana.ini` with the following contents
@ -166,7 +166,7 @@ header_property = username
auto_sign_up = true
```
* Launch the Grafana container, using our custom grafana.ini to replace `/etc/grafana/grafana.ini`. We dont expose any ports for this container as it will only be connected to by our Apache container.
* Launch the Grafana container, using our custom grafana.ini to replace `/etc/grafana/grafana.ini`. We don't expose any ports for this container as it will only be connected to by our Apache container.
```bash
docker run -i -v $(pwd)/grafana.ini:/etc/grafana/grafana.ini --name grafana grafana/grafana
@ -174,7 +174,7 @@ docker run -i -v $(pwd)/grafana.ini:/etc/grafana/grafana.ini --name grafana graf
### Apache Container
For this example we use the offical Apache docker image available at [Docker Hub](https://hub.docker.com/_/httpd/)
For this example we use the official Apache docker image available at [Docker Hub](https://hub.docker.com/_/httpd/)
* Create a file `httpd.conf` with the following contents
@ -244,4 +244,4 @@ ProxyPassReverse / http://grafana:3000/
### Use Grafana
With our Grafana and Apache containers running, you can now connect to http://localhost/ and log in using the username/password we created in the htpasswd file.
With our Grafana and Apache containers running, you can now connect to http://localhost/ and log in using the username/password we created in the htpasswd file.

View File

@ -102,12 +102,11 @@
"watch": "webpack --progress --colors --watch --config scripts/webpack/webpack.dev.js",
"build": "grunt build",
"test": "grunt test",
"test:coverage": "grunt test --coverage=true",
"lint": "tslint -c tslint.json --project tsconfig.json --type-check",
"karma": "node ./node_modules/grunt-cli/bin/grunt karma:dev",
"jest": "node ./node_modules/jest-cli/bin/jest.js --notify --watch",
"api-tests": "node ./node_modules/jest-cli/bin/jest.js --notify --watch --config=tests/api/jest.js",
"precommit": "lint-staged && node ./node_modules/grunt-cli/bin/grunt precommit"
"karma": "grunt karma:dev",
"jest": "jest --notify --watch",
"api-tests": "jest --notify --watch --config=tests/api/jest.js",
"precommit": "lint-staged && grunt precommit"
},
"lint-staged": {
"*.{ts,tsx}": [
@ -136,6 +135,7 @@
"angular-route": "^1.6.6",
"angular-sanitize": "^1.6.6",
"babel-polyfill": "^6.26.0",
"baron": "^3.0.3",
"brace": "^0.10.0",
"classnames": "^2.2.5",
"clipboard": "^1.7.1",
@ -151,7 +151,6 @@
"moment": "^2.18.1",
"mousetrap": "^1.6.0",
"mousetrap-global-bind": "^1.1.0",
"perfect-scrollbar": "^1.2.0",
"prop-types": "^15.6.0",
"react": "^16.2.0",
"react-dom": "^16.2.0",

View File

@ -118,9 +118,14 @@ func setIndexViewData(c *m.ReqContext) (*dtos.IndexViewData, error) {
})
if c.IsSignedIn {
// Only set login if it's different from the name
var login string
if c.SignedInUser.Login != c.SignedInUser.NameOrFallback() {
login = c.SignedInUser.Login
}
profileNode := &dtos.NavLink{
Text: c.SignedInUser.NameOrFallback(),
SubTitle: c.SignedInUser.Login,
SubTitle: login,
Id: "profile",
Img: data.User.GravatarUrl,
Url: setting.AppSubUrl + "/profile",
@ -284,6 +289,7 @@ func setIndexViewData(c *m.ReqContext) (*dtos.IndexViewData, error) {
data.NavTree = append(data.NavTree, &dtos.NavLink{
Text: "Help",
SubTitle: fmt.Sprintf(`Grafana v%s (%s)`, setting.BuildVersion, setting.BuildCommit),
Id: "help",
Url: "#",
Icon: "gicon gicon-question",

View File

@ -75,7 +75,7 @@ func GetTestDataScenarios(c *m.ReqContext) Response {
return JSON(200, &result)
}
// Genereates a index out of range error
// Generates an index out of range error
func GenerateError(c *m.ReqContext) Response {
var array []string
return JSON(200, array[20])

View File

@ -33,7 +33,7 @@ func ValidateOrgPlaylist(c *m.ReqContext) {
return
}
if len(items) == 0 {
if len(items) == 0 && c.Context.Req.Method != "DELETE" {
c.JsonApiErr(404, "Playlist is empty", itemsErr)
return
}

View File

@ -189,12 +189,6 @@ func (proxy *DataSourceProxy) getDirector() func(req *http.Request) {
}
func (proxy *DataSourceProxy) validateRequest() error {
if proxy.ds.Type == m.DS_INFLUXDB {
if proxy.ctx.Query("db") != proxy.ds.Database {
return errors.New("Datasource is not configured to allow this database")
}
}
if !checkWhiteList(proxy.ctx, proxy.targetUrl.Host) {
return errors.New("Target url is not a valid target")
}

View File

@ -48,7 +48,7 @@ type StaticOptions struct {
// Expires defines which user-defined function to use for producing a HTTP Expires Header
// https://developers.google.com/speed/docs/insights/LeverageBrowserCaching
AddHeaders func(ctx *macaron.Context)
// FileSystem is the interface for supporting any implmentation of file system.
// FileSystem is the interface for supporting any implementation of file system.
FileSystem http.FileSystem
}

View File

@ -33,7 +33,7 @@ func validateInput(c CommandLine, pluginFolder string) error {
fileInfo, err := os.Stat(pluginsDir)
if err != nil {
if err = os.MkdirAll(pluginsDir, os.ModePerm); err != nil {
return errors.New(fmt.Sprintf("pluginsDir (%s) is not a directory", pluginsDir))
return errors.New(fmt.Sprintf("pluginsDir (%s) is not a writable directory", pluginsDir))
}
return nil
}

View File

@ -111,7 +111,7 @@ func (g *GrafanaServerImpl) initLogging() {
})
if err != nil {
g.log.Error(err.Error())
fmt.Fprintf(os.Stderr, "Failed to start grafana. error: %s\n", err.Error())
os.Exit(1)
}

View File

@ -22,7 +22,7 @@ const (
)
var (
// changeTypeToSymbol is used for populating the terminating characer in
// changeTypeToSymbol is used for populating the terminating character in
// the diff
changeTypeToSymbol = map[ChangeType]string{
ChangeNil: "",

View File

@ -76,10 +76,10 @@ func TestFirst(t *testing.T) {
assert.True(s == "fallback", "must get string return fallback")
s, err = j.GetString("name")
assert.True(s == "anton" && err == nil, "name shoud match")
assert.True(s == "anton" && err == nil, "name should match")
s, err = j.GetString("address", "street")
assert.True(s == "Street 42" && err == nil, "street shoud match")
assert.True(s == "Street 42" && err == nil, "street should match")
//log.Println("s: ", s.String())
_, err = j.GetNumber("age")

View File

@ -35,7 +35,7 @@ var (
slash = []byte("/")
)
// stack returns a nicely formated stack frame, skipping skip frames
// stack returns a nicely formatted stack frame, skipping skip frames
func stack(skip int) []byte {
buf := new(bytes.Buffer) // the returned data
// As we loop, we open files and read them. These variables record the currently

View File

@ -157,7 +157,7 @@ func NewDashboardFromJson(data *simplejson.Json) *Dashboard {
return dash
}
// GetDashboardModel turns the command into the savable model
// GetDashboardModel turns the command into the saveable model
func (cmd *SaveDashboardCommand) GetDashboardModel() *Dashboard {
dash := NewDashboardFromJson(cmd.Dashboard)
userId := cmd.UserId

View File

@ -32,7 +32,7 @@ type Folder struct {
HasAcl bool
}
// GetDashboardModel turns the command into the savable model
// GetDashboardModel turns the command into the saveable model
func (cmd *CreateFolderCommand) GetDashboardModel(orgId int64, userId int64) *Dashboard {
dashFolder := NewDashboardFolder(strings.TrimSpace(cmd.Title))
dashFolder.OrgId = orgId

View File

@ -12,7 +12,7 @@ import (
func TestPluginScans(t *testing.T) {
Convey("When scaning for plugins", t, func() {
Convey("When scanning for plugins", t, func() {
setting.StaticRootPath, _ = filepath.Abs("../../public/")
setting.Cfg = ini.Empty()
err := initPlugins(context.Background())

View File

@ -37,7 +37,7 @@ func GetPluginSettings(orgId int64) (map[string]*m.PluginSettingInfoDTO, error)
// if it's included in app check app settings
if pluginDef.IncludedInAppId != "" {
// app componets are by default disabled
// app components are by default disabled
opt.Enabled = false
if appSettings, ok := pluginMap[pluginDef.IncludedInAppId]; ok {

View File

@ -10,7 +10,7 @@ import (
)
type FakeEvalHandler struct {
SuccessCallID int // 0 means never sucess
SuccessCallID int // 0 means never succeeds
CallNb int
}
@ -87,7 +87,7 @@ func TestEngineProcessJob(t *testing.T) {
Convey("Should trigger as many retries as needed", func() {
Convey("never sucess -> max retries number", func() {
Convey("never success -> max retries number", func() {
expectedAttempts := alertMaxAttempts
evalHandler := NewFakeEvalHandler(0)
engine.evalHandler = evalHandler
@ -96,7 +96,7 @@ func TestEngineProcessJob(t *testing.T) {
So(evalHandler.CallNb, ShouldEqual, expectedAttempts)
})
Convey("always sucess -> never retry", func() {
Convey("always success -> never retry", func() {
expectedAttempts := 1
evalHandler := NewFakeEvalHandler(1)
engine.evalHandler = evalHandler
@ -105,7 +105,7 @@ func TestEngineProcessJob(t *testing.T) {
So(evalHandler.CallNb, ShouldEqual, expectedAttempts)
})
Convey("some errors before sucess -> some retries", func() {
Convey("some errors before success -> some retries", func() {
expectedAttempts := int(math.Ceil(float64(alertMaxAttempts) / 2))
evalHandler := NewFakeEvalHandler(expectedAttempts)
engine.evalHandler = evalHandler

View File

@ -72,7 +72,10 @@ func (this *DingDingNotifier) Notify(evalContext *alerting.EvalContext) error {
this.log.Error("Failed to create Json data", "error", err, "dingding", this.Name)
}
body, _ := bodyJSON.MarshalJSON()
body, err := bodyJSON.MarshalJSON()
if err != nil {
return err
}
cmd := &m.SendWebhookSync{
Url: this.Url,

View File

@ -111,7 +111,7 @@ func (this *HipChatNotifier) Notify(evalContext *alerting.EvalContext) error {
}
message := ""
if evalContext.Rule.State != models.AlertStateOK { //dont add message when going back to alert state ok.
if evalContext.Rule.State != models.AlertStateOK { //don't add message when going back to alert state ok.
message += " " + evalContext.Rule.Message
}

View File

@ -129,7 +129,7 @@ func (this *SlackNotifier) Notify(evalContext *alerting.EvalContext) error {
}
message := this.Mention
if evalContext.Rule.State != m.AlertStateOK { //dont add message when going back to alert state ok.
if evalContext.Rule.State != m.AlertStateOK { //don't add message when going back to alert state ok.
message += " " + evalContext.Rule.Message
}
image_url := ""

View File

@ -13,7 +13,7 @@ func init() {
alerting.RegisterNotifier(&alerting.NotifierPlugin{
Type: "teams",
Name: "Microsoft Teams",
Description: "Sends notifications using Incomming Webhook connector to Microsoft Teams",
Description: "Sends notifications using Incoming Webhook connector to Microsoft Teams",
Factory: NewTeamsNotifier,
OptionsTemplate: `
<h3 class="page-heading">Teams settings</h3>
@ -76,7 +76,7 @@ func (this *TeamsNotifier) Notify(evalContext *alerting.EvalContext) error {
}
message := this.Mention
if evalContext.Rule.State != m.AlertStateOK { //dont add message when going back to alert state ok.
if evalContext.Rule.State != m.AlertStateOK { //don't add message when going back to alert state ok.
message += " " + evalContext.Rule.Message
} else {
message += " " // summary must not be empty

View File

@ -100,7 +100,7 @@ func TestTelegramNotifier(t *testing.T) {
So(caption, ShouldContainSubstring, "Some kind of message that is too long for appending to our pretty little message, this line is actually exactly 197 chars long and I will get there in the end I promise ")
})
Convey("Metrics should be skipped if they dont fit", func() {
Convey("Metrics should be skipped if they don't fit", func() {
evalContext := alerting.NewEvalContext(nil, &alerting.Rule{
Name: "This is an alarm",
Message: "Some kind of message that is too long for appending to our pretty little message, this line is actually exactly 197 chars long and I will get there in the end I ",

View File

@ -56,7 +56,7 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error {
if err := bus.Dispatch(cmd); err != nil {
if err == m.ErrCannotChangeStateOnPausedAlert {
handler.log.Error("Cannot change state on alert thats pause", "error", err)
handler.log.Error("Cannot change state on alert that's paused", "error", err)
return err
}

View File

@ -58,7 +58,7 @@ func (s *SchedulerImpl) Tick(tickTime time.Time, execQueue chan *Job) {
if job.OffsetWait && now%job.Offset == 0 {
job.OffsetWait = false
s.enque(job, execQueue)
s.enqueue(job, execQueue)
continue
}
@ -66,13 +66,13 @@ func (s *SchedulerImpl) Tick(tickTime time.Time, execQueue chan *Job) {
if job.Offset > 0 {
job.OffsetWait = true
} else {
s.enque(job, execQueue)
s.enqueue(job, execQueue)
}
}
}
}
func (s *SchedulerImpl) enque(job *Job, execQueue chan *Job) {
func (s *SchedulerImpl) enqueue(job *Job, execQueue chan *Job) {
s.log.Debug("Scheduler: Putting job on to exec queue", "name", job.Rule.Name, "id", job.Rule.Id)
execQueue <- job
}

View File

@ -113,7 +113,7 @@ func (g *dashboardGuardianImpl) checkAcl(permission m.PermissionType, acl []*m.D
return false, err
}
// evalute team rules
// evaluate team rules
for _, p := range acl {
for _, ug := range teams {
if ug.Id == p.TeamId && p.Permission >= permission {

View File

@ -58,7 +58,7 @@ func (cr *configReader) readConfig() ([]*DashboardsAsConfig, error) {
files, err := ioutil.ReadDir(cr.path)
if err != nil {
cr.log.Error("cant read dashboard provisioning files from directory", "path", cr.path)
cr.log.Error("can't read dashboard provisioning files from directory", "path", cr.path)
return dashboards, nil
}

View File

@ -19,7 +19,7 @@ func (cr *configReader) readConfig(path string) ([]*DatasourcesAsConfig, error)
files, err := ioutil.ReadDir(path)
if err != nil {
cr.log.Error("cant read datasource provisioning files from directory", "path", path)
cr.log.Error("can't read datasource provisioning files from directory", "path", path)
return datasources, nil
}

View File

@ -21,7 +21,7 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
}
err := GetAlertNotifications(cmd)
fmt.Printf("errror %v", err)
fmt.Printf("error %v", err)
So(err, ShouldBeNil)
So(cmd.Result, ShouldBeNil)
})

View File

@ -202,7 +202,7 @@ func (r *SqlAnnotationRepo) Find(query *annotations.ItemQuery) ([]*annotations.I
}
if query.Limit == 0 {
query.Limit = 10
query.Limit = 100
}
sql.WriteString(fmt.Sprintf(" ORDER BY epoch DESC LIMIT %v", query.Limit))

View File

@ -258,7 +258,7 @@ func InitTestDB(t *testing.T) *xorm.Engine {
// x.ShowSQL()
if err != nil {
t.Fatalf("Failed to init in memory sqllite3 db %v", err)
t.Fatalf("Failed to init test database: %v", err)
}
sqlutil.CleanDB(x)
@ -269,3 +269,19 @@ func InitTestDB(t *testing.T) *xorm.Engine {
return x
}
func IsTestDbMySql() bool {
if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
return db == dbMySql
}
return false
}
func IsTestDbPostgres() bool {
if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
return db == dbPostgres
}
return false
}

View File

@ -333,6 +333,7 @@ func GetUserOrgList(query *m.GetUserOrgListQuery) error {
sess.Join("INNER", "org", "org_user.org_id=org.id")
sess.Where("org_user.user_id=?", query.UserId)
sess.Cols("org.name", "org_user.role", "org_user.org_id")
sess.OrderBy("org.name")
err := sess.Find(&query.Result)
return err
}

View File

@ -223,7 +223,7 @@ func shouldRedactURLKey(s string) bool {
return strings.Contains(uppercased, "DATABASE_URL")
}
func applyEnvVariableOverrides() {
func applyEnvVariableOverrides() error {
appliedEnvOverrides = make([]string, 0)
for _, section := range Cfg.Sections() {
for _, key := range section.Keys() {
@ -238,7 +238,10 @@ func applyEnvVariableOverrides() {
envValue = "*********"
}
if shouldRedactURLKey(envKey) {
u, _ := url.Parse(envValue)
u, err := url.Parse(envValue)
if err != nil {
return fmt.Errorf("could not parse environment variable. key: %s, value: %s. error: %v", envKey, envValue, err)
}
ui := u.User
if ui != nil {
_, exists := ui.Password()
@ -252,6 +255,8 @@ func applyEnvVariableOverrides() {
}
}
}
return nil
}
func applyCommandLineDefaultProperties(props map[string]string) {
@ -377,7 +382,7 @@ func loadSpecifedConfigFile(configFile string) error {
return nil
}
func loadConfiguration(args *CommandLineArgs) {
func loadConfiguration(args *CommandLineArgs) error {
var err error
// load config defaults
@ -395,7 +400,7 @@ func loadConfiguration(args *CommandLineArgs) {
if err != nil {
fmt.Println(fmt.Sprintf("Failed to parse defaults.ini, %v", err))
os.Exit(1)
return
return err
}
Cfg.BlockMode = false
@ -413,7 +418,10 @@ func loadConfiguration(args *CommandLineArgs) {
}
// apply environment overrides
applyEnvVariableOverrides()
err = applyEnvVariableOverrides()
if err != nil {
return err
}
// apply command line overrides
applyCommandLineProperties(commandLineProps)
@ -424,6 +432,8 @@ func loadConfiguration(args *CommandLineArgs) {
// update data path and logging config
DataPath = makeAbsolute(Cfg.Section("paths").Key("data").String(), HomePath)
initLogging()
return err
}
func pathExists(path string) bool {
@ -471,7 +481,10 @@ func validateStaticRootPath() error {
func NewConfigContext(args *CommandLineArgs) error {
setHomePath(args)
loadConfiguration(args)
err := loadConfiguration(args)
if err != nil {
return err
}
Env = Cfg.Section("").Key("app_mode").MustString("development")
InstanceName = Cfg.Section("").Key("instance_name").MustString("unknown_instance_name")

View File

@ -37,6 +37,13 @@ func TestLoadingSettings(t *testing.T) {
So(appliedEnvOverrides, ShouldContain, "GF_SECURITY_ADMIN_PASSWORD=*********")
})
Convey("Should return an error when url is invalid", func() {
os.Setenv("GF_DATABASE_URL", "postgres.%31://grafana:secret@postgres:5432/grafana")
err := NewConfigContext(&CommandLineArgs{HomePath: "../../"})
So(err, ShouldNotBeNil)
})
Convey("Should replace password in URL when url environment is defined", func() {
os.Setenv("GF_DATABASE_URL", "mysql://user:secret@localhost:3306/database")
NewConfigContext(&CommandLineArgs{HomePath: "../../"})

View File

@ -82,11 +82,11 @@ func (m *MsSqlMacroEngine) evaluateMacro(name string, args []string) (string, er
if len(args) == 0 {
return "", fmt.Errorf("missing time column argument for macro %v", name)
}
return fmt.Sprintf("%s >= DATEADD(s, %d, '1970-01-01') AND %s <= DATEADD(s, %d, '1970-01-01')", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
return fmt.Sprintf("%s >= DATEADD(s, %d, '1970-01-01') AND %s <= DATEADD(s, %d, '1970-01-01')", args[0], m.TimeRange.GetFromAsSecondsEpoch(), args[0], m.TimeRange.GetToAsSecondsEpoch()), nil
case "__timeFrom":
return fmt.Sprintf("DATEADD(second, %d, '1970-01-01')", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
return fmt.Sprintf("DATEADD(second, %d, '1970-01-01')", m.TimeRange.GetFromAsSecondsEpoch()), nil
case "__timeTo":
return fmt.Sprintf("DATEADD(second, %d, '1970-01-01')", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
return fmt.Sprintf("DATEADD(second, %d, '1970-01-01')", m.TimeRange.GetToAsSecondsEpoch()), nil
case "__timeGroup":
if len(args) < 2 {
return "", fmt.Errorf("macro %v needs time column and interval", name)
@ -113,11 +113,11 @@ func (m *MsSqlMacroEngine) evaluateMacro(name string, args []string) (string, er
if len(args) == 0 {
return "", fmt.Errorf("missing time column argument for macro %v", name)
}
return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], m.TimeRange.GetFromAsSecondsEpoch(), args[0], m.TimeRange.GetToAsSecondsEpoch()), nil
case "__unixEpochFrom":
return fmt.Sprintf("%d", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
return fmt.Sprintf("%d", m.TimeRange.GetFromAsSecondsEpoch()), nil
case "__unixEpochTo":
return fmt.Sprintf("%d", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
return fmt.Sprintf("%d", m.TimeRange.GetToAsSecondsEpoch()), nil
default:
return "", fmt.Errorf("Unknown macro %v", name)
}

View File

@ -1,6 +1,8 @@
package mssql
import (
"fmt"
"strconv"
"testing"
"time"
@ -13,112 +15,213 @@ import (
func TestMacroEngine(t *testing.T) {
Convey("MacroEngine", t, func() {
engine := &MsSqlMacroEngine{}
timeRange := &tsdb.TimeRange{From: "5m", To: "now"}
query := &tsdb.Query{
Model: simplejson.New(),
}
Convey("interpolate __time function", func() {
sql, err := engine.Interpolate(query, nil, "select $__time(time_column)")
So(err, ShouldBeNil)
Convey("Given a time range between 2018-04-12 00:00 and 2018-04-12 00:05", func() {
from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
to := from.Add(5 * time.Minute)
timeRange := tsdb.NewFakeTimeRange("5m", "now", to)
So(sql, ShouldEqual, "select time_column AS time")
Convey("interpolate __time function", func() {
sql, err := engine.Interpolate(query, nil, "select $__time(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "select time_column AS time")
})
Convey("interpolate __timeEpoch function", func() {
sql, err := engine.Interpolate(query, nil, "select $__timeEpoch(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "select DATEDIFF(second, '1970-01-01', time_column) AS time")
})
Convey("interpolate __timeEpoch function wrapped in aggregation", func() {
sql, err := engine.Interpolate(query, nil, "select min($__timeEpoch(time_column))")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "select min(DATEDIFF(second, '1970-01-01', time_column) AS time)")
})
Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= DATEADD(s, %d, '1970-01-01') AND time_column <= DATEADD(s, %d, '1970-01-01')", from.Unix(), to.Unix()))
})
Convey("interpolate __timeGroup function", func() {
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "GROUP BY CAST(ROUND(DATEDIFF(second, '1970-01-01', time_column)/300.0, 0) as bigint)*300")
})
Convey("interpolate __timeGroup function with spaces around arguments", func() {
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column , '5m')")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "GROUP BY CAST(ROUND(DATEDIFF(second, '1970-01-01', time_column)/300.0, 0) as bigint)*300")
})
Convey("interpolate __timeGroup function with fill (value = NULL)", func() {
_, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m', NULL)")
fill := query.Model.Get("fill").MustBool()
fillNull := query.Model.Get("fillNull").MustBool()
fillInterval := query.Model.Get("fillInterval").MustInt()
So(err, ShouldBeNil)
So(fill, ShouldBeTrue)
So(fillNull, ShouldBeTrue)
So(fillInterval, ShouldEqual, 5*time.Minute.Seconds())
})
Convey("interpolate __timeGroup function with fill (value = float)", func() {
_, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m', 1.5)")
fill := query.Model.Get("fill").MustBool()
fillValue := query.Model.Get("fillValue").MustFloat64()
fillInterval := query.Model.Get("fillInterval").MustInt()
So(err, ShouldBeNil)
So(fill, ShouldBeTrue)
So(fillValue, ShouldEqual, 1.5)
So(fillInterval, ShouldEqual, 5*time.Minute.Seconds())
})
Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", from.Unix()))
})
Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", to.Unix()))
})
Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select time_column >= %d AND time_column <= %d", from.Unix(), to.Unix()))
})
Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})
Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})
Convey("interpolate __timeEpoch function", func() {
sql, err := engine.Interpolate(query, nil, "select $__timeEpoch(time_column)")
So(err, ShouldBeNil)
Convey("Given a time range between 1960-02-01 07:00 and 1965-02-03 08:00", func() {
from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
to := time.Date(1965, 2, 3, 8, 0, 0, 0, time.UTC)
timeRange := tsdb.NewTimeRange(strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10), strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))
Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= DATEADD(s, %d, '1970-01-01') AND time_column <= DATEADD(s, %d, '1970-01-01')", from.Unix(), to.Unix()))
})
Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", from.Unix()))
})
Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", to.Unix()))
})
Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select time_column >= %d AND time_column <= %d", from.Unix(), to.Unix()))
})
Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})
Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})
Convey("Given a time range between 1960-02-01 07:00 and 1980-02-03 08:00", func() {
from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
to := time.Date(1980, 2, 3, 8, 0, 0, 0, time.UTC)
timeRange := tsdb.NewTimeRange(strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10), strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))
Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= DATEADD(s, %d, '1970-01-01') AND time_column <= DATEADD(s, %d, '1970-01-01')", from.Unix(), to.Unix()))
})
Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", from.Unix()))
})
Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", to.Unix()))
})
Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select time_column >= %d AND time_column <= %d", from.Unix(), to.Unix()))
})
Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})
Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})
})
}

View File

@ -8,8 +8,6 @@ import (
"strconv"
"strings"
"time"
"math"
_ "github.com/denisenkom/go-mssqldb"
@ -231,15 +229,18 @@ func (e MssqlQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.
return err
}
// converts column named time to unix timestamp in milliseconds to make
// native mssql datetime types and epoch dates work in
// annotation and table queries.
tsdb.ConvertSqlTimeColumnToEpochMs(values, timeIndex)
switch columnValue := values[timeIndex].(type) {
case int64:
timestamp = float64(columnValue)
case float64:
timestamp = columnValue
default:
return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp, got: %T %v", columnValue, columnValue)
}
if metricIndex >= 0 {

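The comment above delegates all time normalization to a shared helper whose implementation is not part of this diff. A simplified, assumed sketch of the magnitude-based conversion the tests below rely on — seconds scaled up, nanoseconds scaled down, milliseconds passed through; the real helper in pkg/tsdb also covers the nullable pointer and 32-bit variants:

    // editor's sketch, not the actual pkg/tsdb code (assumes math and time imports)
    func convertSqlTimeColumnToEpochMs(values []interface{}, timeIndex int) {
        if timeIndex < 0 {
            return
        }
        toMs := func(v float64) float64 {
            switch {
            case math.Abs(v) >= 1e17: // nanosecond scale
                return v / 1e6
            case math.Abs(v) <= 1e11: // second scale
                return v * 1e3
            default: // already milliseconds
                return v
            }
        }
        switch v := values[timeIndex].(type) {
        case time.Time:
            values[timeIndex] = float64(v.UnixNano()) / 1e6
        case int64:
            values[timeIndex] = int64(toMs(float64(v)))
        case float64:
            values[timeIndex] = toMs(v)
        }
    }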
View File

@ -16,10 +16,10 @@ import (
)
// The tests require a MSSQL db named grafanatest and a user/password grafana/Password!
// Use the docker/blocks/mssql_tests/docker-compose.yaml to spin up a
// preconfigured MSSQL server suitable for running these tests.
// There is also a dashboard.json in same directory that you can import to Grafana
// once you've created a datasource for the test server/database.
// If needed, change the variable below to the IP address of the database.
var serverIP string = "localhost"
@ -188,10 +188,8 @@ func TestMSSQL(t *testing.T) {
})
}
_, err = sess.InsertMulti(series)
So(err, ShouldBeNil)
Convey("When doing a metric query using timeGroup", func() {
query := &tsdb.TsdbQuery{
@ -312,10 +310,18 @@ func TestMSSQL(t *testing.T) {
Convey("Given a table with metrics having multiple values and measurements", func() {
type metric_values struct {
Time time.Time
TimeInt64 int64 `xorm:"bigint 'timeInt64' not null"`
TimeInt64Nullable *int64 `xorm:"bigint 'timeInt64Nullable' null"`
TimeFloat64 float64 `xorm:"float 'timeFloat64' not null"`
TimeFloat64Nullable *float64 `xorm:"float 'timeFloat64Nullable' null"`
TimeInt32 int32 `xorm:"int(11) 'timeInt32' not null"`
TimeInt32Nullable *int32 `xorm:"int(11) 'timeInt32Nullable' null"`
TimeFloat32 float32 `xorm:"float(11) 'timeFloat32' not null"`
TimeFloat32Nullable *float32 `xorm:"float(11) 'timeFloat32Nullable' null"`
Measurement string
ValueOne int64 `xorm:"integer 'valueOne'"`
ValueTwo int64 `xorm:"integer 'valueTwo'"`
}
if exist, err := sess.IsTableExist(metric_values{}); err != nil || exist {
@ -330,26 +336,219 @@ func TestMSSQL(t *testing.T) {
return rand.Int63n(max-min) + min
}
var tInitial time.Time
series := []*metric_values{}
for i, t := range genTimeRangeByInterval(fromStart.Add(-30*time.Minute), 90*time.Minute, 5*time.Minute) {
if i == 0 {
tInitial = t
}
tSeconds := t.Unix()
tSecondsInt32 := int32(tSeconds)
tSecondsFloat32 := float32(tSeconds)
tMilliseconds := tSeconds * 1e3
tMillisecondsFloat := float64(tMilliseconds)
first := metric_values{
Time: t,
TimeInt64: tMilliseconds,
TimeInt64Nullable: &(tMilliseconds),
TimeFloat64: tMillisecondsFloat,
TimeFloat64Nullable: &tMillisecondsFloat,
TimeInt32: tSecondsInt32,
TimeInt32Nullable: &tSecondsInt32,
TimeFloat32: tSecondsFloat32,
TimeFloat32Nullable: &tSecondsFloat32,
Measurement: "Metric A",
ValueOne: rnd(0, 100),
ValueTwo: rnd(0, 100),
}
second := first
second.Measurement = "Metric B"
second.ValueOne = rnd(0, 100)
second.ValueTwo = rnd(0, 100)
series = append(series, &first)
series = append(series, &second)
}
_, err = sess.InsertMulti(series)
So(err, ShouldBeNil)
Convey("When doing a metric query using epoch (int64) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeInt64 as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (int64 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeInt64Nullable as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (float64) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeFloat64 as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (float64 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeFloat64Nullable as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (int32) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeInt32 as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (int32 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeInt32Nullable as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (float32) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeFloat32 as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(float64(float32(tInitial.Unix())))*1e3)
})
Convey("When doing a metric query using epoch (float32 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeFloat32Nullable as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(float64(float32(tInitial.Unix())))*1e3)
})
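About the float32 expectations just above: a float32 mantissa holds 24 bits, so a second-resolution epoch near 1.5e9 cannot be represented exactly and rounds to a multiple of 128 seconds. That is why the expected value is derived by round-tripping through float32 instead of using tInitial.Unix()*1e3 directly. A self-contained illustration with an arbitrary 2018 timestamp:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC)
        sec := t.Unix()                              // 1521118800
        roundTripped := int64(float64(float32(sec))) // float32 keeps ~24 mantissa bits
        fmt.Println(sec, roundTripped, roundTripped-sec) // 1521118800 1521118848 48
    }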
Convey("When doing a metric query grouping by time and select metric column should return correct series", func() {
query := &tsdb.TsdbQuery{
@ -476,7 +675,6 @@ func TestMSSQL(t *testing.T) {
resp, err := endpoint.Query(nil, nil, query)
queryResult := resp.Results["A"]
So(err, ShouldBeNil)
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 4)
@ -696,7 +894,7 @@ func TestMSSQL(t *testing.T) {
columns := queryResult.Tables[0].Rows[0]
//Should be in milliseconds
So(columns[0].(float64), ShouldEqual, float64(dt.UnixNano()/1e6))
})
Convey("When doing an annotation query with a time column in epoch second format should return ms", func() {
@ -850,15 +1048,15 @@ func TestMSSQL(t *testing.T) {
func InitMSSQLTestDB(t *testing.T) *xorm.Engine {
x, err := xorm.NewEngine(sqlutil.TestDB_Mssql.DriverName, strings.Replace(sqlutil.TestDB_Mssql.ConnStr, "localhost", serverIP, 1))
if err != nil {
t.Fatalf("Failed to init mssql db %v", err)
}
x.DatabaseTZ = time.UTC
x.TZLocation = time.UTC
// x.ShowSQL()
return x
}

View File

@ -77,11 +77,11 @@ func (m *MySqlMacroEngine) evaluateMacro(name string, args []string) (string, er
if len(args) == 0 {
return "", fmt.Errorf("missing time column argument for macro %v", name)
}
return fmt.Sprintf("%s >= FROM_UNIXTIME(%d) AND %s <= FROM_UNIXTIME(%d)", args[0], m.TimeRange.GetFromAsSecondsEpoch(), args[0], m.TimeRange.GetToAsSecondsEpoch()), nil
case "__timeFrom":
return fmt.Sprintf("FROM_UNIXTIME(%d)", m.TimeRange.GetFromAsSecondsEpoch()), nil
case "__timeTo":
return fmt.Sprintf("FROM_UNIXTIME(%d)", m.TimeRange.GetToAsSecondsEpoch()), nil
case "__timeGroup":
if len(args) < 2 {
return "", fmt.Errorf("macro %v needs time column and interval", name)
@ -108,11 +108,11 @@ func (m *MySqlMacroEngine) evaluateMacro(name string, args []string) (string, er
if len(args) == 0 {
return "", fmt.Errorf("missing time column argument for macro %v", name)
}
return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], m.TimeRange.GetFromAsSecondsEpoch(), args[0], m.TimeRange.GetToAsSecondsEpoch()), nil
case "__unixEpochFrom":
return fmt.Sprintf("%d", m.TimeRange.GetFromAsSecondsEpoch()), nil
case "__unixEpochTo":
return fmt.Sprintf("%d", m.TimeRange.GetToAsSecondsEpoch()), nil
default:
return "", fmt.Errorf("Unknown macro %v", name)
}
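Every branch above now routes through two TimeRange helpers instead of casting a millisecond epoch to uint64; the unsigned cast is what made pre-1970 (negative) epochs print as enormous constants offset from 2^64. A plausible sketch of the helpers, assumed rather than taken from this diff:

    // assumed shape; the point is the signed return type, so pre-1970
    // epochs stay negative instead of wrapping around 2^64
    func (tr *TimeRange) GetFromAsSecondsEpoch() int64 {
        return tr.GetFromAsMsEpoch() / 1000
    }

    func (tr *TimeRange) GetToAsSecondsEpoch() int64 {
        return tr.GetToAsMsEpoch() / 1000
    }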

View File

@ -1,7 +1,10 @@
package mysql
import (
"fmt"
"strconv"
"testing"
"time"
"github.com/grafana/grafana/pkg/tsdb"
. "github.com/smartystreets/goconvey/convey"
@ -11,79 +14,179 @@ func TestMacroEngine(t *testing.T) {
Convey("MacroEngine", t, func() {
engine := &MySqlMacroEngine{}
query := &tsdb.Query{}
Convey("Given a time range between 2018-04-12 18:00 and 2018-04-12 18:05", func() {
from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
to := from.Add(5 * time.Minute)
timeRange := tsdb.NewFakeTimeRange("5m", "now", to)
Convey("interpolate __time function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__time(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "select UNIX_TIMESTAMP(time_column) as time_sec")
})
Convey("interpolate __time function wrapped in aggregation", func() {
sql, err := engine.Interpolate(query, timeRange, "select min($__time(time_column))")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "select min(UNIX_TIMESTAMP(time_column) as time_sec)")
})
Convey("interpolate __timeGroup function", func() {
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "GROUP BY cast(cast(UNIX_TIMESTAMP(time_column)/(300) as signed)*300 as signed)")
})
Convey("interpolate __timeGroup function with spaces around arguments", func() {
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column , '5m')")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "GROUP BY cast(cast(UNIX_TIMESTAMP(time_column)/(300) as signed)*300 as signed)")
})
Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= FROM_UNIXTIME(%d) AND time_column <= FROM_UNIXTIME(%d)", from.Unix(), to.Unix()))
})
Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", from.Unix()))
})
Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", to.Unix()))
})
Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select time >= %d AND time <= %d", from.Unix(), to.Unix()))
})
Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})
Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})
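tsdb.NewFakeTimeRange pins the reference instant, which is what makes a relative "5m"/"now" range assertable against fixed from/to values. Its shape can be assumed to be roughly this (a sketch, not copied from pkg/tsdb):

    // assumed sketch: a TimeRange whose "now" is injected instead of time.Now()
    func NewFakeTimeRange(from, to string, timeNow time.Time) *TimeRange {
        return &TimeRange{From: from, To: to, now: timeNow}
    }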
Convey("Given a time range between 1960-02-01 07:00 and 1965-02-03 08:00", func() {
from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
to := time.Date(1965, 2, 3, 8, 0, 0, 0, time.UTC)
timeRange := tsdb.NewTimeRange(strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10), strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))
Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= FROM_UNIXTIME(%d) AND time_column <= FROM_UNIXTIME(%d)", from.Unix(), to.Unix()))
})
Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", from.Unix()))
})
Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", to.Unix()))
})
Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select time >= %d AND time <= %d", from.Unix(), to.Unix()))
})
Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})
Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})
Convey("Given a time range between 1960-02-01 07:00 and 1980-02-03 08:00", func() {
from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
to := time.Date(1980, 2, 3, 8, 0, 0, 0, time.UTC)
timeRange := tsdb.NewTimeRange(strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10), strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))
Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= FROM_UNIXTIME(%d) AND time_column <= FROM_UNIXTIME(%d)", from.Unix(), to.Unix()))
})
Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", from.Unix()))
})
Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", to.Unix()))
})
Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select time >= %d AND time <= %d", from.Unix(), to.Unix()))
})
Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})
Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})
})
}
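For orientation, all of the cases above exercise the engine's Interpolate, which finds each $__name(args) occurrence in the raw SQL and lets evaluateMacro produce the replacement fragment. A rough, assumed sketch of that dispatch — the regex and error handling are illustrative, not the file's actual code:

    var macroRegex = regexp.MustCompile(`\$__(\w+)\(([^)]*)\)`)

    func interpolate(m *MySqlMacroEngine, sql string) (string, error) {
        var firstErr error
        out := macroRegex.ReplaceAllStringFunc(sql, func(match string) string {
            groups := macroRegex.FindStringSubmatch(match)
            args := []string{}
            if strings.TrimSpace(groups[2]) != "" {
                for _, a := range strings.Split(groups[2], ",") {
                    args = append(args, strings.TrimSpace(a)) // tolerate spaces around arguments
                }
            }
            res, err := m.evaluateMacro("__"+groups[1], args)
            if err != nil && firstErr == nil {
                firstErr = err
            }
            return res
        })
        return out, firstErr
    }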

View File

@ -8,7 +8,6 @@ import (
"math"
"reflect"
"strconv"
"time"
"github.com/go-sql-driver/mysql"
"github.com/go-xorm/core"
@ -239,15 +238,18 @@ func (e MysqlQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.
return err
}
// converts column named time to unix timestamp in milliseconds to make
// native mysql datetime types and epoch dates work in
// annotation and table queries.
tsdb.ConvertSqlTimeColumnToEpochMs(values, timeIndex)
switch columnValue := values[timeIndex].(type) {
case int64:
timestamp = float64(columnValue)
case float64:
timestamp = columnValue
default:
return fmt.Errorf("Invalid type for column time/time_sec, must be of type timestamp or unix timestamp, got: %T %v", columnValue, columnValue)
}
if metricIndex >= 0 {

View File

@ -3,25 +3,36 @@ package mysql
import (
"fmt"
"math/rand"
"strings"
"testing"
"time"
"github.com/go-xorm/xorm"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/sqlstore/sqlutil"
"github.com/grafana/grafana/pkg/tsdb"
. "github.com/smartystreets/goconvey/convey"
)
// To run this test, set runMySqlTests=true
// Or from the commandline: GRAFANA_TEST_DB=mysql go test -v ./pkg/tsdb/mysql
// The tests require a MySQL db named grafana_ds_tests and a user/password grafana/password
// Use the docker/blocks/mysql_tests/docker-compose.yaml to spin up a
// preconfigured MySQL server suitable for running these tests.
// There is also a dashboard.json in same directory that you can import to Grafana
// once you've created a datasource for the test server/database.
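The GRAFANA_TEST_DB gate mentioned above can be assumed to look roughly like this (a sketch; the real check lives in pkg/services/sqlstore):

    // assumed shape of the env-var gate used by the test below
    func IsTestDbMySql() bool {
        if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
            return db == "mysql"
        }
        return false
    }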
func TestMySQL(t *testing.T) {
// change to true to run the MySQL tests
runMySqlTests := false
// runMySqlTests := true
if !(sqlstore.IsTestDbMySql() || runMySqlTests) {
t.Skip()
}
Convey("MySQL", t, func() {
x := InitMySQLTestDB(t)
endpoint := &MysqlQueryEndpoint{
@ -35,7 +46,7 @@ func TestMySQL(t *testing.T) {
sess := x.NewSession()
defer sess.Close()
fromStart := time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC)
Convey("Given a table with different native data types", func() {
if exists, err := sess.IsTableExist("mysql_types"); err != nil || exists {
@ -121,9 +132,8 @@ func TestMySQL(t *testing.T) {
So(column[7].(float64), ShouldEqual, 1.11)
So(column[8].(float64), ShouldEqual, 2.22)
So(*column[9].(*float32), ShouldEqual, 3.33)
So(column[10].(time.Time), ShouldHappenWithin, time.Duration(10*time.Second), time.Now())
So(column[11].(time.Time), ShouldHappenWithin, time.Duration(10*time.Second), time.Now())
So(column[12].(string), ShouldEqual, "11:11:11")
So(column[13].(int64), ShouldEqual, 2018)
So(*column[14].(*[]byte), ShouldHaveSameTypeAs, []byte{1})
@ -137,8 +147,8 @@ func TestMySQL(t *testing.T) {
So(column[22].(string), ShouldEqual, "longblob")
So(column[23].(string), ShouldEqual, "val2")
So(column[24].(string), ShouldEqual, "a,b")
So(column[25].(time.Time).Format("2006-01-02T00:00:00Z"), ShouldEqual, time.Now().UTC().Format("2006-01-02T00:00:00Z"))
So(column[26].(float64), ShouldEqual, float64(1.514764861123456*1e12))
So(column[27], ShouldEqual, nil)
So(column[28], ShouldEqual, nil)
So(column[29], ShouldEqual, "")
@ -177,10 +187,8 @@ func TestMySQL(t *testing.T) {
})
}
_, err = sess.InsertMulti(series)
So(err, ShouldBeNil)
Convey("When doing a metric query using timeGroup", func() {
query := &tsdb.TsdbQuery{
@ -301,10 +309,19 @@ func TestMySQL(t *testing.T) {
Convey("Given a table with metrics having multiple values and measurements", func() {
type metric_values struct {
Time time.Time `xorm:"datetime 'time' not null"`
TimeNullable *time.Time `xorm:"datetime(6) 'timeNullable' null"`
TimeInt64 int64 `xorm:"bigint(20) 'timeInt64' not null"`
TimeInt64Nullable *int64 `xorm:"bigint(20) 'timeInt64Nullable' null"`
TimeFloat64 float64 `xorm:"double 'timeFloat64' not null"`
TimeFloat64Nullable *float64 `xorm:"double 'timeFloat64Nullable' null"`
TimeInt32 int32 `xorm:"int(11) 'timeInt32' not null"`
TimeInt32Nullable *int32 `xorm:"int(11) 'timeInt32Nullable' null"`
TimeFloat32 float32 `xorm:"double 'timeFloat32' not null"`
TimeFloat32Nullable *float32 `xorm:"double 'timeFloat32Nullable' null"`
Measurement string
ValueOne int64 `xorm:"integer 'valueOne'"`
ValueTwo int64 `xorm:"integer 'valueTwo'"`
}
if exist, err := sess.IsTableExist(metric_values{}); err != nil || exist {
@ -319,26 +336,265 @@ func TestMySQL(t *testing.T) {
return rand.Int63n(max-min) + min
}
var tInitial time.Time
series := []*metric_values{}
for i, t := range genTimeRangeByInterval(fromStart.Add(-30*time.Minute), 90*time.Minute, 5*time.Minute) {
if i == 0 {
tInitial = t
}
tSeconds := t.Unix()
tSecondsInt32 := int32(tSeconds)
tSecondsFloat32 := float32(tSeconds)
tMilliseconds := tSeconds * 1e3
tMillisecondsFloat := float64(tMilliseconds)
t2 := t
first := metric_values{
Time: t,
TimeNullable: &t2,
TimeInt64: tMilliseconds,
TimeInt64Nullable: &(tMilliseconds),
TimeFloat64: tMillisecondsFloat,
TimeFloat64Nullable: &tMillisecondsFloat,
TimeInt32: tSecondsInt32,
TimeInt32Nullable: &tSecondsInt32,
TimeFloat32: tSecondsFloat32,
TimeFloat32Nullable: &tSecondsFloat32,
Measurement: "Metric A",
ValueOne: rnd(0, 100),
ValueTwo: rnd(0, 100),
}
second := first
second.Measurement = "Metric B"
second.ValueOne = rnd(0, 100)
second.ValueTwo = rnd(0, 100)
series = append(series, &first)
series = append(series, &second)
}
_, err = sess.InsertMulti(series)
So(err, ShouldBeNil)
Convey("When doing a metric query using time as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using time (nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeNullable as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (int64) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeInt64 as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (int64 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeInt64Nullable as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (float64) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeFloat64 as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (float64 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeFloat64Nullable as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (int32) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeInt32 as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (int32 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeInt32Nullable as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})
Convey("When doing a metric query using epoch (float32) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeFloat32 as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(float64(float32(tInitial.Unix())))*1e3)
})
Convey("When doing a metric query using epoch (float32 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeFloat32Nullable as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}
resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(float64(float32(tInitial.Unix())))*1e3)
})
Convey("When doing a metric query grouping by time and select metric column should return correct series", func() {
query := &tsdb.TsdbQuery{
@ -647,16 +903,16 @@ func TestMySQL(t *testing.T) {
}
func InitMySQLTestDB(t *testing.T) *xorm.Engine {
x, err := xorm.NewEngine(sqlutil.TestDB_Mysql.DriverName, strings.Replace(sqlutil.TestDB_Mysql.ConnStr, "/grafana_tests", "/grafana_ds_tests", 1))
if err != nil {
t.Fatalf("Failed to init mysql db %v", err)
}
x.DatabaseTZ = time.UTC
x.TZLocation = time.UTC
// x.ShowSQL()
return x
}
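Pinning DatabaseTZ and TZLocation to UTC keeps the seeded rows and the assertions on one clock regardless of the host timezone. For reference, the same intent expressed at the driver level would be DSN parameters like these (go-sql-driver options; credentials and address are illustrative):

    // parseTime makes the driver scan DATETIME into time.Time; loc fixes its zone
    dsn := "grafana:password@tcp(localhost:3306)/grafana_ds_tests?parseTime=true&loc=UTC"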

View File

@ -35,7 +35,7 @@ func TestOpenTsdbExecutor(t *testing.T) {
})
Convey("Build metric with downsampling disabled", func() {
query := &tsdb.Query{
Model: simplejson.New(),

Some files were not shown because too many files have changed in this diff.