Mirror of https://github.com/grafana/grafana.git, synced 2025-02-25 18:55:37 -06:00

Merge branch 'master' into readonly_dashboards

Commit 8a5cb088d3
@@ -1,6 +1,6 @@
 [run]
 init_cmds = [
-  ["go", "build", "-o", "./bin/grafana-server", "./pkg/cmd/grafana-server"],
+  ["go", "run", "build.go", "build"],
   ["./bin/grafana-server", "cfg:app_mode=development"]
 ]
 watch_all = true
@@ -12,6 +12,6 @@ watch_dirs = [
 watch_exts = [".go", ".ini", ".toml"]
 build_delay = 1500
 cmds = [
-  ["go", "build", "-o", "./bin/grafana-server", "./pkg/cmd/grafana-server"],
+  ["go", "run", "build.go", "build"],
   ["./bin/grafana-server", "cfg:app_mode=development"]
 ]
CHANGELOG.md
@@ -15,8 +15,11 @@
 * **Alerting**: Add support for retries on alert queries [#5855](https://github.com/grafana/grafana/issues/5855), thx [@Thib17](https://github.com/Thib17)
 * **Table**: Table plugin value mappings [#7119](https://github.com/grafana/grafana/issues/7119), thx [infernix](https://github.com/infernix)
 * **IE11**: IE 11 compatibility [#11165](https://github.com/grafana/grafana/issues/11165)
 * **Scrolling**: Better scrolling experience [#11053](https://github.com/grafana/grafana/issues/11053), [#11252](https://github.com/grafana/grafana/issues/11252), [#10836](https://github.com/grafana/grafana/issues/10836), [#11185](https://github.com/grafana/grafana/issues/11185), [#11168](https://github.com/grafana/grafana/issues/11168)
 * **Docker**: Improved docker image (breaking changes regarding file ownership) [grafana-docker #141](https://github.com/grafana/grafana-docker/issues/141), thx [@Spindel](https://github.com/Spindel), [@ChristianKniep](https://github.com/ChristianKniep), [@brancz](https://github.com/brancz) and [@jangaraj](https://github.com/jangaraj)

 ### Minor

 * **OpsGenie**: Add triggered alerts as description [#11046](https://github.com/grafana/grafana/pull/11046), thx [@llamashoes](https://github.com/llamashoes)
 * **Cloudwatch**: Support high resolution metrics [#10925](https://github.com/grafana/grafana/pull/10925), thx [@mtanda](https://github.com/mtanda)
 * **Cloudwatch**: Add dimension filtering to CloudWatch `dimension_values()` [#10029](https://github.com/grafana/grafana/issues/10029), thx [@willyhutw](https://github.com/willyhutw)
@@ -30,6 +33,28 @@
 * **Prometheus**: Escape backslash in labels correctly. [#10555](https://github.com/grafana/grafana/issues/10555), thx [@roidelapluie](https://github.com/roidelapluie)
 * **Variables**: Case-insensitive sorting for template values [#11128](https://github.com/grafana/grafana/issues/11128) thx [@cross](https://github.com/cross)
 * **Annotations (native)**: Change default limit from 10 to 100 when querying api [#11569](https://github.com/grafana/grafana/issues/11569), thx [@flopp999](https://github.com/flopp999)
 * **MySQL/Postgres/MSSQL**: PostgreSQL datasource generates invalid query with dates before 1970 [#11530](https://github.com/grafana/grafana/issues/11530) thx [@ryantxu](https://github.com/ryantxu)
 * **Kiosk**: Adds url parameter for starting a dashboard in inactive mode [#11228](https://github.com/grafana/grafana/issues/11228), thx [@towolf](https://github.com/towolf)
 * **Dashboard**: Enable closing timepicker using escape key [#11332](https://github.com/grafana/grafana/issues/11332)
 * **Datasources**: Rename direct access mode in the data source settings [#11391](https://github.com/grafana/grafana/issues/11391)
 * **Search**: Display dashboards in folder indented [#11073](https://github.com/grafana/grafana/issues/11073)
 * **Units**: Use B/s instead Bps for Bytes per second [#9342](https://github.com/grafana/grafana/pull/9342), thx [@mayli](https://github.com/mayli)
 * **Units**: Radiation units [#11001](https://github.com/grafana/grafana/issues/11001), thx [@victorclaessen](https://github.com/victorclaessen)
 * **Units**: Timeticks unit [#11183](https://github.com/grafana/grafana/pull/11183), thx [@jtyr](https://github.com/jtyr)
 * **Units**: Concentration units and "Normal cubic metre" [#11211](https://github.com/grafana/grafana/issues/11211), thx [@flopp999](https://github.com/flopp999)
 * **Units**: New currency - Czech koruna [#11384](https://github.com/grafana/grafana/pull/11384), thx [@Rohlik](https://github.com/Rohlik)
 * **Avatar**: Fix DISABLE_GRAVATAR option [#11095](https://github.com/grafana/grafana/issues/11095)
 * **Heatmap**: Disable log scale when using time time series buckets [#10792](https://github.com/grafana/grafana/issues/10792)
 * **Provisioning**: Remove `id` from json when provisioning dashboards, [#11138](https://github.com/grafana/grafana/issues/11138)
 * **Prometheus**: tooltip for legend format not showing properly [#11516](https://github.com/grafana/grafana/issues/11516), thx [@svenklemm](https://github.com/svenklemm)
 * **Playlist**: Empty playlists cannot be deleted [#11133](https://github.com/grafana/grafana/issues/11133), thx [@kichristensen](https://github.com/kichristensen)
 * **Switch Orgs**: Alphabetic order in Switch Organization modal [#11556](https://github.com/grafana/grafana/issues/11556)
 * **Postgres**: improve `$__timeFilter` macro [#11578](https://github.com/grafana/grafana/issues/11578), thx [@svenklemm](https://github.com/svenklemm)
 * **Permission list**: Improved ux [#10747](https://github.com/grafana/grafana/issues/10747)
 * **Dashboard**: Sizing and positioning of settings menu icons [#11572](https://github.com/grafana/grafana/pull/11572)

 ### Tech

 * Migrated JavaScript files to TypeScript

 # 5.0.4 (2018-03-28)

@@ -57,7 +82,7 @@
 * **Dashboards**: Changing templated value from dropdown is causing unsaved changes [#11063](https://github.com/grafana/grafana/issues/11063)
 * **Prometheus**: Fixes bundled Prometheus 2.0 dashboard [#11016](https://github.com/grafana/grafana/issues/11016), thx [@roidelapluie](https://github.com/roidelapluie)
 * **Sidemenu**: Profile menu "invisible" when gravatar is disabled [#11097](https://github.com/grafana/grafana/issues/11097)
-* **Dashboard**: Fixes a bug with resizeable handles for panels [#11103](https://github.com/grafana/grafana/issues/11103)
+* **Dashboard**: Fixes a bug with resizable handles for panels [#11103](https://github.com/grafana/grafana/issues/11103)
 * **Alerting**: Telegram inline image mode fails when caption too long [#10975](https://github.com/grafana/grafana/issues/10975)
 * **Alerting**: Fixes silent failing validation [#11145](https://github.com/grafana/grafana/pull/11145)
 * **OAuth**: Only use jwt token if it contains an email address [#11127](https://github.com/grafana/grafana/pull/11127)
@@ -121,7 +146,7 @@ Grafana v5.0 is going to be the biggest and most foundational release Grafana ha
 ### New Major Features
 - **Dashboards** Dashboard folders, [#1611](https://github.com/grafana/grafana/issues/1611)
 - **Teams** User groups (teams) implemented. Can be used in folder & dashboard permission list.
-- **Dashboard grid**: Panels are now layed out in a two dimensional grid (with x, y, w, h). [#9093](https://github.com/grafana/grafana/issues/9093).
+- **Dashboard grid**: Panels are now laid out in a two dimensional grid (with x, y, w, h). [#9093](https://github.com/grafana/grafana/issues/9093).
 - **Templating**: Vertical repeat direction for panel repeats.
 - **UX**: Major update to page header and navigation
 - **Dashboard settings**: Combine dashboard settings views into one with side menu, [#9750](https://github.com/grafana/grafana/issues/9750)
@@ -155,7 +180,7 @@ Dashboard panels and rows are positioned using a gridPos object `{x: 0, y: 0, w:
 * **Dashboard history**: New config file option versions_to_keep sets how many versions per dashboard to store, [#9671](https://github.com/grafana/grafana/issues/9671)
 * **Dashboard as cfg**: Load dashboards from file into Grafana on startup/change [#9654](https://github.com/grafana/grafana/issues/9654) [#5269](https://github.com/grafana/grafana/issues/5269)
 * **Prometheus**: Grafana can now send alerts to Prometheus Alertmanager while firing [#7481](https://github.com/grafana/grafana/issues/7481), thx [@Thib17](https://github.com/Thib17) and [@mtanda](https://github.com/mtanda)
-* **Table**: Support multiple table formated queries in table panel [#9170](https://github.com/grafana/grafana/issues/9170), thx [@davkal](https://github.com/davkal)
+* **Table**: Support multiple table formatted queries in table panel [#9170](https://github.com/grafana/grafana/issues/9170), thx [@davkal](https://github.com/davkal)
 * **Security**: Protect against brute force (frequent) login attempts [#7616](https://github.com/grafana/grafana/issues/7616)

 ## Minor
@@ -177,7 +202,7 @@ Dashboard panels and rows are positioned using a gridPos object `{x: 0, y: 0, w:
 * **Sensu**: Send alert message to sensu output [#9551](https://github.com/grafana/grafana/issues/9551), thx [@cjchand](https://github.com/cjchand)
 * **Singlestat**: suppress error when result contains no datapoints [#9636](https://github.com/grafana/grafana/issues/9636), thx [@utkarshcmu](https://github.com/utkarshcmu)
 * **Postgres/MySQL**: Control quoting in SQL-queries when using template variables [#9030](https://github.com/grafana/grafana/issues/9030), thanks [@svenklemm](https://github.com/svenklemm)
-* **Pagerduty**: Pagerduty dont auto resolve incidents by default anymore. [#10222](https://github.com/grafana/grafana/issues/10222)
+* **Pagerduty**: Pagerduty don't auto resolve incidents by default anymore. [#10222](https://github.com/grafana/grafana/issues/10222)
 * **Cloudwatch**: Fix for multi-valued templated queries. [#9903](https://github.com/grafana/grafana/issues/9903)

 ## Tech
@@ -255,7 +280,7 @@ The following properties have been deprecated and will be removed in a future re
 * **Annotations**: Add support for creating annotations from graph panel [#8197](https://github.com/grafana/grafana/pull/8197)
 * **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
 * **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
-* **Graph**: Add support for local formating in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
+* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
 * **Jaeger**: Add support for open tracing using jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
 * **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
 * **CLI**: Make it possible to install plugins from any url [#5873](https://github.com/grafana/grafana/issues/5873)
@@ -292,7 +317,7 @@ The following properties have been deprecated and will be removed in a future re
 * **Graphite**: Fix for Grafana internal metrics to Graphite sending NaN values [#9279](https://github.com/grafana/grafana/issues/9279)
 * **HTTP API**: Fix for HEAD method requests [#9307](https://github.com/grafana/grafana/issues/9307)
 * **Templating**: Fix for duplicate template variable queries when refresh is set to time range change [#9185](https://github.com/grafana/grafana/issues/9185)
-* **Metrics**: dont write NaN values to graphite [#9279](https://github.com/grafana/grafana/issues/9279)
+* **Metrics**: don't write NaN values to graphite [#9279](https://github.com/grafana/grafana/issues/9279)

 # 4.5.1 (2017-09-15)

@@ -329,12 +354,12 @@ The following properties have been deprecated and will be removed in a future re
 ### Breaking change

 * **InfluxDB/Elasticsearch**: The panel & data source option named "Group by time interval" is now named "Min time interval" and does now always define a lower limit for the auto group by time. Without having to use `>` prefix (that prefix still works). This should in theory have close to zero actual impact on existing dashboards. It does mean that if you used this setting to define a hard group by time interval of, say "1d", if you zoomed to a time range wide enough the time range could increase above the "1d" range as the setting is now always considered a lower limit.
-* **Elasticsearch**: Elasticsearch metric queries without date histogram now return table formated data making table panel much easier to use for this use case. Should not break/change existing dashboards with stock panels but external panel plugins can be affected.
+* **Elasticsearch**: Elasticsearch metric queries without date histogram now return table formatted data making table panel much easier to use for this use case. Should not break/change existing dashboards with stock panels but external panel plugins can be affected.

 ## Changes

 * **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
-* **InfluxDB**: Added paranthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
+* **InfluxDB**: Added parenthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)

 ## Bug Fixes

@@ -346,7 +371,7 @@ The following properties have been deprecated and will be removed in a future re

 ## Bug Fixes

-* **Search**: Fix for issue that casued search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
+* **Search**: Fix for issue that caused search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
 * **Modals**: ESC key now closes modal again, fixes [#8981](https://github.com/grafana/grafana/issues/8988), thx [@j-white](https://github.com/j-white)

 # 4.4.2 (2017-08-01)
@@ -685,12 +710,12 @@ due to too many connections/file handles on the data source backend. This proble
 ### Enhancements
 * **Login**: Adds option to disable username/password logins, closes [#4674](https://github.com/grafana/grafana/issues/4674)
 * **SingleStat**: Add seriename as option in singlestat panel, closes [#4740](https://github.com/grafana/grafana/issues/4740)
-* **Localization**: Week start day now dependant on browser locale setting, closes [#3003](https://github.com/grafana/grafana/issues/3003)
+* **Localization**: Week start day now dependent on browser locale setting, closes [#3003](https://github.com/grafana/grafana/issues/3003)
 * **Templating**: Update panel repeats for variables that change on time refresh, closes [#5021](https://github.com/grafana/grafana/issues/5021)
 * **Templating**: Add support for numeric and alphabetical sorting of variable values, closes [#2839](https://github.com/grafana/grafana/issues/2839)
 * **Elasticsearch**: Support to set Precision Threshold for Unique Count metric, closes [#4689](https://github.com/grafana/grafana/issues/4689)
 * **Navigation**: Add search to org swithcer, closes [#2609](https://github.com/grafana/grafana/issues/2609)
-* **Database**: Allow database config using one propertie, closes [#5456](https://github.com/grafana/grafana/pull/5456)
+* **Database**: Allow database config using one property, closes [#5456](https://github.com/grafana/grafana/pull/5456)
 * **Graphite**: Add support for groupByNodes, closes [#5613](https://github.com/grafana/grafana/pull/5613)
 * **Influxdb**: Add support for elapsed(), closes [#5827](https://github.com/grafana/grafana/pull/5827)
 * **OpenTSDB**: Add support for explicitTags for OpenTSDB>=2.3, closes [#6360](https://github.com/grafana/grafana/pull/6361)
@@ -757,7 +782,7 @@ due to too many connections/file handles on the data source backend. This proble
 * **Datasource**: Pending data source requests are cancelled before new ones are issues (Graphite & Prometheus), closes [#5321](https://github.com/grafana/grafana/issues/5321)

 ### Breaking changes
-* **Logging** : Changed default logging output format (now structured into message, and key value pairs, with logger key acting as component). You can also no change in config to json log ouput.
+* **Logging** : Changed default logging output format (now structured into message, and key value pairs, with logger key acting as component). You can also no change in config to json log output.
 * **Graphite** : The Graph panel no longer have a Graphite PNG option. closes [#5367](https://github.com/grafana/grafana/issues/5367)

 ### Bug fixes
@@ -775,7 +800,7 @@ due to too many connections/file handles on the data source backend. This proble
 * **Annotations**: Annotations can now use a template variable as data source, closes [#5054](https://github.com/grafana/grafana/issues/5054)
 * **Time picker**: Fixed issue timepicker and UTC when reading time from URL, fixes [#5078](https://github.com/grafana/grafana/issues/5078)
 * **CloudWatch**: Support for Multiple Account by AssumeRole, closes [#3522](https://github.com/grafana/grafana/issues/3522)
-* **Singlestat**: Fixed alignment and minium height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
+* **Singlestat**: Fixed alignment and minimum height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
 * **Share modal**: Fixed link when using grafana under dashboard sub url, fixes [#5109](https://github.com/grafana/grafana/issues/5109)
 * **Prometheus**: Fixed bug in query editor that caused it not to load when reloading page, fixes [#5107](https://github.com/grafana/grafana/issues/5107)
 * **Elasticsearch**: Fixed bug when template variable query returns numeric values, fixes [#5097](https://github.com/grafana/grafana/issues/5097), fixes [#5088](https://github.com/grafana/grafana/issues/5088)
@@ -792,7 +817,7 @@ due to too many connections/file handles on the data source backend. This proble
 * **Graph**: Fixed broken PNG rendering in graph panel, fixes [#5025](https://github.com/grafana/grafana/issues/5025)
 * **Graph**: Fixed broken xaxis on graph panel, fixes [#5024](https://github.com/grafana/grafana/issues/5024)

-* **Influxdb**: Fixes crash when hiding middle serie, fixes [#5005](https://github.com/grafana/grafana/issues/5005)
+* **Influxdb**: Fixes crash when hiding middle series, fixes [#5005](https://github.com/grafana/grafana/issues/5005)

 # 3.0.1 Stable (2016-05-11)

@@ -804,7 +829,7 @@ due to too many connections/file handles on the data source backend. This proble
 ### Bug fixes
 * **Dashboard title**: Fixed max dashboard title width (media query) for large screens, fixes [#4859](https://github.com/grafana/grafana/issues/4859)
 * **Annotations**: Fixed issue with entering annotation edit view, fixes [#4857](https://github.com/grafana/grafana/issues/4857)
-* **Remove query**: Fixed issue with removing query for data sources without collapsable query editors, fixes [#4856](https://github.com/grafana/grafana/issues/4856)
+* **Remove query**: Fixed issue with removing query for data sources without collapsible query editors, fixes [#4856](https://github.com/grafana/grafana/issues/4856)
 * **Graphite PNG**: Fixed issue graphite png rendering option, fixes [#4864](https://github.com/grafana/grafana/issues/4864)
 * **InfluxDB**: Fixed issue missing plus group by iconn, fixes [#4862](https://github.com/grafana/grafana/issues/4862)
 * **Graph**: Fixes missing line mode for thresholds, fixes [#4902](https://github.com/grafana/grafana/pull/4902)
@@ -820,11 +845,11 @@ due to too many connections/file handles on the data source backend. This proble

 ### Bug fixes
 * **InfluxDB 0.12**: Fixed issue templating and `show tag values` query only returning tags for first measurement, fixes [#4726](https://github.com/grafana/grafana/issues/4726)
-* **Templating**: Fixed issue with regex formating when matching multiple values, fixes [#4755](https://github.com/grafana/grafana/issues/4755)
+* **Templating**: Fixed issue with regex formatting when matching multiple values, fixes [#4755](https://github.com/grafana/grafana/issues/4755)
 * **Templating**: Fixed issue with custom all value and escaping, fixes [#4736](https://github.com/grafana/grafana/issues/4736)
 * **Dashlist**: Fixed issue dashboard list panel and caching tags, fixes [#4768](https://github.com/grafana/grafana/issues/4768)
 * **Graph**: Fixed issue with unneeded scrollbar in legend for Firefox, fixes [#4760](https://github.com/grafana/grafana/issues/4760)
-* **Table panel**: Fixed issue table panel formating string array properties, fixes [#4791](https://github.com/grafana/grafana/issues/4791)
+* **Table panel**: Fixed issue table panel formatting string array properties, fixes [#4791](https://github.com/grafana/grafana/issues/4791)
 * **grafana-cli**: Improve error message when failing to install plugins due to corrupt response, fixes [#4651](https://github.com/grafana/grafana/issues/4651)
 * **Singlestat**: Fixes prefix an postfix for gauges, fixes [#4812](https://github.com/grafana/grafana/issues/4812)
 * **Singlestat**: Fixes auto-refresh on change for some options, fixes [#4809](https://github.com/grafana/grafana/issues/4809)
@@ -916,7 +941,7 @@ slack channel (link to slack channel in readme).
 ### Bug fixes
 * **Playlist**: Fix for memory leak when running a playlist, closes [#3794](https://github.com/grafana/grafana/pull/3794)
 * **InfluxDB**: Fix for InfluxDB and table panel when using Format As Table and having group by time, fixes [#3928](https://github.com/grafana/grafana/issues/3928)
-* **Panel Time shift**: Fix for panel time range and using dashboard times liek `Today` and `This Week`, fixes [#3941](https://github.com/grafana/grafana/issues/3941)
+* **Panel Time shift**: Fix for panel time range and using dashboard times like `Today` and `This Week`, fixes [#3941](https://github.com/grafana/grafana/issues/3941)
 * **Row repeat**: Repeated rows will now appear next to each other and not by the bottom of the dashboard, fixes [#3942](https://github.com/grafana/grafana/issues/3942)
 * **Png renderer**: Fix for phantomjs path on windows, fixes [#3657](https://github.com/grafana/grafana/issues/3657)

@@ -940,7 +965,7 @@ slack channel (link to slack channel in readme).
 ### Bug Fixes
 * **metric editors**: Fix for clicking typeahead auto dropdown option, fixes [#3428](https://github.com/grafana/grafana/issues/3428)
 * **influxdb**: Fixed issue showing Group By label only on first query, fixes [#3453](https://github.com/grafana/grafana/issues/3453)
-* **logging**: Add more verbose info logging for http reqeusts, closes [#3405](https://github.com/grafana/grafana/pull/3405)
+* **logging**: Add more verbose info logging for http requests, closes [#3405](https://github.com/grafana/grafana/pull/3405)

 # 2.6.0-Beta1 (2015-12-04)

@@ -967,7 +992,7 @@ slack channel (link to slack channel in readme).

 **New Feature: Mix data sources**
 - A built in data source is now available named `-- Mixed --`, When picked in the metrics tab,
-it allows you to add queries of differnet data source types & instances to the same graph/panel!
+it allows you to add queries of different data source types & instances to the same graph/panel!
 [Issue #436](https://github.com/grafana/grafana/issues/436)

 **New Feature: Elasticsearch Metrics Query Editor and Viz Support**
@@ -1006,7 +1031,7 @@ it allows you to add queries of differnet data source types & instances to the s
 - [Issue #2564](https://github.com/grafana/grafana/issues/2564). Templating: Another atempt at fixing #2534 (Init multi value template var used in repeat panel from url)
 - [Issue #2620](https://github.com/grafana/grafana/issues/2620). Graph: multi series tooltip did no highlight correct point when stacking was enabled and series were of different resolution
 - [Issue #2636](https://github.com/grafana/grafana/issues/2636). InfluxDB: Do no show template vars in dropdown for tag keys and group by keys
-- [Issue #2604](https://github.com/grafana/grafana/issues/2604). InfluxDB: More alias options, can now use `$[0-9]` syntax to reference part of a measurement name (seperated by dots)
+- [Issue #2604](https://github.com/grafana/grafana/issues/2604). InfluxDB: More alias options, can now use `$[0-9]` syntax to reference part of a measurement name (separated by dots)

 **Breaking Changes**
 - Notice to makers/users of custom data sources, there is a minor breaking change in 2.2 that
@@ -1088,7 +1113,7 @@ Grunt & Watch tasks:
 - [Issue #1826](https://github.com/grafana/grafana/issues/1826). User role 'Viewer' are now prohibited from entering edit mode (and doing other transient dashboard edits). A new role `Read Only Editor` will replace the old Viewer behavior
 - [Issue #1928](https://github.com/grafana/grafana/issues/1928). HTTP API: GET /api/dashboards/db/:slug response changed property `model` to `dashboard` to match the POST request nameing
 - Backend render URL changed from `/render/dashboard/solo` `render/dashboard-solo/` (in order to have consistent dashboard url `/dashboard/:type/:slug`)
-- Search HTTP API response has changed (simplified), tags list moved to seperate HTTP resource URI
+- Search HTTP API response has changed (simplified), tags list moved to separate HTTP resource URI
 - Datasource HTTP api breaking change, ADD datasource is now POST /api/datasources/, update is now PUT /api/datasources/:id

 **Fixes**
@@ -1105,7 +1130,7 @@ Grunt & Watch tasks:
 # 2.0.2 (2015-04-22)

 **Fixes**
-- [Issue #1832](https://github.com/grafana/grafana/issues/1832). Graph Panel + Legend Table mode: Many series casued zero height graph, now legend will never reduce the height of the graph below 50% of row height.
+- [Issue #1832](https://github.com/grafana/grafana/issues/1832). Graph Panel + Legend Table mode: Many series caused zero height graph, now legend will never reduce the height of the graph below 50% of row height.
 - [Issue #1846](https://github.com/grafana/grafana/issues/1846). Snapshots: Fixed issue with snapshoting dashboards with an interval template variable
 - [Issue #1848](https://github.com/grafana/grafana/issues/1848). Panel timeshift: You can now use panel timeshift without a relative time override

@@ -1147,7 +1172,7 @@ Grunt & Watch tasks:

 **Fixes**
 - [Issue #1649](https://github.com/grafana/grafana/issues/1649). HTTP API: grafana /render calls nows with api keys
-- [Issue #1667](https://github.com/grafana/grafana/issues/1667). Datasource proxy & session timeout fix (casued 401 Unauthorized error after a while)
+- [Issue #1667](https://github.com/grafana/grafana/issues/1667). Datasource proxy & session timeout fix (caused 401 Unauthorized error after a while)
 - [Issue #1707](https://github.com/grafana/grafana/issues/1707). Unsaved changes: Do not show for snapshots, scripted and file based dashboards
 - [Issue #1703](https://github.com/grafana/grafana/issues/1703). Unsaved changes: Do not show for users with role `Viewer`
 - [Issue #1675](https://github.com/grafana/grafana/issues/1675). Data source proxy: Fixed issue with Gzip enabled and data source proxy
@@ -1160,14 +1185,14 @@ Grunt & Watch tasks:

 **Important Note**

-Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated backend server. Please read the [Documentation](http://docs.grafana.org) for more detailed about this SIGNIFCANT change to Grafana
+Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated backend server. Please read the [Documentation](http://docs.grafana.org) for more detailed about this SIGNIFICANT change to Grafana

 **New features**
 - [Issue #1623](https://github.com/grafana/grafana/issues/1623). Share Dashboard: Dashboard snapshot sharing (dash and data snapshot), save to local or save to public snapshot dashboard snapshots.raintank.io site
 - [Issue #1622](https://github.com/grafana/grafana/issues/1622). Share Panel: The share modal now has an embed option, gives you an iframe that you can use to embedd a single graph on another web site
-- [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes inbetween the user is promted with a warning if he really wants to overwrite the other's changes
+- [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes in between the user is promted with a warning if he really wants to overwrite the other's changes
 - [Issue #1331](https://github.com/grafana/grafana/issues/1331). Graph & Singlestat: New axis/unit format selector and more units (kbytes, Joule, Watt, eV), and new design for graph axis & grid tab and single stat options tab views
-- [Issue #1241](https://github.com/grafana/grafana/issues/1242). Timepicker: New option in timepicker (under dashboard settings), to change ``now`` to be for example ``now-1m``, usefull when you want to ignore last minute because it contains incomplete data
+- [Issue #1241](https://github.com/grafana/grafana/issues/1242). Timepicker: New option in timepicker (under dashboard settings), to change ``now`` to be for example ``now-1m``, useful when you want to ignore last minute because it contains incomplete data
 - [Issue #171](https://github.com/grafana/grafana/issues/171). Panel: Different time periods, panels can override dashboard relative time and/or add a time shift
 - [Issue #1488](https://github.com/grafana/grafana/issues/1488). Dashboard: Clone dashboard / Save as
 - [Issue #1458](https://github.com/grafana/grafana/issues/1458). User: persisted user option for dark or light theme (no longer an option on a dashboard)
@@ -1198,7 +1223,7 @@ Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated

 **OpenTSDB breaking change**
 - [Issue #1438](https://github.com/grafana/grafana/issues/1438). OpenTSDB: Automatic downsample interval passed to OpenTSDB (depends on timespan and graph width)
-- NOTICE, Downsampling is now enabled by default, so if you have not picked a downsample aggregator in your metric query do so or your graphs will be missleading
+- NOTICE, Downsampling is now enabled by default, so if you have not picked a downsample aggregator in your metric query do so or your graphs will be misleading
 - This will make Grafana a lot quicker for OpenTSDB users when viewing large time spans without having to change the downsample interval manually.

 **Tech**
@@ -1229,7 +1254,7 @@ Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated
 - [Issue #1114](https://github.com/grafana/grafana/issues/1114). Graphite: Lexer fix, allow equal sign (=) in metric paths
 - [Issue #1136](https://github.com/grafana/grafana/issues/1136). Graph: Fix to legend value Max and negative values
 - [Issue #1150](https://github.com/grafana/grafana/issues/1150). SinglestatPanel: Fixed absolute drilldown link issue
-- [Issue #1123](https://github.com/grafana/grafana/issues/1123). Firefox: Workaround for Firefox bug, casued input text fields to not be selectable and not have placeable cursor
+- [Issue #1123](https://github.com/grafana/grafana/issues/1123). Firefox: Workaround for Firefox bug, caused input text fields to not be selectable and not have placeable cursor
 - [Issue #1108](https://github.com/grafana/grafana/issues/1108). Graph: Fix for tooltip series order when series draw order was changed with zindex property

 # 1.9.0-rc1 (2014-11-17)
@@ -1306,7 +1331,7 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
 - [Issue #234](https://github.com/grafana/grafana/issues/234). Templating: Interval variable type for time intervals summarize/group by parameter, included "auto" option, and auto step counts option.
 - [Issue #262](https://github.com/grafana/grafana/issues/262). Templating: Ability to use template variables for function parameters via custom variable type, can be used as parameter for movingAverage or scaleToSeconds for example
 - [Issue #312](https://github.com/grafana/grafana/issues/312). Templating: Can now use template variables in panel titles
-- [Issue #613](https://github.com/grafana/grafana/issues/613). Templating: Full support for InfluxDB, filter by part of series names, extract series substrings, nested queries, multipe where clauses!
+- [Issue #613](https://github.com/grafana/grafana/issues/613). Templating: Full support for InfluxDB, filter by part of series names, extract series substrings, nested queries, multiple where clauses!
 - Template variables can be initialized from url, with var-my_varname=value, breaking change, before it was just my_varname.
 - Templating and url state sync has some issues that are not solved for this release, see [Issue #772](https://github.com/grafana/grafana/issues/772) for more details.

@@ -1395,7 +1420,7 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
 - [Issue #136](https://github.com/grafana/grafana/issues/136). Graph: New legend display option "Align as table"
 - [Issue #556](https://github.com/grafana/grafana/issues/556). Graph: New legend display option "Right side", will show legend to the right of the graph
 - [Issue #604](https://github.com/grafana/grafana/issues/604). Graph: New axis format, 'bps' (SI unit in steps of 1000) useful for network gear metics
-- [Issue #626](https://github.com/grafana/grafana/issues/626). Graph: Downscale y axis to more precise unit, value of 0.1 for seconds format will be formated as 100 ms. Thanks @kamaradclimber
+- [Issue #626](https://github.com/grafana/grafana/issues/626). Graph: Downscale y axis to more precise unit, value of 0.1 for seconds format will be formatted as 100 ms. Thanks @kamaradclimber
 - [Issue #618](https://github.com/grafana/grafana/issues/618). OpenTSDB: Series alias option to override metric name returned from opentsdb. Thanks @heldr

 **Documentation**
@@ -1425,13 +1450,13 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
 - [Issue #522](https://github.com/grafana/grafana/issues/522). Series names and column name typeahead cache fix
 - [Issue #504](https://github.com/grafana/grafana/issues/504). Fixed influxdb issue with raw query that caused wrong value column detection
 - [Issue #526](https://github.com/grafana/grafana/issues/526). Default property that marks which datasource is default in config.js is now optional
-- [Issue #342](https://github.com/grafana/grafana/issues/342). Auto-refresh caused 2 refreshes (and hence mulitple queries) each time (at least in firefox)
+- [Issue #342](https://github.com/grafana/grafana/issues/342). Auto-refresh caused 2 refreshes (and hence multiple queries) each time (at least in firefox)

 # 1.6.0 (2014-06-16)

 #### New features or improvements
 - [Issue #427](https://github.com/grafana/grafana/issues/427). New Y-axis formater for metric values that represent seconds, Thanks @jippi
-- [Issue #390](https://github.com/grafana/grafana/issues/390). Allow special characters in serie names (influxdb datasource), Thanks @majst01
+- [Issue #390](https://github.com/grafana/grafana/issues/390). Allow special characters in series names (influxdb datasource), Thanks @majst01
 - [Issue #428](https://github.com/grafana/grafana/issues/428). Refactoring of filterSrv, Thanks @Tetha
 - [Issue #445](https://github.com/grafana/grafana/issues/445). New config for playlist feature. Set playlist_timespan to set default playlist interval, Thanks @rmca
 - [Issue #461](https://github.com/grafana/grafana/issues/461). New graphite function definition added isNonNull, Thanks @tmonk42
@@ -1452,13 +1477,13 @@ Read this [blog post](https://grafana.com/blog/2014/09/11/grafana-1.8.0-rc1-rele
 - [Issue #475](https://github.com/grafana/grafana/issues/475). Add panel icon and Row edit button is replaced by the Row edit menu
 - New graphs now have a default empty query
 - Add Row button now creates a row with default height of 250px (no longer opens dashboard settings modal)
-- Clean up of config.sample.js, graphiteUrl removed (still works, but depricated, removed in future)
+- Clean up of config.sample.js, graphiteUrl removed (still works, but deprecated, removed in future)
 Use datasources config instead. panel_names removed from config.js. Use plugins.panels to add custom panels
 - Graphite panel is now renamed graph (Existing dashboards will still work)

 #### Fixes
 - [Issue #126](https://github.com/grafana/grafana/issues/126). Graphite query lexer change, can now handle regex parameters for aliasSub function
-- [Issue #447](https://github.com/grafana/grafana/issues/447). Filter option loading when having muliple nested filters now works better. Options are now reloaded correctly and there are no multiple renders/refresh inbetween.
+- [Issue #447](https://github.com/grafana/grafana/issues/447). Filter option loading when having muliple nested filters now works better. Options are now reloaded correctly and there are no multiple renders/refresh in between.
 - [Issue #412](https://github.com/grafana/grafana/issues/412). After a filter option is changed and a nested template param is reloaded, if the current value exists after the options are reloaded the current selected value is kept.
 - [Issue #460](https://github.com/grafana/grafana/issues/460). Legend Current value did not display when value was zero
 - [Issue #328](https://github.com/grafana/grafana/issues/328). Fix to series toggling bug that caused annotations to be hidden when toggling/hiding series.
@@ -8,6 +8,4 @@ coverage:
 patch: yes
 changes: no

-comment:
-  layout: "diff"
-  behavior: "once"
+comment: off
@@ -64,7 +64,7 @@
 #################################### Database ####################################
 [database]
 # You can configure the database connection by specifying type, host, name, user and password
-# as seperate properties or as on string using the url propertie.
+# as separate properties or as on string using the url properties.

 # Either "mysql", "postgres" or "sqlite3", it's your choice
 ;type = sqlite3
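The url form that the corrected comment refers to is a single connection-string property. A minimal sketch of that form, assuming Grafana's standard [database] options (the credentials, host and database name below are placeholders, not values from this diff):

[database]
;url = mysql://grafana:password@127.0.0.1:3306/grafana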
@@ -38,7 +38,7 @@ CACHE_QUERY_PORT = 7002

 LOG_UPDATES = False

-# Enable AMQP if you want to receve metrics using an amqp broker
+# Enable AMQP if you want to receive metrics using an amqp broker
 # ENABLE_AMQP = False

 # Verbose means a line will be logged for every metric received
@@ -41,7 +41,7 @@ PICKLE_RECEIVER_PORT = 2004
 CACHE_QUERY_INTERFACE = 0.0.0.0
 CACHE_QUERY_PORT = 7002

-# Enable AMQP if you want to receve metrics using you amqp broker
+# Enable AMQP if you want to receive metrics using you amqp broker
 ENABLE_AMQP = True

 # Verbose means a line will be logged for every metric received
@@ -265,7 +265,7 @@ WHISPER_FALLOCATE_CREATE = True
 # CARBON_METRIC_PREFIX = carbon
 # CARBON_METRIC_INTERVAL = 60

-# Enable AMQP if you want to receve metrics using an amqp broker
+# Enable AMQP if you want to receive metrics using an amqp broker
 # ENABLE_AMQP = False

 # Verbose means a line will be logged for every metric received
@@ -30,7 +30,7 @@ give_completer_focus = shift-space
 # pertain only to specific metric types.
 #
 # The dashboard presents only metrics that fall into specified naming schemes
-# defined in this file. This creates a simpler, more targetted view of the
+# defined in this file. This creates a simpler, more targeted view of the
 # data. The general form for defining a naming scheme is as follows:
 #
 #[Metric Type]
@@ -100,7 +100,7 @@
 "gnetId": null,
 "graphTooltip": 0,
 "id": null,
-"iteration": 1521715844826,
+"iteration": 1523320861623,
 "links": [],
 "panels": [
 {
@@ -443,7 +443,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -522,7 +526,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -601,7 +609,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -680,7 +692,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -759,7 +775,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -838,7 +858,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -927,7 +951,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1026,7 +1054,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1115,7 +1147,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1196,7 +1232,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1285,7 +1325,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1366,7 +1410,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1455,7 +1503,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1536,7 +1588,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1619,7 +1675,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1702,7 +1762,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1792,7 +1856,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1875,7 +1943,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1965,7 +2037,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -2048,7 +2124,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -2138,7 +2218,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -2221,7 +2305,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -2311,7 +2399,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -2394,7 +2486,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 }
 ],
 "refresh": false,
@@ -2504,5 +2600,5 @@
 "timezone": "",
 "title": "Microsoft SQL Server Data Source Test",
 "uid": "GlAqcPgmz",
-"version": 57
+"version": 58
 }
@@ -2,7 +2,7 @@
 "__inputs": [
 {
 "name": "DS_MYSQL",
-"label": "Mysql",
+"label": "MySQL",
 "description": "",
 "type": "datasource",
 "pluginId": "mysql",
@@ -20,19 +20,19 @@
 "type": "panel",
 "id": "graph",
 "name": "Graph",
-"version": ""
+"version": "5.0.0"
 },
 {
 "type": "datasource",
 "id": "mysql",
 "name": "MySQL",
-"version": "1.0.0"
+"version": "5.0.0"
 },
 {
 "type": "panel",
 "id": "table",
 "name": "Table",
-"version": ""
+"version": "5.0.0"
 }
 ],
 "annotations": {
@@ -53,7 +53,7 @@
 "gnetId": null,
 "graphTooltip": 0,
 "id": null,
-"iteration": 1518602729468,
+"iteration": 1523372133566,
 "links": [],
 "panels": [
 {
@@ -118,7 +118,7 @@
 ],
 "thresholds": [],
 "timeFrom": null,
-"timeShift": "1h",
+"timeShift": null,
 "title": "Average logins / $summarize",
 "tooltip": {
 "shared": true,
@@ -150,7 +150,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -204,7 +208,7 @@
 ],
 "thresholds": [],
 "timeFrom": null,
-"timeShift": "1h",
+"timeShift": null,
 "title": "Average payments started/ended / $summarize",
 "tooltip": {
 "shared": true,
@@ -236,7 +240,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -284,7 +292,7 @@
 ],
 "thresholds": [],
 "timeFrom": null,
-"timeShift": "1h",
+"timeShift": null,
 "title": "Max CPU / $summarize",
 "tooltip": {
 "shared": true,
@@ -316,7 +324,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "columns": [],
@@ -369,7 +381,7 @@
 "target": ""
 }
 ],
-"timeShift": "1h",
+"timeShift": null,
 "title": "Values",
 "transform": "table",
 "type": "table"
@@ -428,7 +440,6 @@
 "auto_count": 5,
 "auto_min": "10s",
 "current": {
 "selected": true,
 "text": "1m",
 "value": "1m"
 },
@@ -545,5 +556,5 @@
 "timezone": "",
 "title": "Grafana Fake Data Gen - MySQL",
 "uid": "DGsCac3kz",
-"version": 6
+"version": 8
 }
@@ -7,9 +7,6 @@
     MYSQL_PASSWORD: password
   ports:
     - "3306:3306"
-  volumes:
-    - /etc/localtime:/etc/localtime:ro
-    - /etc/timezone:/etc/timezone:ro
   command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]

 fake-mysql-data:

docker/blocks/mysql_tests/Dockerfile (new file)
@@ -0,0 +1,3 @@
+FROM mysql:latest
+ADD setup.sql /docker-entrypoint-initdb.d
+CMD ["mysqld"]
@@ -7,14 +7,6 @@
 "type": "datasource",
 "pluginId": "mysql",
 "pluginName": "MySQL"
-},
-{
-"name": "DS_MSSQL_TEST",
-"label": "MSSQL Test",
-"description": "",
-"type": "datasource",
-"pluginId": "mssql",
-"pluginName": "Microsoft SQL Server"
 }
 ],
 "__requires": [
@@ -30,12 +22,6 @@
 "name": "Graph",
 "version": "5.0.0"
 },
-{
-"type": "datasource",
-"id": "mssql",
-"name": "Microsoft SQL Server",
-"version": "1.0.0"
-},
 {
 "type": "datasource",
 "id": "mysql",
@@ -114,7 +100,7 @@
 "gnetId": null,
 "graphTooltip": 0,
 "id": null,
-"iteration": 1521715720483,
+"iteration": 1523320712115,
 "links": [],
 "panels": [
 {
@@ -349,7 +335,7 @@
 {
 "alias": "Time",
 "dateFormat": "YYYY-MM-DD HH:mm:ss",
-"pattern": "time_sec",
+"pattern": "time",
 "type": "date"
 },
 {
@@ -457,7 +443,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -536,7 +526,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -615,7 +609,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -694,7 +692,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -773,7 +775,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -852,7 +858,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -941,7 +951,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1034,7 +1048,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1123,7 +1141,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1204,7 +1226,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1293,7 +1319,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1374,7 +1404,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1463,7 +1497,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1544,7 +1582,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1634,14 +1676,18 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
 "bars": true,
 "dashLength": 10,
 "dashes": false,
-"datasource": "${DS_MSSQL_TEST}",
+"datasource": "${DS_MYSQL_TEST}",
 "fill": 1,
 "gridPos": {
 "h": 8,
@@ -1717,7 +1763,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1807,7 +1857,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1890,7 +1944,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -1980,7 +2038,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -2063,7 +2125,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -2153,7 +2219,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 },
 {
 "aliasColors": {},
@@ -2236,7 +2306,11 @@
 "min": null,
 "show": true
 }
-]
+],
+"yaxis": {
+  "align": false,
+  "alignLevel": null
+}
 }
 ],
 "refresh": false,
@@ -2315,8 +2389,8 @@
 ]
 },
 "time": {
-"from": "2018-03-15T11:30:00.000Z",
-"to": "2018-03-15T12:55:01.000Z"
+"from": "2018-03-15T12:30:00.000Z",
+"to": "2018-03-15T13:55:01.000Z"
 },
 "timepicker": {
 "refresh_intervals": [
@@ -2346,5 +2420,5 @@
 "timezone": "",
 "title": "MySQL Data Source Test",
 "uid": "Hmf8FDkmz",
-"version": 9
+"version": 12
 }
@ -1,5 +1,6 @@
|
||||
mysqltests:
|
||||
image: mysql:latest
|
||||
build:
|
||||
context: blocks/mysql_tests
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: rootpass
|
||||
MYSQL_DATABASE: grafana_tests
|
||||
@ -7,7 +8,4 @@
|
||||
MYSQL_PASSWORD: password
|
||||
ports:
|
||||
- "3306:3306"
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
tmpfs: /var/lib/mysql:rw
|
||||
|
2
docker/blocks/mysql_tests/setup.sql
Normal file
2
docker/blocks/mysql_tests/setup.sql
Normal file
@ -0,0 +1,2 @@
|
||||
CREATE DATABASE grafana_ds_tests;
|
||||
GRANT ALL PRIVILEGES ON grafana_ds_tests.* TO 'grafana';
|
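For context, once the mysql_tests block above is up, the new database can be exercised from Go. The sketch below is illustrative only and is not part of the commit; it assumes the block's published port 3306 on localhost, the grafana/password credentials from the compose file, and the go-sql-driver/mysql driver.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // driver registers itself via side effect
)

func main() {
	// Credentials and port are assumptions taken from the compose block above.
	db, err := sql.Open("mysql", "grafana:password@tcp(localhost:3306)/grafana_ds_tests")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ping verifies that the GRANT in setup.sql actually lets us in.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected to grafana_ds_tests")
}
```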
docker/blocks/postgres_tests/Dockerfile (new file)
@ -0,0 +1,3 @@
FROM postgres:latest
ADD setup.sql /docker-entrypoint-initdb.d
CMD ["postgres"]
@ -100,7 +100,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1521725946837,
"iteration": 1523320929325,
"links": [],
"panels": [
{
@ -443,7 +451,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -522,7 +526,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -601,7 +609,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -680,7 +692,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -759,7 +775,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -838,7 +858,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -927,7 +951,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1008,7 +1036,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1097,7 +1129,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1178,7 +1214,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1267,7 +1307,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1348,7 +1392,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1437,7 +1485,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1518,7 +1570,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1608,7 +1664,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1691,7 +1751,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1781,7 +1845,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1864,7 +1932,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -1954,7 +2026,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2037,7 +2113,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2127,7 +2207,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@ -2210,7 +2294,11 @@
"min": null,
"show": true
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": false,
@ -1,5 +1,6 @@
postgrestest:
image: postgres:latest
build:
context: blocks/postgres_tests
environment:
POSTGRES_USER: grafanatest
POSTGRES_PASSWORD: grafanatest
docker/blocks/postgres_tests/setup.sql (new file)
@ -0,0 +1,3 @@
CREATE DATABASE grafanadstest;
REVOKE CONNECT ON DATABASE grafanadstest FROM PUBLIC;
GRANT CONNECT ON DATABASE grafanadstest TO grafanatest;
@ -22,6 +22,6 @@ log() {
log $RUN_CMD
$RUN_CMD

# Exit immidiately in case of any errors or when we have interactive terminal
# Exit immediately in case of any errors or when we have interactive terminal
if [[ $? != 0 ]] || test -t 0; then exit $?; fi
log

@ -206,7 +206,7 @@ When Grafana starts, it will update/insert all dashboards available in the confi

### Reuseable Dashboard Urls

If the dashboard in the json file contains an [uid](/reference/dashboard/#json-fields), Grafana will force insert/update on that uid. This allows you to migrate dashboards betweens Grafana instances and provisioning Grafana from configuration without breaking the urls given since the new dashboard url uses the uid as identifer.
If the dashboard in the json file contains an [uid](/reference/dashboard/#json-fields), Grafana will force insert/update on that uid. This allows you to migrate dashboards betweens Grafana instances and provisioning Grafana from configuration without breaking the urls given since the new dashboard url uses the uid as identifier.
When Grafana starts, it will update/insert all dashboards available in the configured folders. If you modify the file, the dashboard will also be updated.
By default Grafana will delete dashboards in the database if the file is removed. You can disable this behavior using the `disableDeletion` setting.

@ -153,10 +153,10 @@ Prometheus Alertmanager | `prometheus-alertmanager` | no

# Enable images in notifications {#external-image-store}

Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessable (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessible (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
Amazon S3, Webdav, Google Cloud Storage and Azure Blob Storage. So to set that up you need to configure the [external image uploader](/installation/configuration/#external-image-storage) in your grafana-server ini config file.

Be aware that some notifiers requires public access to the image to be able to include it in the notification. So make sure to enable public access to the images. If your using local image uploader, your Grafana instance need to be accessible by the internet.
Be aware that some notifiers requires public access to the image to be able to include it in the notification. So make sure to enable public access to the images. If you're using local image uploader, your Grafana instance need to be accessible by the internet.

Currently only the Email Channels attaches images if no external image store is specified. To include images in alert notifications for other channels then you need to set up an external image store.

@ -110,7 +110,7 @@ to `Keep Last State` in order to basically ignore them.

## Notifications

In alert tab you can also specify alert rule notifications along with a detailed messsage about the alert rule.
In alert tab you can also specify alert rule notifications along with a detailed message about the alert rule.
The message can contain anything, information about how you might solve the issue, link to runbook, etc.

The actual notifications are configured and shared between multiple alerts. Read the

@ -1,6 +1,6 @@
+++
title = "Contributor Licence Agreement (CLA)"
description = "Contributer Licence Agreement (CLA)"
description = "Contributor Licence Agreement (CLA)"
type = "docs"
aliases = ["/project/cla", "docs/contributing/cla.html"]
[menu.docs]
@ -101,4 +101,4 @@ TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU [OR US]
<br>
<br>
<br>
This CLA aggreement is based on the [Harmony Contributor Aggrement Template (combined)](http://www.harmonyagreements.org/agreements.html), [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/)
This CLA agreement is based on the [Harmony Contributor Aggrement Template (combined)](http://www.harmonyagreements.org/agreements.html), [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/)

@ -55,6 +55,22 @@ a time pattern for the index name or a wildcard.

Be sure to specify your Elasticsearch version in the version selection dropdown. This is very important as there are differences how queries are composed. Currently only 2.x and 5.x
are supported.

### Min time interval
A lower limit for the auto group by time interval. Recommended to be set to write frequency, for example `1m` if your data is written every minute.
This option can also be overridden/configured in a dashboard panel under data source options. It's important to note that this value **needs** to be formated as a
number followed by a valid time identifier, e.g. `1m` (1 minute) or `30s` (30 seconds). The following time identifiers are supported:

Identifier | Description
------------ | -------------
`y` | year
`M` | month
`w` | week
`d` | day
`h` | hour
`m` | minute
`s` | second
`ms` | millisecond
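The format described above (a number followed by one of the identifiers in the table) is easy to check mechanically. The sketch below is illustrative only and is not Grafana's actual parser; note that `ms` has to be tried before `m` and `s` in the alternation.

```go
package main

import (
	"fmt"
	"regexp"
)

// intervalRe matches a number followed by one of the supported time
// identifiers from the table above ("ms" must come before "m" and "s").
var intervalRe = regexp.MustCompile(`^(\d+)(ms|y|M|w|d|h|m|s)$`)

func validInterval(s string) bool {
	return intervalRe.MatchString(s)
}

func main() {
	for _, s := range []string{"1m", "30s", "10ms", "5x", "m"} {
		fmt.Printf("%-5s -> %v\n", s, validInterval(s))
	}
}
```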
## Metric Query editor



@ -39,6 +39,22 @@ Proxy access means that the Grafana backend will proxy all requests from the bro
`grafana-server`. This means that the URL you specify needs to be accessible from the server you are running Grafana on. Proxy access
mode is also more secure as the username & password will never reach the browser.

### Min time interval
A lower limit for the auto group by time interval. Recommended to be set to write frequency, for example `1m` if your data is written every minute.
This option can also be overridden/configured in a dashboard panel under data source options. It's important to note that this value **needs** to be formated as a
number followed by a valid time identifier, e.g. `1m` (1 minute) or `30s` (30 seconds). The following time identifiers are supported:

Identifier | Description
------------ | -------------
`y` | year
`M` | month
`w` | week
`d` | day
`h` | hour
`m` | minute
`s` | second
`ms` | millisecond

## Query Editor

{{< docs-imagebox img="/img/docs/v45/influxdb_query_still.png" class="docs-image--no-shadow" animated-gif="/img/docs/v45/influxdb_query.gif" >}}

@ -78,7 +78,7 @@ the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` o

### Nested Templating

One template variable can be used to filter tag values for another template varible. First parameter is the metric name,
One template variable can be used to filter tag values for another template variable. First parameter is the metric name,
second parameter is the tag key for which you need to find tag values, and after that all other dependent template variables.
Some examples are mentioned below to make nested template queries work successfully.

@ -106,4 +106,4 @@ datasources:
jsonData:
tsdbResolution: 1
tsdbVersion: 1
```
```

@ -14,7 +14,7 @@ weight = 4

{{< docs-imagebox img="/img/docs/v45/alert-list-panel.png" max-width="850px" >}}

The alert list panel allows you to display your dashbords alerts. The list can be configured to show current state or recent state changes. You can read more about alerts [here](http://docs.grafana.org/alerting/rules).
The alert list panel allows you to display your dashboards alerts. The list can be configured to show current state or recent state changes. You can read more about alerts [here](http://docs.grafana.org/alerting/rules).

## Alert List Options

@ -25,7 +25,7 @@ The dashboard list panel allows you to display dynamic links to other dashboards
1. **Starred**: The starred dashboard selection displays starred dashboards in alphabetical order.
2. **Recently Viewed**: The recently viewed dashboard selection displays recently viewed dashboards in alphabetical order.
3. **Search**: The search dashboard selection displays dashboards by search query or tag(s).
4. **Show Headings**: When show headings is ticked the choosen list selection(Starred, Recently Viewed, Search) is shown as a heading.
4. **Show Headings**: When show headings is ticked the chosen list selection(Starred, Recently Viewed, Search) is shown as a heading.
5. **Max Items**: Max items set the maximum of items in a list.
6. **Query**: Here is where you enter your query you want to search by. Queries are case-insensitive, and partial values are accepted.
7. **Tags**: Here is where you enter your tag(s) you want to search by. Note that existing tags will not appear as you type, and *are* case sensitive. To see a list of existing tags, you can always return to the dashboard, open the Dashboard Picker at the top and click `tags` link in the search bar.

@ -30,7 +30,7 @@ The singlestat panel has a normal query editor to allow you define your exact me
* **total** - The sum of all the non-null values in the series
* **first** - The first value in the series
* **delta** - The total incremental increase (of a counter) in the series. An attempt is made to account for counter resets, but this will only be accurate for single instance metrics. Used to show total counter increase in time series.
* **diff** - The difference betwen 'current' (last value) and 'first'.
* **diff** - The difference between 'current' (last value) and 'first'.
* **range** - The difference between 'min' and 'max'. Useful the show the range of change for a gauge.
2. **Prefix/Postfix**: The Prefix/Postfix fields let you define a custom label to appear *before/after* the value. The `$__name` variable can be used here to use the series name or alias from the metric query.
3. **Units**: Units are appended to the the Singlestat within the panel, and will respect the color and threshold settings for the value.
@ -70,7 +70,7 @@ Gauges gives a clear picture of how high a value is in it's context. It's a grea

{{< docs-imagebox img="/img/docs/v45/singlestat-gauge-options.png" max-width="500px" class="docs-image--right docs-image--no-shadow">}}

1. **Show**: The show checkbox will toggle wether the gauge is shown in the panel. When unselected, only the Singlestat value will appear.
1. **Show**: The show checkbox will toggle whether the gauge is shown in the panel. When unselected, only the Singlestat value will appear.
2. **Min/Max**: This sets the start and end point for the gauge.
3. **Threshold Labels**: Check if you want to show the threshold labels. Thresholds are set in the color options.
4. **Threshold Markers**: Check if you want to have a second meter showing the thresholds.

@ -15,7 +15,7 @@ support for multiple Cloudwatch credentials.
<img src="/assets/img/features/table-panel.png">

The new table panel is very flexible, supporting both multiple modes for time series as well as for
table, annotation and raw JSON data. It also provides date formating and value formating and coloring options.
table, annotation and raw JSON data. It also provides date formatting and value formatting and coloring options.

### Time series to rows

@ -33,7 +33,7 @@ You can enable/disable the shared tooltip from the dashboard settings menu or cy

{{< imgbox max-width="60%" img="/img/docs/v41/helptext_for_panel_settings.png" caption="Hovering help text" >}}

You can set a help text in the general tab on any panel. The help text is using Markdown to enable better formating and linking to other sites that can provide more information.
You can set a help text in the general tab on any panel. The help text is using Markdown to enable better formatting and linking to other sites that can provide more information.

<div class="clearfix"></div>

@ -12,7 +12,7 @@ weight = -4

# What's New in Grafana v4.5

## Hightlights
## Highlights

### New prometheus query editor

@ -62,7 +62,7 @@ Datas source selection & options & help are now above your metric queries.
### Minor Changes

* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
* **InfluxDB**: Added paranthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
* **InfluxDB**: Added parenthesis around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)

## Bug Fixes

@ -45,7 +45,7 @@ This makes exploring and filtering Prometheus data much easier.

* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
* **Graph**: Add support for local formating in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Jaeger**: Add support for open tracing using jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
* **CLI**: Make it possible to install plugins from any url [#5873](https://github.com/grafana/grafana/issues/5873)

@ -307,7 +307,7 @@ Content-Type: application/json

`PUT /api/orgs/:orgId`

Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented yet.
Update Organisation, fields *Address 1*, *Address 2*, *City* are not implemented yet.

**Example Request**:

@ -436,4 +436,4 @@ HTTP/1.1 200
Content-Type: application/json

{"message":"User removed from organization"}
```
```

@ -482,7 +482,7 @@ Set api_url to the resource that returns [OpenID UserInfo](https://connect2id.co

First set up Grafana as an OpenId client "webapplication" in Okta. Then set the Base URIs to `https://<grafana domain>/` and set the Login redirect URIs to `https://<grafana domain>/login/generic_oauth`.

Finaly set up the generic oauth module like this:
Finally set up the generic oauth module like this:
```bash
[auth.generic_oauth]
name = Okta

@ -12,7 +12,7 @@ weight = 4

# Installing using Docker

Grafana is very easy to install and run using the offical docker container.
Grafana is very easy to install and run using the official docker container.

```bash
$ docker run -d -p 3000:3000 grafana/grafana

@ -25,7 +25,7 @@ Before upgrading it can be a good idea to backup your Grafana database. This wil

If you use sqlite you only need to make a backup of your `grafana.db` file. This is usually located at `/var/lib/grafana/grafana.db` on unix system.
If you are unsure what database you use and where it is stored check you grafana configuration file. If you
installed grafana to custom location using a binary tar/zip it is usally in `<grafana_install_dir>/data`.
installed grafana to custom location using a binary tar/zip it is usually in `<grafana_install_dir>/data`.

#### mysql

@ -49,7 +49,7 @@ Click the back button to rewind to the previous Dashboard in the Playlist.
In TV mode the top navbar, row & panel controls will all fade to transparent.

This happens automatically after one minute of user inactivity but can also be toggled manually
with the `d v` sequence shortcut. Any mouse movement or keyboard action will
with the `d v` sequence shortcut, or by appending the parameter `?inactive` to the dashboard URL. Any mouse movement or keyboard action will
restore navbar & controls.

Another feature is the kiosk mode - in kiosk mode the navbar is completely hidden/removed from view. This can be enabled with the `d k`

@ -168,7 +168,7 @@ Option | Description
*Include All option* | Add a special `All` option whose value includes all options.
*Custom all value* | By default the `All` value will include all options in combined expression. This can become very long and can have performance problems. Many times it can be better to specify a custom all value, like a wildcard regex. To make it possible to have custom regex, globs or lucene syntax in the **Custom all value** option it is never escaped so you will have to think avbout what is a valid value for your data source.

### Formating multiple values
### Formatting multiple values

Interpolating a variable with multiple values selected is tricky as it is not straight forward how to format the multiple values to into a string that
is valid in the given context where the variable is used. Grafana tries to solve this by allowing each data source plugin to
@ -186,7 +186,7 @@ break the regex expression.
**Elasticsearch** uses lucene query syntax, so the same variable would, in this case, be formatted as `("host1" OR "host2" OR "host3")`. In this case every value
needs to be escaped so that the value can contain lucene control words and quotation marks.
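As a rough illustration of the two renderings just described, the helpers below reproduce the regex and Lucene formats for a multi-value variable. They are a sketch, not Grafana's actual implementation, and they skip the per-value escaping the docs mention.

```go
package main

import (
	"fmt"
	"strings"
)

// formatRegex renders values the way a regex-context data source
// would consume them: (host1|host2|host3).
func formatRegex(values []string) string {
	return "(" + strings.Join(values, "|") + ")"
}

// formatLucene renders values in Lucene query syntax:
// ("host1" OR "host2" OR "host3").
func formatLucene(values []string) string {
	quoted := make([]string, len(values))
	for i, v := range values {
		quoted[i] = `"` + v + `"`
	}
	return "(" + strings.Join(quoted, " OR ") + ")"
}

func main() {
	hosts := []string{"host1", "host2", "host3"}
	fmt.Println(formatRegex(hosts))  // (host1|host2|host3)
	fmt.Println(formatLucene(hosts)) // ("host1" OR "host2" OR "host3")
}
```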
#### Formating troubles
#### Formatting troubles

Automatic escaping & formatting can cause problems and it can be tricky to grasp the logic is behind it.
Especially for InfluxDB and Prometheus where the use of regex syntax requires that the variable is used in regex operator context.

@ -108,7 +108,7 @@ In this example we use Apache as a reverseProxy in front of Grafana. Apache hand

* The next part of the configuration is the tricky part. We use Apache’s rewrite engine to create our **X-WEBAUTH-USER header**, populated with the authenticated user.

* **RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER}, NS]**: This line is a little bit of magic. What it does, is for every request use the rewriteEngines look-ahead (LA-U) feature to determine what the REMOTE_USER variable would be set to after processing the request. Then assign the result to the variable PROXY_USER. This is neccessary as the REMOTE_USER variable is not available to the RequestHeader function.
* **RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER}, NS]**: This line is a little bit of magic. What it does, is for every request use the rewriteEngines look-ahead (LA-U) feature to determine what the REMOTE_USER variable would be set to after processing the request. Then assign the result to the variable PROXY_USER. This is necessary as the REMOTE_USER variable is not available to the RequestHeader function.

* **RequestHeader set X-WEBAUTH-USER “%{PROXY_USER}e”**: With the authenticated username now stored in the PROXY_USER variable, we create a new HTTP request header that will be sent to our backend Grafana containing the username.
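To make the header contract concrete, here is a minimal upstream handler that trusts `X-WEBAUTH-USER`, which is what Grafana's auth proxy mode expects Apache to set in this kind of deployment. This is an illustrative sketch, not part of Grafana itself.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// The reverse proxy is responsible for authenticating the user
		// and setting this header before the request reaches us.
		user := r.Header.Get("X-WEBAUTH-USER")
		if user == "" {
			http.Error(w, "no authenticated user", http.StatusUnauthorized)
			return
		}
		fmt.Fprintf(w, "hello, %s\n", user)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```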
@ -149,7 +149,7 @@ auto_sign_up = true

##### Grafana Container

For this example, we use the offical Grafana docker image available at [Docker Hub](https://hub.docker.com/r/grafana/grafana/)
For this example, we use the official Grafana docker image available at [Docker Hub](https://hub.docker.com/r/grafana/grafana/)

* Create a file `grafana.ini` with the following contents

@ -166,7 +166,7 @@ header_property = username
auto_sign_up = true
```

* Launch the Grafana container, using our custom grafana.ini to replace `/etc/grafana/grafana.ini`. We dont expose any ports for this container as it will only be connected to by our Apache container.
* Launch the Grafana container, using our custom grafana.ini to replace `/etc/grafana/grafana.ini`. We don't expose any ports for this container as it will only be connected to by our Apache container.

```bash
docker run -i -v $(pwd)/grafana.ini:/etc/grafana/grafana.ini --name grafana grafana/grafana
@ -174,7 +174,7 @@ docker run -i -v $(pwd)/grafana.ini:/etc/grafana/grafana.ini --name grafana graf

### Apache Container

For this example we use the offical Apache docker image available at [Docker Hub](https://hub.docker.com/_/httpd/)
For this example we use the official Apache docker image available at [Docker Hub](https://hub.docker.com/_/httpd/)

* Create a file `httpd.conf` with the following contents

@ -244,4 +244,4 @@ ProxyPassReverse / http://grafana:3000/

### Use grafana.

With our Grafana and Apache containers running, you can now connect to http://localhost/ and log in using the username/password we created in the htpasswd file.
With our Grafana and Apache containers running, you can now connect to http://localhost/ and log in using the username/password we created in the htpasswd file.

@ -29,6 +29,11 @@ func GetDashboardPermissionList(c *m.ReqContext) Response {
}

for _, perm := range acl {
perm.UserAvatarUrl = dtos.GetGravatarUrl(perm.UserEmail)

if perm.TeamId > 0 {
perm.TeamAvatarUrl = dtos.GetGravatarUrlWithDefault(perm.TeamEmail, perm.Team)
}
if perm.Slug != "" {
perm.Url = m.GetDashboardFolderUrl(perm.IsFolder, perm.Uid, perm.Slug)
}

@ -143,7 +143,7 @@ func TestDashboardPermissionApiEndpoint(t *testing.T) {
})
})

Convey("When trying to override inherited permissions with lower presedence", func() {
Convey("When trying to override inherited permissions with lower precedence", func() {
origNewGuardian := guardian.New
guardian.MockDashboardGuardian(&guardian.FakeDashboardGuardian{
CanAdminValue: true,

@ -33,6 +33,12 @@ func GetFolderPermissionList(c *m.ReqContext) Response {
perm.FolderId = folder.Id
perm.DashboardId = 0

perm.UserAvatarUrl = dtos.GetGravatarUrl(perm.UserEmail)

if perm.TeamId > 0 {
perm.TeamAvatarUrl = dtos.GetGravatarUrlWithDefault(perm.TeamEmail, perm.Team)
}

if perm.Slug != "" {
perm.Url = m.GetDashboardFolderUrl(perm.IsFolder, perm.Uid, perm.Slug)
}

@ -118,9 +118,14 @@ func setIndexViewData(c *m.ReqContext) (*dtos.IndexViewData, error) {
})

if c.IsSignedIn {
// Only set login if it's different from the name
var login string
if c.SignedInUser.Login != c.SignedInUser.NameOrFallback() {
login = c.SignedInUser.Login
}
profileNode := &dtos.NavLink{
Text: c.SignedInUser.NameOrFallback(),
SubTitle: c.SignedInUser.Login,
SubTitle: login,
Id: "profile",
Img: data.User.GravatarUrl,
Url: setting.AppSubUrl + "/profile",
@ -284,6 +289,7 @@ func setIndexViewData(c *m.ReqContext) (*dtos.IndexViewData, error) {

data.NavTree = append(data.NavTree, &dtos.NavLink{
Text: "Help",
SubTitle: fmt.Sprintf(`Grafana v%s (%s)`, setting.BuildVersion, setting.BuildCommit),
Id: "help",
Url: "#",
Icon: "gicon gicon-question",

@ -75,7 +75,7 @@ func GetTestDataScenarios(c *m.ReqContext) Response {
return JSON(200, &result)
}

// Genereates a index out of range error
// Generates a index out of range error
func GenerateError(c *m.ReqContext) Response {
var array []string
return JSON(200, array[20])

@ -33,7 +33,7 @@ func ValidateOrgPlaylist(c *m.ReqContext) {
return
}

if len(items) == 0 {
if len(items) == 0 && c.Context.Req.Method != "DELETE" {
c.JsonApiErr(404, "Playlist is empty", itemsErr)
return
}

@ -189,12 +189,6 @@ func (proxy *DataSourceProxy) getDirector() func(req *http.Request) {
}

func (proxy *DataSourceProxy) validateRequest() error {
if proxy.ds.Type == m.DS_INFLUXDB {
if proxy.ctx.Query("db") != proxy.ds.Database {
return errors.New("Datasource is not configured to allow this database")
}
}

if !checkWhiteList(proxy.ctx, proxy.targetUrl.Host) {
return errors.New("Target url is not a valid target")
}

@ -48,7 +48,7 @@ type StaticOptions struct {
// Expires defines which user-defined function to use for producing a HTTP Expires Header
// https://developers.google.com/speed/docs/insights/LeverageBrowserCaching
AddHeaders func(ctx *macaron.Context)
// FileSystem is the interface for supporting any implmentation of file system.
// FileSystem is the interface for supporting any implementation of file system.
FileSystem http.FileSystem
}

@ -33,7 +33,7 @@ func validateInput(c CommandLine, pluginFolder string) error {
fileInfo, err := os.Stat(pluginsDir)
if err != nil {
if err = os.MkdirAll(pluginsDir, os.ModePerm); err != nil {
return errors.New(fmt.Sprintf("pluginsDir (%s) is not a directory", pluginsDir))
return errors.New(fmt.Sprintf("pluginsDir (%s) is not a writable directory", pluginsDir))
}
return nil
}

@ -42,7 +42,7 @@ func Init(version string, skipTLSVerify bool) {
}

HttpClient = http.Client{
Timeout: time.Duration(10 * time.Second),
Timeout: 10 * time.Second,
Transport: tr,
}
}
|
||||
|
||||
jsonString, _ := json.Marshal(jsonKey)
|
||||
|
||||
result.ClientSecret = base64.StdEncoding.EncodeToString([]byte(jsonString))
|
||||
result.ClientSecret = base64.StdEncoding.EncodeToString(jsonString)
|
||||
return result
|
||||
}
|
||||
|
||||
@ -44,7 +44,7 @@ func Decode(keyString string) (*ApiKeyJson, error) {
|
||||
}
|
||||
|
||||
var keyObj ApiKeyJson
|
||||
err = json.Unmarshal([]byte(jsonString), &keyObj)
|
||||
err = json.Unmarshal(jsonString, &keyObj)
|
||||
if err != nil {
|
||||
return nil, ErrInvalidApiKey
|
||||
}
|
||||
|
@ -22,7 +22,7 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
// changeTypeToSymbol is used for populating the terminating characer in
|
||||
// changeTypeToSymbol is used for populating the terminating character in
|
||||
// the diff
|
||||
changeTypeToSymbol = map[ChangeType]string{
|
||||
ChangeNil: "",
|
||||
|
@ -76,10 +76,10 @@ func TestFirst(t *testing.T) {
|
||||
assert.True(s == "fallback", "must get string return fallback")
|
||||
|
||||
s, err = j.GetString("name")
|
||||
assert.True(s == "anton" && err == nil, "name shoud match")
|
||||
assert.True(s == "anton" && err == nil, "name should match")
|
||||
|
||||
s, err = j.GetString("address", "street")
|
||||
assert.True(s == "Street 42" && err == nil, "street shoud match")
|
||||
assert.True(s == "Street 42" && err == nil, "street should match")
|
||||
//log.Println("s: ", s.String())
|
||||
|
||||
_, err = j.GetNumber("age")
|
||||
|
@ -225,7 +225,7 @@ func (a *Auth) SignRequest(req *http.Request) {
|
||||
)
|
||||
decodedKey, _ := base64.StdEncoding.DecodeString(a.Key)
|
||||
|
||||
sha256 := hmac.New(sha256.New, []byte(decodedKey))
|
||||
sha256 := hmac.New(sha256.New, decodedKey)
|
||||
sha256.Write([]byte(strToSign))
|
||||
|
||||
signature := base64.StdEncoding.EncodeToString(sha256.Sum(nil))
|
||||
|
@ -50,7 +50,7 @@ func (f *Float) UnmarshalJSON(data []byte) error {
|
||||
}
|
||||
switch x := v.(type) {
|
||||
case float64:
|
||||
f.Float64 = float64(x)
|
||||
f.Float64 = x
|
||||
case map[string]interface{}:
|
||||
err = json.Unmarshal(data, &f.NullFloat64)
|
||||
case nil:
|
||||
|
@ -54,6 +54,7 @@ var (
|
||||
M_Alerting_Active_Alerts prometheus.Gauge
|
||||
M_StatTotal_Dashboards prometheus.Gauge
|
||||
M_StatTotal_Users prometheus.Gauge
|
||||
M_StatActive_Users prometheus.Gauge
|
||||
M_StatTotal_Orgs prometheus.Gauge
|
||||
M_StatTotal_Playlists prometheus.Gauge
|
||||
M_Grafana_Version *prometheus.GaugeVec
|
||||
@ -253,6 +254,12 @@ func init() {
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_StatActive_Users = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "stat_active_users",
|
||||
Help: "number of active users",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_StatTotal_Orgs = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "stat_total_orgs",
|
||||
Help: "total amount of orgs",
|
||||
@ -270,7 +277,6 @@ func init() {
|
||||
Help: "Information about the Grafana",
|
||||
Namespace: exporterName,
|
||||
}, []string{"version"})
|
||||
|
||||
}
|
||||
|
||||
func initMetricVars(settings *MetricSettings) {
|
||||
@ -305,6 +311,7 @@ func initMetricVars(settings *MetricSettings) {
|
||||
M_Alerting_Active_Alerts,
|
||||
M_StatTotal_Dashboards,
|
||||
M_StatTotal_Users,
|
||||
M_StatActive_Users,
|
||||
M_StatTotal_Orgs,
|
||||
M_StatTotal_Playlists,
|
||||
M_Grafana_Version)
|
||||
@ -315,35 +322,36 @@ func initMetricVars(settings *MetricSettings) {
|
||||
func instrumentationLoop(settings *MetricSettings) chan struct{} {
|
||||
M_Instance_Start.Inc()
|
||||
|
||||
// set the total stats gauges before we publishing metrics
|
||||
updateTotalStats()
|
||||
|
||||
onceEveryDayTick := time.NewTicker(time.Hour * 24)
|
||||
secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds))
|
||||
everyMinuteTicker := time.NewTicker(time.Minute)
|
||||
defer onceEveryDayTick.Stop()
|
||||
defer everyMinuteTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-onceEveryDayTick.C:
|
||||
sendUsageStats()
|
||||
case <-secondTicker.C:
|
||||
case <-everyMinuteTicker.C:
|
||||
updateTotalStats()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var metricPublishCounter int64 = 0
|
||||
|
||||
func updateTotalStats() {
|
||||
metricPublishCounter++
|
||||
if metricPublishCounter == 1 || metricPublishCounter%10 == 0 {
|
||||
statsQuery := models.GetSystemStatsQuery{}
|
||||
if err := bus.Dispatch(&statsQuery); err != nil {
|
||||
metricsLogger.Error("Failed to get system stats", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
M_StatTotal_Dashboards.Set(float64(statsQuery.Result.Dashboards))
|
||||
M_StatTotal_Users.Set(float64(statsQuery.Result.Users))
|
||||
M_StatTotal_Playlists.Set(float64(statsQuery.Result.Playlists))
|
||||
M_StatTotal_Orgs.Set(float64(statsQuery.Result.Orgs))
|
||||
statsQuery := models.GetSystemStatsQuery{}
|
||||
if err := bus.Dispatch(&statsQuery); err != nil {
|
||||
metricsLogger.Error("Failed to get system stats", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
M_StatTotal_Dashboards.Set(float64(statsQuery.Result.Dashboards))
|
||||
M_StatTotal_Users.Set(float64(statsQuery.Result.Users))
|
||||
M_StatActive_Users.Set(float64(statsQuery.Result.ActiveUsers))
|
||||
M_StatTotal_Playlists.Set(float64(statsQuery.Result.Playlists))
|
||||
M_StatTotal_Orgs.Set(float64(statsQuery.Result.Orgs))
|
||||
}
|
||||
|
||||
func sendUsageStats() {
|
||||
@ -403,6 +411,6 @@ func sendUsageStats() {
|
||||
out, _ := json.MarshalIndent(report, "", " ")
|
||||
data := bytes.NewBuffer(out)
|
||||
|
||||
client := http.Client{Timeout: time.Duration(5 * time.Second)}
|
||||
client := http.Client{Timeout: 5 * time.Second}
|
||||
go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data)
|
||||
}
|
||||
|
@ -35,7 +35,7 @@ var (
|
||||
slash = []byte("/")
|
||||
)
|
||||
|
||||
// stack returns a nicely formated stack frame, skipping skip frames
|
||||
// stack returns a nicely formatted stack frame, skipping skip frames
|
||||
func stack(skip int) []byte {
|
||||
buf := new(bytes.Buffer) // the returned data
|
||||
// As we loop, we open files and read them. These variables record the currently
|
||||
|
@ -56,7 +56,10 @@ type DashboardAclInfoDTO struct {
|
||||
UserId int64 `json:"userId"`
|
||||
UserLogin string `json:"userLogin"`
|
||||
UserEmail string `json:"userEmail"`
|
||||
UserAvatarUrl string `json:"userAvatarUrl"`
|
||||
TeamId int64 `json:"teamId"`
|
||||
TeamEmail string `json:"teamEmail"`
|
||||
TeamAvatarUrl string `json:"teamAvatarUrl"`
|
||||
Team string `json:"team"`
|
||||
Role *RoleType `json:"role,omitempty"`
|
||||
Permission PermissionType `json:"permission"`
|
||||
|
@ -158,7 +158,7 @@ func NewDashboardFromJson(data *simplejson.Json) *Dashboard {
|
||||
return dash
|
||||
}
|
||||
|
||||
// GetDashboardModel turns the command into the savable model
|
||||
// GetDashboardModel turns the command into the saveable model
|
||||
func (cmd *SaveDashboardCommand) GetDashboardModel() *Dashboard {
|
||||
dash := NewDashboardFromJson(cmd.Dashboard)
|
||||
userId := cmd.UserId
|
||||
|
@ -33,7 +33,7 @@ func (ds *DataSource) GetHttpClient() (*http.Client, error) {
|
||||
}
|
||||
|
||||
return &http.Client{
|
||||
Timeout: time.Duration(30 * time.Second),
|
||||
Timeout: 30 * time.Second,
|
||||
Transport: transport,
|
||||
}, nil
|
||||
}
|
||||
|
@ -32,7 +32,7 @@ type Folder struct {
|
||||
HasAcl bool
|
||||
}
|
||||
|
||||
// GetDashboardModel turns the command into the savable model
|
||||
// GetDashboardModel turns the command into the saveable model
|
||||
func (cmd *CreateFolderCommand) GetDashboardModel(orgId int64, userId int64) *Dashboard {
|
||||
dashFolder := NewDashboardFolder(strings.TrimSpace(cmd.Title))
|
||||
dashFolder.OrgId = orgId
|
||||
|
@ -69,7 +69,7 @@ func (pb *PluginBase) registerPlugin(pluginDir string) error {
|
||||
|
||||
for _, include := range pb.Includes {
|
||||
if include.Role == "" {
|
||||
include.Role = m.RoleType(m.ROLE_VIEWER)
|
||||
include.Role = m.ROLE_VIEWER
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -12,7 +12,7 @@ import (
|
||||
|
||||
func TestPluginScans(t *testing.T) {
|
||||
|
||||
Convey("When scaning for plugins", t, func() {
|
||||
Convey("When scanning for plugins", t, func() {
|
||||
setting.StaticRootPath, _ = filepath.Abs("../../public/")
|
||||
setting.Cfg = ini.Empty()
|
||||
err := initPlugins(context.Background())
|
||||
|
@ -37,7 +37,7 @@ func GetPluginSettings(orgId int64) (map[string]*m.PluginSettingInfoDTO, error)
|
||||
|
||||
// if it's included in app check app settings
|
||||
if pluginDef.IncludedInAppId != "" {
|
||||
// app componets are by default disabled
|
||||
// app components are by default disabled
|
||||
opt.Enabled = false
|
||||
|
||||
if appSettings, ok := pluginMap[pluginDef.IncludedInAppId]; ok {
|
||||
|
@ -13,7 +13,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
httpClient http.Client = http.Client{Timeout: time.Duration(10 * time.Second)}
|
||||
httpClient http.Client = http.Client{Timeout: 10 * time.Second}
|
||||
)
|
||||
|
||||
type GrafanaNetPlugin struct {
|
||||
|
@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
type FakeEvalHandler struct {
|
||||
SuccessCallID int // 0 means never sucess
|
||||
SuccessCallID int // 0 means never success
|
||||
CallNb int
|
||||
}
|
||||
|
||||
@ -87,7 +87,7 @@ func TestEngineProcessJob(t *testing.T) {
|
||||
|
||||
Convey("Should trigger as many retries as needed", func() {
|
||||
|
||||
Convey("never sucess -> max retries number", func() {
|
||||
Convey("never success -> max retries number", func() {
|
||||
expectedAttempts := alertMaxAttempts
|
||||
evalHandler := NewFakeEvalHandler(0)
|
||||
engine.evalHandler = evalHandler
|
||||
@ -96,7 +96,7 @@ func TestEngineProcessJob(t *testing.T) {
|
||||
So(evalHandler.CallNb, ShouldEqual, expectedAttempts)
|
||||
})
|
||||
|
||||
Convey("always sucess -> never retry", func() {
|
||||
Convey("always success -> never retry", func() {
|
||||
expectedAttempts := 1
|
||||
evalHandler := NewFakeEvalHandler(1)
|
||||
engine.evalHandler = evalHandler
|
||||
@ -105,7 +105,7 @@ func TestEngineProcessJob(t *testing.T) {
|
||||
So(evalHandler.CallNb, ShouldEqual, expectedAttempts)
|
||||
})
|
||||
|
||||
Convey("some errors before sucess -> some retries", func() {
|
||||
Convey("some errors before success -> some retries", func() {
|
||||
expectedAttempts := int(math.Ceil(float64(alertMaxAttempts) / 2))
|
||||
evalHandler := NewFakeEvalHandler(expectedAttempts)
|
||||
engine.evalHandler = evalHandler
|
||||
|
@ -111,7 +111,7 @@ func (this *HipChatNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
}
|
||||
|
||||
message := ""
|
||||
if evalContext.Rule.State != models.AlertStateOK { //dont add message when going back to alert state ok.
|
||||
if evalContext.Rule.State != models.AlertStateOK { //don't add message when going back to alert state ok.
|
||||
message += " " + evalContext.Rule.Message
|
||||
}
|
||||
|
||||
|
@ -90,7 +90,7 @@ func (this *LineNotifier) createAlert(evalContext *alerting.EvalContext) error {
|
||||
}
|
||||
|
||||
if err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil {
|
||||
this.log.Error("Failed to send notification to LINE", "error", err, "body", string(body))
|
||||
this.log.Error("Failed to send notification to LINE", "error", err, "body", body)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -129,7 +129,7 @@ func (this *SlackNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
}
|
||||
|
||||
message := this.Mention
|
||||
if evalContext.Rule.State != m.AlertStateOK { //dont add message when going back to alert state ok.
|
||||
if evalContext.Rule.State != m.AlertStateOK { //don't add message when going back to alert state ok.
|
||||
message += " " + evalContext.Rule.Message
|
||||
}
|
||||
image_url := ""
|
||||
|
@ -13,7 +13,7 @@ func init() {
|
||||
alerting.RegisterNotifier(&alerting.NotifierPlugin{
|
||||
Type: "teams",
|
||||
Name: "Microsoft Teams",
|
||||
Description: "Sends notifications using Incomming Webhook connector to Microsoft Teams",
|
||||
Description: "Sends notifications using Incoming Webhook connector to Microsoft Teams",
|
||||
Factory: NewTeamsNotifier,
|
||||
OptionsTemplate: `
|
||||
<h3 class="page-heading">Teams settings</h3>
|
||||
@ -76,7 +76,7 @@ func (this *TeamsNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
}
|
||||
|
||||
message := this.Mention
|
||||
if evalContext.Rule.State != m.AlertStateOK { //dont add message when going back to alert state ok.
|
||||
if evalContext.Rule.State != m.AlertStateOK { //don't add message when going back to alert state ok.
|
||||
message += " " + evalContext.Rule.Message
|
||||
} else {
|
||||
message += " " // summary must not be empty
|
||||
|
@ -100,7 +100,7 @@ func TestTelegramNotifier(t *testing.T) {
|
||||
So(caption, ShouldContainSubstring, "Some kind of message that is too long for appending to our pretty little message, this line is actually exactly 197 chars long and I will get there in the end I promise ")
|
||||
})
|
||||
|
||||
Convey("Metrics should be skipped if they dont fit", func() {
|
||||
Convey("Metrics should be skipped if they don't fit", func() {
|
||||
evalContext := alerting.NewEvalContext(nil, &alerting.Rule{
|
||||
Name: "This is an alarm",
|
||||
Message: "Some kind of message that is too long for appending to our pretty little message, this line is actually exactly 197 chars long and I will get there in the end I ",
|
||||
|
@ -56,7 +56,7 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error {
|
||||
|
||||
if err := bus.Dispatch(cmd); err != nil {
|
||||
if err == m.ErrCannotChangeStateOnPausedAlert {
|
||||
handler.log.Error("Cannot change state on alert thats pause", "error", err)
|
||||
handler.log.Error("Cannot change state on alert that's paused", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -58,7 +58,7 @@ func (s *SchedulerImpl) Tick(tickTime time.Time, execQueue chan *Job) {
|
||||
|
||||
if job.OffsetWait && now%job.Offset == 0 {
|
||||
job.OffsetWait = false
|
||||
s.enque(job, execQueue)
|
||||
s.enqueue(job, execQueue)
|
||||
continue
|
||||
}
|
||||
|
||||
@ -66,13 +66,13 @@ func (s *SchedulerImpl) Tick(tickTime time.Time, execQueue chan *Job) {
|
||||
if job.Offset > 0 {
|
||||
job.OffsetWait = true
|
||||
} else {
|
||||
s.enque(job, execQueue)
|
||||
s.enqueue(job, execQueue)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SchedulerImpl) enque(job *Job, execQueue chan *Job) {
|
||||
func (s *SchedulerImpl) enqueue(job *Job, execQueue chan *Job) {
|
||||
s.log.Debug("Scheduler: Putting job on to exec queue", "name", job.Rule.Name, "id", job.Rule.Id)
|
||||
execQueue <- job
|
||||
}
|
||||
|
@ -113,7 +113,7 @@ func (g *dashboardGuardianImpl) checkAcl(permission m.PermissionType, acl []*m.D
|
||||
return false, err
|
||||
}
|
||||
|
||||
// evalute team rules
|
||||
// evaluate team rules
|
||||
for _, p := range acl {
|
||||
for _, ug := range teams {
|
||||
if ug.Id == p.TeamId && p.Permission >= permission {
|
||||
|
@ -58,7 +58,7 @@ func (cr *configReader) readConfig() ([]*DashboardsAsConfig, error) {
|
||||
|
||||
files, err := ioutil.ReadDir(cr.path)
|
||||
if err != nil {
|
||||
cr.log.Error("cant read dashboard provisioning files from directory", "path", cr.path)
|
||||
cr.log.Error("can't read dashboard provisioning files from directory", "path", cr.path)
|
||||
return dashboards, nil
|
||||
}
|
||||
|
||||
|
@ -19,7 +19,7 @@ func (cr *configReader) readConfig(path string) ([]*DatasourcesAsConfig, error)
|
||||
|
||||
files, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
cr.log.Error("cant read datasource provisioning files from directory", "path", path)
|
||||
cr.log.Error("can't read datasource provisioning files from directory", "path", path)
|
||||
return datasources, nil
|
||||
}
|
||||
|
||||
|
@ -21,7 +21,7 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
|
||||
}
|
||||
|
||||
err := GetAlertNotifications(cmd)
|
||||
fmt.Printf("errror %v", err)
|
||||
fmt.Printf("error %v", err)
|
||||
So(err, ShouldBeNil)
|
||||
So(cmd.Result, ShouldBeNil)
|
||||
})
|
||||
|
@ -92,6 +92,7 @@ func GetDashboardAclInfoList(query *m.GetDashboardAclInfoListQuery) error {
|
||||
u.login AS user_login,
|
||||
u.email AS user_email,
|
||||
ug.name AS team,
|
||||
ug.email AS team_email,
|
||||
d.title,
|
||||
d.slug,
|
||||
d.uid,
|
||||
|
@ -258,7 +258,7 @@ func InitTestDB(t *testing.T) *xorm.Engine {
|
||||
// x.ShowSQL()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to init in memory sqllite3 db %v", err)
|
||||
t.Fatalf("Failed to init test database: %v", err)
|
||||
}
|
||||
|
||||
sqlutil.CleanDB(x)
|
||||
@ -269,3 +269,19 @@ func InitTestDB(t *testing.T) *xorm.Engine {
|
||||
|
||||
return x
|
||||
}
|
||||
|
||||
func IsTestDbMySql() bool {
|
||||
if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
|
||||
return db == dbMySql
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func IsTestDbPostgres() bool {
|
||||
if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
|
||||
return db == dbPostgres
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
@ -68,6 +68,7 @@ func GetSystemStats(query *m.GetSystemStatsQuery) error {
|
||||
}
|
||||
|
||||
query.Result = &stats
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -333,6 +333,7 @@ func GetUserOrgList(query *m.GetUserOrgListQuery) error {
|
||||
sess.Join("INNER", "org", "org_user.org_id=org.id")
|
||||
sess.Where("org_user.user_id=?", query.UserId)
|
||||
sess.Cols("org.name", "org_user.role", "org_user.org_id")
|
||||
sess.OrderBy("org.name")
|
||||
err := sess.Find(&query.Result)
|
||||
return err
|
||||
}
|
||||
|
@ -72,7 +72,7 @@ func (e *CloudWatchExecutor) executeAnnotationQuery(ctx context.Context, queryCo
|
||||
MetricName: aws.String(metricName),
|
||||
Dimensions: qd,
|
||||
Statistic: aws.String(s),
|
||||
Period: aws.Int64(int64(period)),
|
||||
Period: aws.Int64(period),
|
||||
}
|
||||
resp, err := svc.DescribeAlarmsForMetric(params)
|
||||
if err != nil {
|
||||
@ -88,7 +88,7 @@ func (e *CloudWatchExecutor) executeAnnotationQuery(ctx context.Context, queryCo
|
||||
MetricName: aws.String(metricName),
|
||||
Dimensions: qd,
|
||||
ExtendedStatistic: aws.String(s),
|
||||
Period: aws.Int64(int64(period)),
|
||||
Period: aws.Int64(period),
|
||||
}
|
||||
resp, err := svc.DescribeAlarmsForMetric(params)
|
||||
if err != nil {
|
||||
|
@ -82,11 +82,11 @@ func (m *MsSqlMacroEngine) evaluateMacro(name string, args []string) (string, er
|
||||
if len(args) == 0 {
|
||||
return "", fmt.Errorf("missing time column argument for macro %v", name)
|
||||
}
|
||||
return fmt.Sprintf("%s >= DATEADD(s, %d, '1970-01-01') AND %s <= DATEADD(s, %d, '1970-01-01')", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
return fmt.Sprintf("%s >= DATEADD(s, %d, '1970-01-01') AND %s <= DATEADD(s, %d, '1970-01-01')", args[0], m.TimeRange.GetFromAsSecondsEpoch(), args[0], m.TimeRange.GetToAsSecondsEpoch()), nil
|
||||
case "__timeFrom":
|
||||
return fmt.Sprintf("DATEADD(second, %d, '1970-01-01')", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
|
||||
return fmt.Sprintf("DATEADD(second, %d, '1970-01-01')", m.TimeRange.GetFromAsSecondsEpoch()), nil
|
||||
case "__timeTo":
|
||||
return fmt.Sprintf("DATEADD(second, %d, '1970-01-01')", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
return fmt.Sprintf("DATEADD(second, %d, '1970-01-01')", m.TimeRange.GetToAsSecondsEpoch()), nil
|
||||
case "__timeGroup":
|
||||
if len(args) < 2 {
|
||||
return "", fmt.Errorf("macro %v needs time column and interval", name)
|
||||
@ -113,11 +113,11 @@ func (m *MsSqlMacroEngine) evaluateMacro(name string, args []string) (string, er
|
||||
if len(args) == 0 {
|
||||
return "", fmt.Errorf("missing time column argument for macro %v", name)
|
||||
}
|
||||
return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], m.TimeRange.GetFromAsSecondsEpoch(), args[0], m.TimeRange.GetToAsSecondsEpoch()), nil
|
||||
case "__unixEpochFrom":
|
||||
return fmt.Sprintf("%d", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
|
||||
return fmt.Sprintf("%d", m.TimeRange.GetFromAsSecondsEpoch()), nil
|
||||
case "__unixEpochTo":
|
||||
return fmt.Sprintf("%d", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
return fmt.Sprintf("%d", m.TimeRange.GetToAsSecondsEpoch()), nil
|
||||
default:
|
||||
return "", fmt.Errorf("Unknown macro %v", name)
|
||||
}
|
||||
|
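The switch to `GetFromAsSecondsEpoch()` matters for dates before 1970, whose Unix epoch is negative; the old `uint64(ms/1000)` cast wrapped those values into huge positive numbers and produced invalid queries (the pre-1970 bug tracked in #11530). A small sketch of the failure mode:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A timestamp before 1970 has a negative Unix epoch.
	before1970 := time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC)
	ms := before1970.UnixNano() / int64(time.Millisecond)

	// The old code cast the seconds to uint64, turning -86400 into a
	// huge positive number and generating an invalid time filter.
	fmt.Println(uint64(ms / 1000)) // 18446744073709465216
	fmt.Println(ms / 1000)         // -86400, what the new code emits
}
```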
@ -1,6 +1,8 @@
package mssql

import (
"fmt"
"strconv"
"testing"

"time"
@ -13,112 +15,213 @@ import (
func TestMacroEngine(t *testing.T) {
Convey("MacroEngine", t, func() {
engine := &MsSqlMacroEngine{}
timeRange := &tsdb.TimeRange{From: "5m", To: "now"}
query := &tsdb.Query{
Model: simplejson.New(),
}

Convey("interpolate __time function", func() {
sql, err := engine.Interpolate(query, nil, "select $__time(time_column)")
So(err, ShouldBeNil)
Convey("Given a time range between 2018-04-12 00:00 and 2018-04-12 00:05", func() {
from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
to := from.Add(5 * time.Minute)
timeRange := tsdb.NewFakeTimeRange("5m", "now", to)

So(sql, ShouldEqual, "select time_column AS time")
Convey("interpolate __time function", func() {
sql, err := engine.Interpolate(query, nil, "select $__time(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select time_column AS time")
})

Convey("interpolate __timeEpoch function", func() {
sql, err := engine.Interpolate(query, nil, "select $__timeEpoch(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select DATEDIFF(second, '1970-01-01', time_column) AS time")
})

Convey("interpolate __timeEpoch function wrapped in aggregation", func() {
sql, err := engine.Interpolate(query, nil, "select min($__timeEpoch(time_column))")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select min(DATEDIFF(second, '1970-01-01', time_column) AS time)")
})

Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= DATEADD(s, %d, '1970-01-01') AND time_column <= DATEADD(s, %d, '1970-01-01')", from.Unix(), to.Unix()))
})

Convey("interpolate __timeGroup function", func() {
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "GROUP BY CAST(ROUND(DATEDIFF(second, '1970-01-01', time_column)/300.0, 0) as bigint)*300")
})

Convey("interpolate __timeGroup function with spaces around arguments", func() {
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column , '5m')")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "GROUP BY CAST(ROUND(DATEDIFF(second, '1970-01-01', time_column)/300.0, 0) as bigint)*300")
})

Convey("interpolate __timeGroup function with fill (value = NULL)", func() {
_, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m', NULL)")

fill := query.Model.Get("fill").MustBool()
fillNull := query.Model.Get("fillNull").MustBool()
fillInterval := query.Model.Get("fillInterval").MustInt()

So(err, ShouldBeNil)
So(fill, ShouldBeTrue)
So(fillNull, ShouldBeTrue)
So(fillInterval, ShouldEqual, 5*time.Minute.Seconds())
})

Convey("interpolate __timeGroup function with fill (value = float)", func() {
_, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m', 1.5)")

fill := query.Model.Get("fill").MustBool()
fillValue := query.Model.Get("fillValue").MustFloat64()
fillInterval := query.Model.Get("fillInterval").MustInt()

So(err, ShouldBeNil)
So(fill, ShouldBeTrue)
So(fillValue, ShouldEqual, 1.5)
So(fillInterval, ShouldEqual, 5*time.Minute.Seconds())
})

Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", from.Unix()))
})

Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", to.Unix()))
})

Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select time_column >= %d AND time_column <= %d", from.Unix(), to.Unix()))
})

Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})

Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})

Convey("interpolate __timeEpoch function", func() {
sql, err := engine.Interpolate(query, nil, "select $__timeEpoch(time_column)")
So(err, ShouldBeNil)
Convey("Given a time range between 1960-02-01 07:00 and 1965-02-03 08:00", func() {
from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
to := time.Date(1965, 2, 3, 8, 0, 0, 0, time.UTC)
timeRange := tsdb.NewTimeRange(strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10), strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))

So(sql, ShouldEqual, "select DATEDIFF(second, '1970-01-01', time_column) AS time")
Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= DATEADD(s, %d, '1970-01-01') AND time_column <= DATEADD(s, %d, '1970-01-01')", from.Unix(), to.Unix()))
})

Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", from.Unix()))
})

Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", to.Unix()))
})

Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select time_column >= %d AND time_column <= %d", from.Unix(), to.Unix()))
})

Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})

Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})

Convey("interpolate __timeEpoch function wrapped in aggregation", func() {
sql, err := engine.Interpolate(query, nil, "select min($__timeEpoch(time_column))")
So(err, ShouldBeNil)
Convey("Given a time range between 1960-02-01 07:00 and 1980-02-03 08:00", func() {
from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
to := time.Date(1980, 2, 3, 8, 0, 0, 0, time.UTC)
timeRange := tsdb.NewTimeRange(strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10), strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))

So(sql, ShouldEqual, "select min(DATEDIFF(second, '1970-01-01', time_column) AS time)")
})
Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)

Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= DATEADD(s, %d, '1970-01-01') AND time_column <= DATEADD(s, %d, '1970-01-01')", from.Unix(), to.Unix()))
})

So(sql, ShouldEqual, "WHERE time_column >= DATEADD(s, 18446744066914186738, '1970-01-01') AND time_column <= DATEADD(s, 18446744066914187038, '1970-01-01')")
})
Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)

Convey("interpolate __timeGroup function", func() {
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", from.Unix()))
})

So(sql, ShouldEqual, "GROUP BY CAST(ROUND(DATEDIFF(second, '1970-01-01', time_column)/300.0, 0) as bigint)*300")
})
Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)

Convey("interpolate __timeGroup function with spaces around arguments", func() {
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column , '5m')")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select DATEADD(second, %d, '1970-01-01')", to.Unix()))
})

So(sql, ShouldEqual, "GROUP BY CAST(ROUND(DATEDIFF(second, '1970-01-01', time_column)/300.0, 0) as bigint)*300")
})
Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time_column)")
So(err, ShouldBeNil)

Convey("interpolate __timeGroup function with fill (value = NULL)", func() {
_, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m', NULL)")
So(sql, ShouldEqual, fmt.Sprintf("select time_column >= %d AND time_column <= %d", from.Unix(), to.Unix()))
})

fill := query.Model.Get("fill").MustBool()
fillNull := query.Model.Get("fillNull").MustBool()
fillInterval := query.Model.Get("fillInterval").MustInt()
Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)

So(err, ShouldBeNil)
So(fill, ShouldBeTrue)
So(fillNull, ShouldBeTrue)
So(fillInterval, ShouldEqual, 5*time.Minute.Seconds())
})
So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})

Convey("interpolate __timeGroup function with fill (value = float)", func() {
_, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m', 1.5)")
Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)

fill := query.Model.Get("fill").MustBool()
fillValue := query.Model.Get("fillValue").MustFloat64()
fillInterval := query.Model.Get("fillInterval").MustInt()

So(err, ShouldBeNil)
So(fill, ShouldBeTrue)
So(fillValue, ShouldEqual, 1.5)
So(fillInterval, ShouldEqual, 5*time.Minute.Seconds())
})

Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select DATEADD(second, 18446744066914186738, '1970-01-01')")
})

Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select DATEADD(second, 18446744066914187038, '1970-01-01')")
})

Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select time_column >= 18446744066914186738 AND time_column <= 18446744066914187038")
})

Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select 18446744066914186738")
})

Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select 18446744066914187038")
So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})
})
}
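The test restructure above wraps each macro test in a "Given a time range ..." block and pins the relative range to a fixed instant, so assertions built from from.Unix() and to.Unix() are deterministic. A sketch of the fixture pattern, with the call shapes taken from the tests themselves (the helper's exact signature is assumed from these call sites):

// Inside a Convey block: pin "now" to a fixed instant so the relative
// "5m"/"now" range resolves to known epochs.
to := time.Date(2018, 4, 12, 18, 5, 0, 0, time.UTC)
from := to.Add(-5 * time.Minute)
timeRange := tsdb.NewFakeTimeRange("5m", "now", to)

// Expected SQL can then be built from from.Unix()/to.Unix() instead of
// depending on the wall clock at test run time.
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= DATEADD(s, %d, '1970-01-01') AND time_column <= DATEADD(s, %d, '1970-01-01')", from.Unix(), to.Unix()))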
@ -8,8 +8,6 @@ import (
"strconv"
"strings"

"time"

"math"

_ "github.com/denisenkom/go-mssqldb"
@ -231,15 +229,18 @@ func (e MssqlQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.
return err
}

// converts column named time to unix timestamp in milliseconds to make
// native mysql datetime types and epoch dates work in
// annotation and table queries.
tsdb.ConvertSqlTimeColumnToEpochMs(values, timeIndex)

switch columnValue := values[timeIndex].(type) {
case int64:
timestamp = float64(columnValue * 1000)
timestamp = float64(columnValue)
case float64:
timestamp = columnValue * 1000
case time.Time:
timestamp = (float64(columnValue.Unix()) * 1000) + float64(columnValue.Nanosecond()/1e6) // in case someone is trying to map times beyond 2262 :D
timestamp = columnValue
default:
return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp")
return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp, got: %T %v", columnValue, columnValue)
}

if metricIndex >= 0 {
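The refactor above centralizes time-column normalization: instead of each case of the switch multiplying by 1000 itself, tsdb.ConvertSqlTimeColumnToEpochMs rewrites the time cell to epoch milliseconds up front, and the switch then only copies the value out. A self-contained sketch of that idea (illustrative only; the real helper also covers int32/float32 and nullable pointer columns, and its scale rules differ in detail, as the tests below pin down per column type):

package main

import "time"

// convertTimeColumnToEpochMs rewrites the time cell to epoch milliseconds,
// so downstream code can treat the column uniformly. Sketch only.
func convertTimeColumnToEpochMs(values []interface{}, timeIndex int) {
	switch v := values[timeIndex].(type) {
	case time.Time:
		// Native datetime: nanoseconds -> milliseconds.
		values[timeIndex] = float64(v.UnixNano()) / float64(time.Millisecond)
	case int64:
		// Assumed epoch seconds in this sketch: scale to milliseconds.
		values[timeIndex] = v * 1000
	case float64:
		values[timeIndex] = v * 1000
	}
}

func main() {
	row := []interface{}{time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC), 42}
	convertTimeColumnToEpochMs(row, 0)
	_ = row // row[0] now holds 1.5211188e+12 (epoch ms as float64)
}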
@ -16,10 +16,10 @@ import (
)

// To run this test, remove the Skip from SkipConvey
// and set up a MSSQL db named grafanatest and a user/password grafana/Password!
// The tests require a MSSQL db named grafanatest and a user/password grafana/Password!
// Use the docker/blocks/mssql_tests/docker-compose.yaml to spin up a
// preconfigured MSSQL server suitable for running these tests.
// Thers's also a dashboard.json in same directory that you can import to Grafana
// There is also a dashboard.json in same directory that you can import to Grafana
// once you've created a datasource for the test server/database.
// If needed, change the variable below to the IP address of the database.
var serverIP string = "localhost"
@ -188,10 +188,8 @@ func TestMSSQL(t *testing.T) {
})
}

for _, s := range series {
_, err = sess.Insert(s)
So(err, ShouldBeNil)
}
_, err = sess.InsertMulti(series)
So(err, ShouldBeNil)

Convey("When doing a metric query using timeGroup", func() {
query := &tsdb.TsdbQuery{
@ -312,10 +310,18 @@ func TestMSSQL(t *testing.T) {

Convey("Given a table with metrics having multiple values and measurements", func() {
type metric_values struct {
Time time.Time
Measurement string
ValueOne int64 `xorm:"integer 'valueOne'"`
ValueTwo int64 `xorm:"integer 'valueTwo'"`
Time time.Time
TimeInt64 int64 `xorm:"bigint 'timeInt64' not null"`
TimeInt64Nullable *int64 `xorm:"bigint 'timeInt64Nullable' null"`
TimeFloat64 float64 `xorm:"float 'timeFloat64' not null"`
TimeFloat64Nullable *float64 `xorm:"float 'timeFloat64Nullable' null"`
TimeInt32 int32 `xorm:"int(11) 'timeInt32' not null"`
TimeInt32Nullable *int32 `xorm:"int(11) 'timeInt32Nullable' null"`
TimeFloat32 float32 `xorm:"float(11) 'timeFloat32' not null"`
TimeFloat32Nullable *float32 `xorm:"float(11) 'timeFloat32Nullable' null"`
Measurement string
ValueOne int64 `xorm:"integer 'valueOne'"`
ValueTwo int64 `xorm:"integer 'valueTwo'"`
}

if exist, err := sess.IsTableExist(metric_values{}); err != nil || exist {
@ -330,26 +336,219 @@ func TestMSSQL(t *testing.T) {
return rand.Int63n(max-min) + min
}

var tInitial time.Time

series := []*metric_values{}
for _, t := range genTimeRangeByInterval(fromStart.Add(-30*time.Minute), 90*time.Minute, 5*time.Minute) {
series = append(series, &metric_values{
Time: t,
Measurement: "Metric A",
ValueOne: rnd(0, 100),
ValueTwo: rnd(0, 100),
})
series = append(series, &metric_values{
Time: t,
Measurement: "Metric B",
ValueOne: rnd(0, 100),
ValueTwo: rnd(0, 100),
})
for i, t := range genTimeRangeByInterval(fromStart.Add(-30*time.Minute), 90*time.Minute, 5*time.Minute) {
if i == 0 {
tInitial = t
}
tSeconds := t.Unix()
tSecondsInt32 := int32(tSeconds)
tSecondsFloat32 := float32(tSeconds)
tMilliseconds := tSeconds * 1e3
tMillisecondsFloat := float64(tMilliseconds)
first := metric_values{
Time: t,
TimeInt64: tMilliseconds,
TimeInt64Nullable: &(tMilliseconds),
TimeFloat64: tMillisecondsFloat,
TimeFloat64Nullable: &tMillisecondsFloat,
TimeInt32: tSecondsInt32,
TimeInt32Nullable: &tSecondsInt32,
TimeFloat32: tSecondsFloat32,
TimeFloat32Nullable: &tSecondsFloat32,
Measurement: "Metric A",
ValueOne: rnd(0, 100),
ValueTwo: rnd(0, 100),
}
second := first
second.Measurement = "Metric B"
second.ValueOne = rnd(0, 100)
second.ValueTwo = rnd(0, 100)

series = append(series, &first)
series = append(series, &second)
}

for _, s := range series {
_, err = sess.Insert(s)
_, err = sess.InsertMulti(series)
So(err, ShouldBeNil)

Convey("When doing a metric query using epoch (int64) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeInt64 as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
}
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (int64 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeInt64Nullable as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (float64) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeFloat64 as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (float64 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeFloat64Nullable as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (int32) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeInt32 as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (int32 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeInt32Nullable as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (float32) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeFloat32 as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(float64(float32(tInitial.Unix())))*1e3)
})

Convey("When doing a metric query using epoch (float32 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT TOP 1 timeFloat32Nullable as time, valueOne FROM metric_values ORDER BY time`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(float64(float32(tInitial.Unix())))*1e3)
})

Convey("When doing a metric query grouping by time and select metric column should return correct series", func() {
query := &tsdb.TsdbQuery{
@ -476,7 +675,6 @@ func TestMSSQL(t *testing.T) {
resp, err := endpoint.Query(nil, nil, query)
queryResult := resp.Results["A"]
So(err, ShouldBeNil)
fmt.Println("query", "sql", queryResult.Meta)
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 4)
@ -696,7 +894,7 @@ func TestMSSQL(t *testing.T) {
columns := queryResult.Tables[0].Rows[0]

//Should be in milliseconds
So(columns[0].(float64), ShouldEqual, float64(dt.Unix()*1000))
So(columns[0].(float64), ShouldEqual, float64(dt.UnixNano()/1e6))
})

Convey("When doing an annotation query with a time column in epoch second format should return ms", func() {
@ -850,15 +1048,15 @@ func TestMSSQL(t *testing.T) {

func InitMSSQLTestDB(t *testing.T) *xorm.Engine {
x, err := xorm.NewEngine(sqlutil.TestDB_Mssql.DriverName, strings.Replace(sqlutil.TestDB_Mssql.ConnStr, "localhost", serverIP, 1))
if err != nil {
t.Fatalf("Failed to init mssql db %v", err)
}

x.DatabaseTZ = time.UTC
x.TZLocation = time.UTC

// x.ShowSQL()

if err != nil {
t.Fatalf("Failed to init mssql db %v", err)
}

return x
}
@ -77,11 +77,11 @@ func (m *MySqlMacroEngine) evaluateMacro(name string, args []string) (string, er
if len(args) == 0 {
return "", fmt.Errorf("missing time column argument for macro %v", name)
}
return fmt.Sprintf("%s >= FROM_UNIXTIME(%d) AND %s <= FROM_UNIXTIME(%d)", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
return fmt.Sprintf("%s >= FROM_UNIXTIME(%d) AND %s <= FROM_UNIXTIME(%d)", args[0], m.TimeRange.GetFromAsSecondsEpoch(), args[0], m.TimeRange.GetToAsSecondsEpoch()), nil
case "__timeFrom":
return fmt.Sprintf("FROM_UNIXTIME(%d)", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
return fmt.Sprintf("FROM_UNIXTIME(%d)", m.TimeRange.GetFromAsSecondsEpoch()), nil
case "__timeTo":
return fmt.Sprintf("FROM_UNIXTIME(%d)", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
return fmt.Sprintf("FROM_UNIXTIME(%d)", m.TimeRange.GetToAsSecondsEpoch()), nil
case "__timeGroup":
if len(args) < 2 {
return "", fmt.Errorf("macro %v needs time column and interval", name)
@ -108,11 +108,11 @@ func (m *MySqlMacroEngine) evaluateMacro(name string, args []string) (string, er
if len(args) == 0 {
return "", fmt.Errorf("missing time column argument for macro %v", name)
}
return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], m.TimeRange.GetFromAsSecondsEpoch(), args[0], m.TimeRange.GetToAsSecondsEpoch()), nil
case "__unixEpochFrom":
return fmt.Sprintf("%d", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
return fmt.Sprintf("%d", m.TimeRange.GetFromAsSecondsEpoch()), nil
case "__unixEpochTo":
return fmt.Sprintf("%d", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
return fmt.Sprintf("%d", m.TimeRange.GetToAsSecondsEpoch()), nil
default:
return "", fmt.Errorf("Unknown macro %v", name)
}
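For a concrete sense of what the MySQL change produces, interpolating a query through the engine (call shapes taken from the tests below; the 1960/1965 dates are just the example range those tests use) now yields a signed seconds epoch:

// Fragment, for illustration: a pre-1970 range interpolated through the engine.
engine := &MySqlMacroEngine{}
from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC) // negative epoch
to := time.Date(1965, 2, 3, 8, 0, 0, 0, time.UTC)
timeRange := tsdb.NewTimeRange(
	strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10),
	strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))

sql, err := engine.Interpolate(&tsdb.Query{}, timeRange, "WHERE $__timeFilter(time_column)")
// err == nil; sql is now, e.g.:
// WHERE time_column >= FROM_UNIXTIME(-312915600) AND time_column <= FROM_UNIXTIME(-154886400)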
@ -1,7 +1,10 @@
package mysql

import (
"fmt"
"strconv"
"testing"
"time"

"github.com/grafana/grafana/pkg/tsdb"
. "github.com/smartystreets/goconvey/convey"
@ -11,79 +14,179 @@ func TestMacroEngine(t *testing.T) {
Convey("MacroEngine", t, func() {
engine := &MySqlMacroEngine{}
query := &tsdb.Query{}
timeRange := &tsdb.TimeRange{From: "5m", To: "now"}

Convey("interpolate __time function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__time(time_column)")
So(err, ShouldBeNil)
Convey("Given a time range between 2018-04-12 00:00 and 2018-04-12 00:05", func() {
from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
to := from.Add(5 * time.Minute)
timeRange := tsdb.NewFakeTimeRange("5m", "now", to)

So(sql, ShouldEqual, "select UNIX_TIMESTAMP(time_column) as time_sec")
Convey("interpolate __time function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__time(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select UNIX_TIMESTAMP(time_column) as time_sec")
})

Convey("interpolate __time function wrapped in aggregation", func() {
sql, err := engine.Interpolate(query, timeRange, "select min($__time(time_column))")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select min(UNIX_TIMESTAMP(time_column) as time_sec)")
})

Convey("interpolate __timeGroup function", func() {

sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "GROUP BY cast(cast(UNIX_TIMESTAMP(time_column)/(300) as signed)*300 as signed)")
})

Convey("interpolate __timeGroup function with spaces around arguments", func() {

sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column , '5m')")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "GROUP BY cast(cast(UNIX_TIMESTAMP(time_column)/(300) as signed)*300 as signed)")
})

Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= FROM_UNIXTIME(%d) AND time_column <= FROM_UNIXTIME(%d)", from.Unix(), to.Unix()))
})

Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", from.Unix()))
})

Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", to.Unix()))
})

Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select time >= %d AND time <= %d", from.Unix(), to.Unix()))
})

Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})

Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})

Convey("interpolate __time function wrapped in aggregation", func() {
sql, err := engine.Interpolate(query, timeRange, "select min($__time(time_column))")
So(err, ShouldBeNil)
Convey("Given a time range between 1960-02-01 07:00 and 1965-02-03 08:00", func() {
from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
to := time.Date(1965, 2, 3, 8, 0, 0, 0, time.UTC)
timeRange := tsdb.NewTimeRange(strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10), strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))

So(sql, ShouldEqual, "select min(UNIX_TIMESTAMP(time_column) as time_sec)")
Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= FROM_UNIXTIME(%d) AND time_column <= FROM_UNIXTIME(%d)", from.Unix(), to.Unix()))
})

Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", from.Unix()))
})

Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", to.Unix()))
})

Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select time >= %d AND time <= %d", from.Unix(), to.Unix()))
})

Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})

Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})

Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)
Convey("Given a time range between 1960-02-01 07:00 and 1980-02-03 08:00", func() {
from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
to := time.Date(1980, 2, 3, 8, 0, 0, 0, time.UTC)
timeRange := tsdb.NewTimeRange(strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10), strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))

So(sql, ShouldEqual, "WHERE time_column >= FROM_UNIXTIME(18446744066914186738) AND time_column <= FROM_UNIXTIME(18446744066914187038)")
Convey("interpolate __timeFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column >= FROM_UNIXTIME(%d) AND time_column <= FROM_UNIXTIME(%d)", from.Unix(), to.Unix()))
})

Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", from.Unix()))
})

Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", to.Unix()))
})

Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select time >= %d AND time <= %d", from.Unix(), to.Unix()))
})

Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
})

Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
})
})

Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select FROM_UNIXTIME(18446744066914186738)")
})

Convey("interpolate __timeGroup function", func() {

sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "GROUP BY cast(cast(UNIX_TIMESTAMP(time_column)/(300) as signed)*300 as signed)")
})

Convey("interpolate __timeGroup function with spaces around arguments", func() {

sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column , '5m')")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "GROUP BY cast(cast(UNIX_TIMESTAMP(time_column)/(300) as signed)*300 as signed)")
})

Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select FROM_UNIXTIME(18446744066914187038)")
})

Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(18446744066914186738)")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select 18446744066914186738 >= 18446744066914186738 AND 18446744066914186738 <= 18446744066914187038")
})

Convey("interpolate __unixEpochFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select 18446744066914186738")
})

Convey("interpolate __unixEpochTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select 18446744066914187038")
})

})
}
@ -8,7 +8,6 @@ import (
"math"
"reflect"
"strconv"
"time"

"github.com/go-sql-driver/mysql"
"github.com/go-xorm/core"
@ -239,15 +238,18 @@ func (e MysqlQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.
return err
}

// converts column named time to unix timestamp in milliseconds to make
// native mysql datetime types and epoch dates work in
// annotation and table queries.
tsdb.ConvertSqlTimeColumnToEpochMs(values, timeIndex)

switch columnValue := values[timeIndex].(type) {
case int64:
timestamp = float64(columnValue * 1000)
timestamp = float64(columnValue)
case float64:
timestamp = columnValue * 1000
case time.Time:
timestamp = float64(columnValue.UnixNano() / 1e6)
timestamp = columnValue
default:
return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp, got: %T %v", columnValue, columnValue)
return fmt.Errorf("Invalid type for column time/time_sec, must be of type timestamp or unix timestamp, got: %T %v", columnValue, columnValue)
}

if metricIndex >= 0 {
@ -3,25 +3,36 @@ package mysql
import (
"fmt"
"math/rand"
"strings"
"testing"
"time"

"github.com/go-xorm/xorm"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/sqlstore/sqlutil"
"github.com/grafana/grafana/pkg/tsdb"
. "github.com/smartystreets/goconvey/convey"
)

// To run this test, remove the Skip from SkipConvey
// and set up a MySQL db named grafana_tests and a user/password grafana/password
// To run this test, set runMySqlTests=true
// Or from the commandline: GRAFANA_TEST_DB=mysql go test -v ./pkg/tsdb/mysql
// The tests require a MySQL db named grafana_ds_tests and a user/password grafana/password
// Use the docker/blocks/mysql_tests/docker-compose.yaml to spin up a
// preconfigured MySQL server suitable for running these tests.
// Thers's also a dashboard.json in same directory that you can import to Grafana
// There is also a dashboard.json in same directory that you can import to Grafana
// once you've created a datasource for the test server/database.
func TestMySQL(t *testing.T) {
SkipConvey("MySQL", t, func() {
// change to true to run the MySQL tests
runMySqlTests := false
// runMySqlTests := true

if !(sqlstore.IsTestDbMySql() || runMySqlTests) {
t.Skip()
}

Convey("MySQL", t, func() {
x := InitMySQLTestDB(t)

endpoint := &MysqlQueryEndpoint{
@ -35,7 +46,7 @@ func TestMySQL(t *testing.T) {
sess := x.NewSession()
defer sess.Close()

fromStart := time.Date(2018, 3, 15, 13, 0, 0, 0, time.Local)
fromStart := time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC)

Convey("Given a table with different native data types", func() {
if exists, err := sess.IsTableExist("mysql_types"); err != nil || exists {
@ -121,9 +132,8 @@ func TestMySQL(t *testing.T) {
So(column[7].(float64), ShouldEqual, 1.11)
So(column[8].(float64), ShouldEqual, 2.22)
So(*column[9].(*float32), ShouldEqual, 3.33)
_, offset := time.Now().Zone()
So(column[10].(time.Time), ShouldHappenWithin, time.Duration(10*time.Second), time.Now().Add(time.Duration(offset)*time.Second))
So(column[11].(time.Time), ShouldHappenWithin, time.Duration(10*time.Second), time.Now().Add(time.Duration(offset)*time.Second))
So(column[10].(time.Time), ShouldHappenWithin, time.Duration(10*time.Second), time.Now())
So(column[11].(time.Time), ShouldHappenWithin, time.Duration(10*time.Second), time.Now())
So(column[12].(string), ShouldEqual, "11:11:11")
So(column[13].(int64), ShouldEqual, 2018)
So(*column[14].(*[]byte), ShouldHaveSameTypeAs, []byte{1})
@ -137,8 +147,8 @@ func TestMySQL(t *testing.T) {
So(column[22].(string), ShouldEqual, "longblob")
So(column[23].(string), ShouldEqual, "val2")
So(column[24].(string), ShouldEqual, "a,b")
So(column[25].(time.Time).Format("2006-01-02T00:00:00Z"), ShouldEqual, time.Now().Format("2006-01-02T00:00:00Z"))
So(column[26].(float64), ShouldEqual, float64(1514764861000))
So(column[25].(time.Time).Format("2006-01-02T00:00:00Z"), ShouldEqual, time.Now().UTC().Format("2006-01-02T00:00:00Z"))
So(column[26].(float64), ShouldEqual, float64(1.514764861123456*1e12))
So(column[27], ShouldEqual, nil)
So(column[28], ShouldEqual, nil)
So(column[29], ShouldEqual, "")
@ -177,10 +187,8 @@ func TestMySQL(t *testing.T) {
})
}

for _, s := range series {
_, err = sess.Insert(s)
So(err, ShouldBeNil)
}
_, err = sess.InsertMulti(series)
So(err, ShouldBeNil)

Convey("When doing a metric query using timeGroup", func() {
query := &tsdb.TsdbQuery{
@ -301,10 +309,19 @@ func TestMySQL(t *testing.T) {

Convey("Given a table with metrics having multiple values and measurements", func() {
type metric_values struct {
Time time.Time
Measurement string
ValueOne int64 `xorm:"integer 'valueOne'"`
ValueTwo int64 `xorm:"integer 'valueTwo'"`
Time time.Time `xorm:"datetime 'time' not null"`
TimeNullable *time.Time `xorm:"datetime(6) 'timeNullable' null"`
TimeInt64 int64 `xorm:"bigint(20) 'timeInt64' not null"`
TimeInt64Nullable *int64 `xorm:"bigint(20) 'timeInt64Nullable' null"`
TimeFloat64 float64 `xorm:"double 'timeFloat64' not null"`
TimeFloat64Nullable *float64 `xorm:"double 'timeFloat64Nullable' null"`
TimeInt32 int32 `xorm:"int(11) 'timeInt32' not null"`
TimeInt32Nullable *int32 `xorm:"int(11) 'timeInt32Nullable' null"`
TimeFloat32 float32 `xorm:"double 'timeFloat32' not null"`
TimeFloat32Nullable *float32 `xorm:"double 'timeFloat32Nullable' null"`
Measurement string
ValueOne int64 `xorm:"integer 'valueOne'"`
ValueTwo int64 `xorm:"integer 'valueTwo'"`
}

if exist, err := sess.IsTableExist(metric_values{}); err != nil || exist {
@ -319,26 +336,265 @@ func TestMySQL(t *testing.T) {
return rand.Int63n(max-min) + min
}

var tInitial time.Time

series := []*metric_values{}
for _, t := range genTimeRangeByInterval(fromStart.Add(-30*time.Minute), 90*time.Minute, 5*time.Minute) {
series = append(series, &metric_values{
Time: t,
Measurement: "Metric A",
ValueOne: rnd(0, 100),
ValueTwo: rnd(0, 100),
})
series = append(series, &metric_values{
Time: t,
Measurement: "Metric B",
ValueOne: rnd(0, 100),
ValueTwo: rnd(0, 100),
})
for i, t := range genTimeRangeByInterval(fromStart.Add(-30*time.Minute), 90*time.Minute, 5*time.Minute) {
if i == 0 {
tInitial = t
}
tSeconds := t.Unix()
tSecondsInt32 := int32(tSeconds)
tSecondsFloat32 := float32(tSeconds)
tMilliseconds := tSeconds * 1e3
tMillisecondsFloat := float64(tMilliseconds)
t2 := t
first := metric_values{
Time: t,
TimeNullable: &t2,
TimeInt64: tMilliseconds,
TimeInt64Nullable: &(tMilliseconds),
TimeFloat64: tMillisecondsFloat,
TimeFloat64Nullable: &tMillisecondsFloat,
TimeInt32: tSecondsInt32,
TimeInt32Nullable: &tSecondsInt32,
TimeFloat32: tSecondsFloat32,
TimeFloat32Nullable: &tSecondsFloat32,
Measurement: "Metric A",
ValueOne: rnd(0, 100),
ValueTwo: rnd(0, 100),
}
second := first
second.Measurement = "Metric B"
second.ValueOne = rnd(0, 100)
second.ValueTwo = rnd(0, 100)

series = append(series, &first)
series = append(series, &second)
}

for _, s := range series {
_, err := sess.Insert(s)
_, err = sess.InsertMulti(series)
So(err, ShouldBeNil)

Convey("When doing a metric query using time as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
}
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using time (nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeNullable as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (int64) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeInt64 as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (int64 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeInt64Nullable as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (float64) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeFloat64 as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (float64 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeFloat64Nullable as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (int32) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeInt32 as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (int32 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeInt32Nullable as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
})

Convey("When doing a metric query using epoch (float32) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeFloat32 as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(float64(float32(tInitial.Unix())))*1e3)
})

Convey("When doing a metric query using epoch (float32 nullable) as time column should return metric with time in milliseconds", func() {
query := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT timeFloat32Nullable as time, valueOne FROM metric_values ORDER BY time LIMIT 1`,
"format": "time_series",
}),
RefId: "A",
},
},
}

resp, err := endpoint.Query(nil, nil, query)
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)

So(len(queryResult.Series), ShouldEqual, 1)
So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(float64(float32(tInitial.Unix())))*1e3)
})

Convey("When doing a metric query grouping by time and select metric column should return correct series", func() {
query := &tsdb.TsdbQuery{
@ -647,16 +903,16 @@ func TestMySQL(t *testing.T) {
}

func InitMySQLTestDB(t *testing.T) *xorm.Engine {
x, err := xorm.NewEngine(sqlutil.TestDB_Mysql.DriverName, sqlutil.TestDB_Mysql.ConnStr+"&parseTime=true")
x.DatabaseTZ = time.Local
x.TZLocation = time.Local

// x.ShowSQL()

x, err := xorm.NewEngine(sqlutil.TestDB_Mysql.DriverName, strings.Replace(sqlutil.TestDB_Mysql.ConnStr, "/grafana_tests", "/grafana_ds_tests", 1))
if err != nil {
t.Fatalf("Failed to init mysql db %v", err)
}

x.DatabaseTZ = time.UTC
x.TZLocation = time.UTC

// x.ShowSQL()

return x
}
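A recurring cleanup in both test files above replaces row-by-row inserts with a single batched call. Both shapes come straight from the diff; the batched form issues one multi-row INSERT, which is typically faster when seeding dozens of fixture rows:

// Before: one INSERT (and one error check) per fixture row.
for _, s := range series {
	_, err = sess.Insert(s)
	So(err, ShouldBeNil)
}

// After: a single multi-row INSERT for the whole slice.
_, err = sess.InsertMulti(series)
So(err, ShouldBeNil)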
@ -35,7 +35,7 @@ func TestOpenTsdbExecutor(t *testing.T) {

})

Convey("Build metric with downsampling diabled", func() {
Convey("Build metric with downsampling disabled", func() {

query := &tsdb.Query{
Model: simplejson.New(),
@@ -79,15 +79,15 @@ func (m *PostgresMacroEngine) evaluateMacro(name string, args []string) (string,
    }
    return fmt.Sprintf("extract(epoch from %s) as \"time\"", args[0]), nil
  case "__timeFilter":
    // dont use to_timestamp in this macro for redshift compatibility #9566
    if len(args) == 0 {
      return "", fmt.Errorf("missing time column argument for macro %v", name)
    }
    return fmt.Sprintf("extract(epoch from %s) BETWEEN %d AND %d", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil

    return fmt.Sprintf("%s BETWEEN '%s' AND '%s'", args[0], m.TimeRange.GetFromAsTimeUTC().Format(time.RFC3339), m.TimeRange.GetToAsTimeUTC().Format(time.RFC3339)), nil
  case "__timeFrom":
    return fmt.Sprintf("to_timestamp(%d)", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
    return fmt.Sprintf("'%s'", m.TimeRange.GetFromAsTimeUTC().Format(time.RFC3339)), nil
  case "__timeTo":
    return fmt.Sprintf("to_timestamp(%d)", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    return fmt.Sprintf("'%s'", m.TimeRange.GetToAsTimeUTC().Format(time.RFC3339)), nil
  case "__timeGroup":
    if len(args) < 2 {
      return "", fmt.Errorf("macro %v needs time column and interval and optional fill value", name)
@@ -114,11 +114,11 @@ func (m *PostgresMacroEngine) evaluateMacro(name string, args []string) (string,
    if len(args) == 0 {
      return "", fmt.Errorf("missing time column argument for macro %v", name)
    }
    return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], m.TimeRange.GetFromAsSecondsEpoch(), args[0], m.TimeRange.GetToAsSecondsEpoch()), nil
  case "__unixEpochFrom":
    return fmt.Sprintf("%d", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
    return fmt.Sprintf("%d", m.TimeRange.GetFromAsSecondsEpoch()), nil
  case "__unixEpochTo":
    return fmt.Sprintf("%d", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    return fmt.Sprintf("%d", m.TimeRange.GetToAsSecondsEpoch()), nil
  default:
    return "", fmt.Errorf("Unknown macro %v", name)
  }

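A quick illustration of the behavioral change, mirrored from the macro engine tests further down so only the wiring is assumed: the old uint64(epoch/1000) formatting wrapped a pre-1970 (negative) epoch into a huge unsigned value, while the rewritten macros emit RFC3339 timestamps that Postgres compares natively.

  from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
  to := time.Date(1965, 2, 3, 8, 0, 0, 0, time.UTC)
  timeRange := tsdb.NewTimeRange(
    strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10),
    strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))

  sql, _ := NewPostgresMacroEngine().Interpolate(&tsdb.Query{}, timeRange, "WHERE $__timeFilter(time_column)")
  // Old output: WHERE extract(epoch from time_column) BETWEEN <wrapped uint64> AND <wrapped uint64>
  // New output: WHERE time_column BETWEEN '1960-02-01T07:00:00Z' AND '1965-02-03T08:00:00Z'
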
@@ -1,7 +1,10 @@
package postgres

import (
  "fmt"
  "strconv"
  "testing"
  "time"

  "github.com/grafana/grafana/pkg/tsdb"
  . "github.com/smartystreets/goconvey/convey"
@@ -9,81 +12,181 @@ import (

func TestMacroEngine(t *testing.T) {
  Convey("MacroEngine", t, func() {
    engine := &PostgresMacroEngine{}
    engine := NewPostgresMacroEngine()
    query := &tsdb.Query{}
    timeRange := &tsdb.TimeRange{From: "5m", To: "now"}

    Convey("interpolate __time function", func() {
      sql, err := engine.Interpolate(query, timeRange, "select $__time(time_column)")
      So(err, ShouldBeNil)
    Convey("Given a time range between 2018-04-12 18:00 and 2018-04-12 18:05", func() {
      from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
      to := from.Add(5 * time.Minute)
      timeRange := tsdb.NewFakeTimeRange("5m", "now", to)

      So(sql, ShouldEqual, "select time_column AS \"time\"")
      Convey("interpolate __time function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__time(time_column)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, "select time_column AS \"time\"")
      })

      Convey("interpolate __time function wrapped in aggregation", func() {
        sql, err := engine.Interpolate(query, timeRange, "select min($__time(time_column))")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, "select min(time_column AS \"time\")")
      })

      Convey("interpolate __timeFilter function", func() {
        sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column BETWEEN '%s' AND '%s'", from.Format(time.RFC3339), to.Format(time.RFC3339)))
      })

      Convey("interpolate __timeFrom function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select '%s'", from.Format(time.RFC3339)))
      })

      Convey("interpolate __timeGroup function", func() {

        sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, "GROUP BY (extract(epoch from time_column)/300)::bigint*300 AS time")
      })

      Convey("interpolate __timeGroup function with spaces between args", func() {

        sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column , '5m')")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, "GROUP BY (extract(epoch from time_column)/300)::bigint*300 AS time")
      })

      Convey("interpolate __timeTo function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select '%s'", to.Format(time.RFC3339)))
      })

      Convey("interpolate __unixEpochFilter function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select time >= %d AND time <= %d", from.Unix(), to.Unix()))
      })

      Convey("interpolate __unixEpochFrom function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
      })

      Convey("interpolate __unixEpochTo function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
      })
    })

    Convey("interpolate __time function wrapped in aggregation", func() {
      sql, err := engine.Interpolate(query, timeRange, "select min($__time(time_column))")
      So(err, ShouldBeNil)
    Convey("Given a time range between 1960-02-01 07:00 and 1965-02-03 08:00", func() {
      from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
      to := time.Date(1965, 2, 3, 8, 0, 0, 0, time.UTC)
      timeRange := tsdb.NewTimeRange(strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10), strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))

      So(sql, ShouldEqual, "select min(time_column AS \"time\")")
      Convey("interpolate __timeFilter function", func() {
        sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column BETWEEN '%s' AND '%s'", from.Format(time.RFC3339), to.Format(time.RFC3339)))
      })

      Convey("interpolate __timeFrom function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select '%s'", from.Format(time.RFC3339)))
      })

      Convey("interpolate __timeTo function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select '%s'", to.Format(time.RFC3339)))
      })

      Convey("interpolate __unixEpochFilter function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select time >= %d AND time <= %d", from.Unix(), to.Unix()))
      })

      Convey("interpolate __unixEpochFrom function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
      })

      Convey("interpolate __unixEpochTo function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
      })
    })

    Convey("interpolate __timeFilter function", func() {
      sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
      So(err, ShouldBeNil)
    Convey("Given a time range between 1960-02-01 07:00 and 1980-02-03 08:00", func() {
      from := time.Date(1960, 2, 1, 7, 0, 0, 0, time.UTC)
      to := time.Date(1980, 2, 3, 8, 0, 0, 0, time.UTC)
      timeRange := tsdb.NewTimeRange(strconv.FormatInt(from.UnixNano()/int64(time.Millisecond), 10), strconv.FormatInt(to.UnixNano()/int64(time.Millisecond), 10))

      So(sql, ShouldEqual, "WHERE extract(epoch from time_column) BETWEEN 18446744066914186738 AND 18446744066914187038")
      Convey("interpolate __timeFilter function", func() {
        sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column BETWEEN '%s' AND '%s'", from.Format(time.RFC3339), to.Format(time.RFC3339)))
      })

      Convey("interpolate __timeFrom function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select '%s'", from.Format(time.RFC3339)))
      })

      Convey("interpolate __timeTo function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select '%s'", to.Format(time.RFC3339)))
      })

      Convey("interpolate __unixEpochFilter function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select time >= %d AND time <= %d", from.Unix(), to.Unix()))
      })

      Convey("interpolate __unixEpochFrom function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select %d", from.Unix()))
      })

      Convey("interpolate __unixEpochTo function", func() {
        sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
        So(err, ShouldBeNil)

        So(sql, ShouldEqual, fmt.Sprintf("select %d", to.Unix()))
      })
    })

    Convey("interpolate __timeFrom function", func() {
      sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
      So(err, ShouldBeNil)

      So(sql, ShouldEqual, "select to_timestamp(18446744066914186738)")
    })

    Convey("interpolate __timeGroup function", func() {

      sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
      So(err, ShouldBeNil)

      So(sql, ShouldEqual, "GROUP BY (extract(epoch from time_column)/300)::bigint*300 AS time")
    })

    Convey("interpolate __timeGroup function with spaces between args", func() {

      sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column , '5m')")
      So(err, ShouldBeNil)

      So(sql, ShouldEqual, "GROUP BY (extract(epoch from time_column)/300)::bigint*300 AS time")
    })

    Convey("interpolate __timeTo function", func() {
      sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
      So(err, ShouldBeNil)

      So(sql, ShouldEqual, "select to_timestamp(18446744066914187038)")
    })

    Convey("interpolate __unixEpochFilter function", func() {
      sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(18446744066914186738)")
      So(err, ShouldBeNil)

      So(sql, ShouldEqual, "select 18446744066914186738 >= 18446744066914186738 AND 18446744066914186738 <= 18446744066914187038")
    })

    Convey("interpolate __unixEpochFrom function", func() {
      sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
      So(err, ShouldBeNil)

      So(sql, ShouldEqual, "select 18446744066914186738")
    })

    Convey("interpolate __unixEpochTo function", func() {
      sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
      So(err, ShouldBeNil)

      So(sql, ShouldEqual, "select 18446744066914187038")
    })

  })
}

@@ -7,7 +7,6 @@ import (
  "math"
  "net/url"
  "strconv"
  "time"

  "github.com/go-xorm/core"
  "github.com/grafana/grafana/pkg/components/null"
@@ -219,13 +218,16 @@ func (e PostgresQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *co
    return err
  }

  // converts column named time to unix timestamp in milliseconds to make
  // native mysql datetime types and epoch dates work in
  // annotation and table queries.
  tsdb.ConvertSqlTimeColumnToEpochMs(values, timeIndex)

  switch columnValue := values[timeIndex].(type) {
  case int64:
    timestamp = float64(columnValue * 1000)
    timestamp = float64(columnValue)
  case float64:
    timestamp = columnValue * 1000
  case time.Time:
    timestamp = float64(columnValue.UnixNano() / 1e6)
    timestamp = columnValue
  default:
    return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp, got: %T %v", columnValue, columnValue)
  }

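Note that dropping the `* 1000` multipliers in this switch is not a behavior change: the newly added tsdb.ConvertSqlTimeColumnToEpochMs call just above has already normalized the time column to epoch milliseconds, so the remaining case arms only coerce the Go type of an already-converted value.
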
@@ -3,26 +3,37 @@ package postgres
import (
  "fmt"
  "math/rand"
  "strings"
  "testing"
  "time"

  "github.com/go-xorm/xorm"
  "github.com/grafana/grafana/pkg/components/simplejson"
  "github.com/grafana/grafana/pkg/log"
  "github.com/grafana/grafana/pkg/services/sqlstore"
  "github.com/grafana/grafana/pkg/services/sqlstore/sqlutil"
  "github.com/grafana/grafana/pkg/tsdb"
  _ "github.com/lib/pq"
  . "github.com/smartystreets/goconvey/convey"
)

// To run this test, remove the Skip from SkipConvey
// and set up a PostgreSQL db named grafanatest and a user/password grafanatest/grafanatest!
// To run this test, set runPostgresTests=true
// Or from the commandline: GRAFANA_TEST_DB=postgres go test -v ./pkg/tsdb/postgres
// The tests require a PostgreSQL db named grafanadstest and a user/password grafanatest/grafanatest!
// Use the docker/blocks/postgres_tests/docker-compose.yaml to spin up a
// preconfigured Postgres server suitable for running these tests.
// Thers's also a dashboard.json in same directory that you can import to Grafana
// There is also a dashboard.json in same directory that you can import to Grafana
// once you've created a datasource for the test server/database.
func TestPostgres(t *testing.T) {
  SkipConvey("PostgreSQL", t, func() {
  // change to true to run the Postgres tests
  runPostgresTests := false
  // runPostgresTests := true

  if !(sqlstore.IsTestDbPostgres() || runPostgresTests) {
    t.Skip()
  }

  Convey("PostgreSQL", t, func() {
    x := InitPostgresTestDB(t)

    endpoint := &PostgresQueryEndpoint{

@@ -156,10 +167,8 @@ func TestPostgres(t *testing.T) {
      })
    }

    for _, s := range series {
      _, err = sess.Insert(s)
      So(err, ShouldBeNil)
    }
    _, err = sess.InsertMulti(series)
    So(err, ShouldBeNil)

    Convey("When doing a metric query using timeGroup", func() {
      query := &tsdb.TsdbQuery{

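The seeding change above (repeated in the larger hunk below) swaps N single-row Inserts for one xorm InsertMulti call, which issues a single multi-row INSERT and leaves only one error to check.
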
@@ -280,10 +289,18 @@ func TestPostgres(t *testing.T) {

    Convey("Given a table with metrics having multiple values and measurements", func() {
      type metric_values struct {
        Time        time.Time
        Measurement string
        ValueOne    int64 `xorm:"integer 'valueOne'"`
        ValueTwo    int64 `xorm:"integer 'valueTwo'"`
        Time                time.Time
        TimeInt64           int64    `xorm:"bigint 'timeInt64' not null"`
        TimeInt64Nullable   *int64   `xorm:"bigint 'timeInt64Nullable' null"`
        TimeFloat64         float64  `xorm:"double 'timeFloat64' not null"`
        TimeFloat64Nullable *float64 `xorm:"double 'timeFloat64Nullable' null"`
        TimeInt32           int32    `xorm:"int(11) 'timeInt32' not null"`
        TimeInt32Nullable   *int32   `xorm:"int(11) 'timeInt32Nullable' null"`
        TimeFloat32         float32  `xorm:"double 'timeFloat32' not null"`
        TimeFloat32Nullable *float32 `xorm:"double 'timeFloat32Nullable' null"`
        Measurement         string
        ValueOne            int64    `xorm:"integer 'valueOne'"`
        ValueTwo            int64    `xorm:"integer 'valueTwo'"`
      }

      if exist, err := sess.IsTableExist(metric_values{}); err != nil || exist {

@@ -298,26 +315,219 @@ func TestPostgres(t *testing.T) {
        return rand.Int63n(max-min) + min
      }

      var tInitial time.Time

      series := []*metric_values{}
      for _, t := range genTimeRangeByInterval(fromStart.Add(-30*time.Minute), 90*time.Minute, 5*time.Minute) {
        series = append(series, &metric_values{
          Time:        t,
          Measurement: "Metric A",
          ValueOne:    rnd(0, 100),
          ValueTwo:    rnd(0, 100),
        })
        series = append(series, &metric_values{
          Time:        t,
          Measurement: "Metric B",
          ValueOne:    rnd(0, 100),
          ValueTwo:    rnd(0, 100),
        })
      for i, t := range genTimeRangeByInterval(fromStart.Add(-30*time.Minute), 90*time.Minute, 5*time.Minute) {
        if i == 0 {
          tInitial = t
        }
        tSeconds := t.Unix()
        tSecondsInt32 := int32(tSeconds)
        tSecondsFloat32 := float32(tSeconds)
        tMilliseconds := tSeconds * 1e3
        tMillisecondsFloat := float64(tMilliseconds)
        first := metric_values{
          Time:                t,
          TimeInt64:           tMilliseconds,
          TimeInt64Nullable:   &(tMilliseconds),
          TimeFloat64:         tMillisecondsFloat,
          TimeFloat64Nullable: &tMillisecondsFloat,
          TimeInt32:           tSecondsInt32,
          TimeInt32Nullable:   &tSecondsInt32,
          TimeFloat32:         tSecondsFloat32,
          TimeFloat32Nullable: &tSecondsFloat32,
          Measurement:         "Metric A",
          ValueOne:            rnd(0, 100),
          ValueTwo:            rnd(0, 100),
        }
        second := first
        second.Measurement = "Metric B"
        second.ValueOne = rnd(0, 100)
        second.ValueTwo = rnd(0, 100)

        series = append(series, &first)
        series = append(series, &second)
      }

      for _, s := range series {
        _, err := sess.Insert(s)
      _, err = sess.InsertMulti(series)
      So(err, ShouldBeNil)

      Convey("When doing a metric query using epoch (int64) as time column should return metric with time in milliseconds", func() {
        query := &tsdb.TsdbQuery{
          Queries: []*tsdb.Query{
            {
              Model: simplejson.NewFromAny(map[string]interface{}{
                "rawSql": `SELECT "timeInt64" as time, "valueOne" FROM metric_values ORDER BY time LIMIT 1`,
                "format": "time_series",
              }),
              RefId: "A",
            },
          },
        }

        resp, err := endpoint.Query(nil, nil, query)
        So(err, ShouldBeNil)
      }
        queryResult := resp.Results["A"]
        So(queryResult.Error, ShouldBeNil)

        So(len(queryResult.Series), ShouldEqual, 1)
        So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
      })

      Convey("When doing a metric query using epoch (int64 nullable) as time column should return metric with time in milliseconds", func() {
        query := &tsdb.TsdbQuery{
          Queries: []*tsdb.Query{
            {
              Model: simplejson.NewFromAny(map[string]interface{}{
                "rawSql": `SELECT "timeInt64Nullable" as time, "valueOne" FROM metric_values ORDER BY time LIMIT 1`,
                "format": "time_series",
              }),
              RefId: "A",
            },
          },
        }

        resp, err := endpoint.Query(nil, nil, query)
        So(err, ShouldBeNil)
        queryResult := resp.Results["A"]
        So(queryResult.Error, ShouldBeNil)

        So(len(queryResult.Series), ShouldEqual, 1)
        So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
      })

      Convey("When doing a metric query using epoch (float64) as time column should return metric with time in milliseconds", func() {
        query := &tsdb.TsdbQuery{
          Queries: []*tsdb.Query{
            {
              Model: simplejson.NewFromAny(map[string]interface{}{
                "rawSql": `SELECT "timeFloat64" as time, "valueOne" FROM metric_values ORDER BY time LIMIT 1`,
                "format": "time_series",
              }),
              RefId: "A",
            },
          },
        }

        resp, err := endpoint.Query(nil, nil, query)
        So(err, ShouldBeNil)
        queryResult := resp.Results["A"]
        So(queryResult.Error, ShouldBeNil)

        So(len(queryResult.Series), ShouldEqual, 1)
        So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
      })

      Convey("When doing a metric query using epoch (float64 nullable) as time column should return metric with time in milliseconds", func() {
        query := &tsdb.TsdbQuery{
          Queries: []*tsdb.Query{
            {
              Model: simplejson.NewFromAny(map[string]interface{}{
                "rawSql": `SELECT "timeFloat64Nullable" as time, "valueOne" FROM metric_values ORDER BY time LIMIT 1`,
                "format": "time_series",
              }),
              RefId: "A",
            },
          },
        }

        resp, err := endpoint.Query(nil, nil, query)
        So(err, ShouldBeNil)
        queryResult := resp.Results["A"]
        So(queryResult.Error, ShouldBeNil)

        So(len(queryResult.Series), ShouldEqual, 1)
        So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
      })

      Convey("When doing a metric query using epoch (int32) as time column should return metric with time in milliseconds", func() {
        query := &tsdb.TsdbQuery{
          Queries: []*tsdb.Query{
            {
              Model: simplejson.NewFromAny(map[string]interface{}{
                "rawSql": `SELECT "timeInt32" as time, "valueOne" FROM metric_values ORDER BY time LIMIT 1`,
                "format": "time_series",
              }),
              RefId: "A",
            },
          },
        }

        resp, err := endpoint.Query(nil, nil, query)
        So(err, ShouldBeNil)
        queryResult := resp.Results["A"]
        So(queryResult.Error, ShouldBeNil)

        So(len(queryResult.Series), ShouldEqual, 1)
        So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
      })

      Convey("When doing a metric query using epoch (int32 nullable) as time column should return metric with time in milliseconds", func() {
        query := &tsdb.TsdbQuery{
          Queries: []*tsdb.Query{
            {
              Model: simplejson.NewFromAny(map[string]interface{}{
                "rawSql": `SELECT "timeInt32Nullable" as time, "valueOne" FROM metric_values ORDER BY time LIMIT 1`,
                "format": "time_series",
              }),
              RefId: "A",
            },
          },
        }

        resp, err := endpoint.Query(nil, nil, query)
        So(err, ShouldBeNil)
        queryResult := resp.Results["A"]
        So(queryResult.Error, ShouldBeNil)

        So(len(queryResult.Series), ShouldEqual, 1)
        So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(tInitial.UnixNano()/1e6))
      })

      Convey("When doing a metric query using epoch (float32) as time column should return metric with time in milliseconds", func() {
        query := &tsdb.TsdbQuery{
          Queries: []*tsdb.Query{
            {
              Model: simplejson.NewFromAny(map[string]interface{}{
                "rawSql": `SELECT "timeFloat32" as time, "valueOne" FROM metric_values ORDER BY time LIMIT 1`,
                "format": "time_series",
              }),
              RefId: "A",
            },
          },
        }

        resp, err := endpoint.Query(nil, nil, query)
        So(err, ShouldBeNil)
        queryResult := resp.Results["A"]
        So(queryResult.Error, ShouldBeNil)

        So(len(queryResult.Series), ShouldEqual, 1)
        So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(float64(float32(tInitial.Unix())))*1e3)
      })

      Convey("When doing a metric query using epoch (float32 nullable) as time column should return metric with time in milliseconds", func() {
        query := &tsdb.TsdbQuery{
          Queries: []*tsdb.Query{
            {
              Model: simplejson.NewFromAny(map[string]interface{}{
                "rawSql": `SELECT "timeFloat32Nullable" as time, "valueOne" FROM metric_values ORDER BY time LIMIT 1`,
                "format": "time_series",
              }),
              RefId: "A",
            },
          },
        }

        resp, err := endpoint.Query(nil, nil, query)
        So(err, ShouldBeNil)
        queryResult := resp.Results["A"]
        So(queryResult.Error, ShouldBeNil)

        So(len(queryResult.Series), ShouldEqual, 1)
        So(queryResult.Series[0].Points[0][1].Float64, ShouldEqual, float64(float64(float32(tInitial.Unix())))*1e3)
      })

      Convey("When doing a metric query grouping by time and select metric column should return correct series", func() {
        query := &tsdb.TsdbQuery{

@@ -473,7 +683,7 @@ func TestPostgres(t *testing.T) {
        columns := queryResult.Tables[0].Rows[0]

        // Should be in milliseconds
        So(columns[0].(float64), ShouldEqual, float64(dt.Unix()*1000))
        So(columns[0].(float64), ShouldEqual, float64(dt.UnixNano()/1e6))
      })

      Convey("When doing an annotation query with a time column in epoch second format should return ms", func() {

@@ -626,16 +836,16 @@ func TestPostgres(t *testing.T) {
}

func InitPostgresTestDB(t *testing.T) *xorm.Engine {
  x, err := xorm.NewEngine(sqlutil.TestDB_Postgres.DriverName, sqlutil.TestDB_Postgres.ConnStr)
  x, err := xorm.NewEngine(sqlutil.TestDB_Postgres.DriverName, strings.Replace(sqlutil.TestDB_Postgres.ConnStr, "dbname=grafanatest", "dbname=grafanadstest", 1))
  if err != nil {
    t.Fatalf("Failed to init postgres db %v", err)
  }

  x.DatabaseTZ = time.UTC
  x.TZLocation = time.UTC

  // x.ShowSQL()

  if err != nil {
    t.Fatalf("Failed to init postgres db %v", err)
  }

  return x
}

@@ -108,8 +108,8 @@ func (e *PrometheusExecutor) Query(ctx context.Context, dsInfo *models.DataSourc

  span, ctx := opentracing.StartSpanFromContext(ctx, "alerting.prometheus")
  span.SetTag("expr", query.Expr)
  span.SetTag("start_unixnano", int64(query.Start.UnixNano()))
  span.SetTag("stop_unixnano", int64(query.End.UnixNano()))
  span.SetTag("start_unixnano", query.Start.UnixNano())
  span.SetTag("stop_unixnano", query.End.UnixNano())
  defer span.Finish()

  value, err := client.QueryRange(ctx, query.Expr, timeRange)

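Both tag changes here are purely cosmetic: time.Time's UnixNano already returns an int64, so the dropped conversions were no-ops.
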
@@ -135,16 +135,16 @@ func (e *DefaultSqlEngine) Query(
  return result, nil
}

// ConvertTimeColumnToEpochMs converts column named time to unix timestamp in milliseconds
// ConvertSqlTimeColumnToEpochMs converts column named time to unix timestamp in milliseconds
// to make native datetime types and epoch dates work in annotation and table queries.
func ConvertSqlTimeColumnToEpochMs(values RowValues, timeIndex int) {
  if timeIndex >= 0 {
    switch value := values[timeIndex].(type) {
    case time.Time:
      values[timeIndex] = EpochPrecisionToMs(float64(value.Unix()))
      values[timeIndex] = EpochPrecisionToMs(float64(value.UnixNano()))
    case *time.Time:
      if value != nil {
        values[timeIndex] = EpochPrecisionToMs(float64((*value).Unix()))
        values[timeIndex] = EpochPrecisionToMs(float64((*value).UnixNano()))
      }
    case int64:
      values[timeIndex] = int64(EpochPrecisionToMs(float64(value)))
@@ -152,12 +152,36 @@ func ConvertSqlTimeColumnToEpochMs(values RowValues, timeIndex int) {
      if value != nil {
        values[timeIndex] = int64(EpochPrecisionToMs(float64(*value)))
      }
    case uint64:
      values[timeIndex] = int64(EpochPrecisionToMs(float64(value)))
    case *uint64:
      if value != nil {
        values[timeIndex] = int64(EpochPrecisionToMs(float64(*value)))
      }
    case int32:
      values[timeIndex] = int64(EpochPrecisionToMs(float64(value)))
    case *int32:
      if value != nil {
        values[timeIndex] = int64(EpochPrecisionToMs(float64(*value)))
      }
    case uint32:
      values[timeIndex] = int64(EpochPrecisionToMs(float64(value)))
    case *uint32:
      if value != nil {
        values[timeIndex] = int64(EpochPrecisionToMs(float64(*value)))
      }
    case float64:
      values[timeIndex] = EpochPrecisionToMs(value)
    case *float64:
      if value != nil {
        values[timeIndex] = EpochPrecisionToMs(*value)
      }
    case float32:
      values[timeIndex] = EpochPrecisionToMs(float64(value))
    case *float32:
      if value != nil {
        values[timeIndex] = EpochPrecisionToMs(float64(*value))
      }
    }
  }
}

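A hedged usage sketch (RowValues is the row type in the signature above, assumed here to be an []interface{} alias): mixed-precision epoch columns are normalized in place to milliseconds.

  // The time column sits at index 0 with seconds precision.
  row := tsdb.RowValues{uint32(1523180406), int64(42)}
  tsdb.ConvertSqlTimeColumnToEpochMs(row, 0)
  // row[0] is now int64(1523180406000), i.e. epoch milliseconds.
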
@@ -1,6 +1,7 @@
package tsdb

import (
  "fmt"
  "testing"
  "time"

@@ -9,37 +10,177 @@ import (

func TestSqlEngine(t *testing.T) {
  Convey("SqlEngine", t, func() {
    Convey("Given row values with time columns when converting them", func() {
    dt := time.Date(2018, 3, 14, 21, 20, 6, 527e6, time.UTC)
    fixtures := make([]interface{}, 8)
    fixtures[0] = dt
    fixtures[1] = dt.Unix() * 1000
    fixtures[2] = dt.Unix()
    fixtures[3] = float64(dt.Unix() * 1000)
    fixtures[4] = float64(dt.Unix())
    dt := time.Date(2018, 3, 14, 21, 20, 6, int(527345*time.Microsecond), time.UTC)

    var nilDt *time.Time
    var nilInt64 *int64
    var nilFloat64 *float64
    fixtures[5] = nilDt
    fixtures[6] = nilInt64
    fixtures[7] = nilFloat64
    Convey("Given row values with time.Time as time columns", func() {
      var nilPointer *time.Time

      fixtures := make([]interface{}, 3)
      fixtures[0] = dt
      fixtures[1] = &dt
      fixtures[2] = nilPointer

      for i := range fixtures {
        ConvertSqlTimeColumnToEpochMs(fixtures, i)
      }

      Convey("Should convert sql time columns to epoch time in ms ", func() {
        expected := float64(dt.Unix() * 1000)
      Convey("When converting them should return epoch time with millisecond precision ", func() {
        expected := float64(dt.UnixNano()) / float64(time.Millisecond)
        So(fixtures[0].(float64), ShouldEqual, expected)
        So(fixtures[1].(int64), ShouldEqual, expected)
        So(fixtures[2].(int64), ShouldEqual, expected)
        So(fixtures[3].(float64), ShouldEqual, expected)
        So(fixtures[4].(float64), ShouldEqual, expected)
        So(fixtures[1].(float64), ShouldEqual, expected)
        So(fixtures[2], ShouldBeNil)
      })
    })

    So(fixtures[5], ShouldBeNil)
    Convey("Given row values with int64 as time columns", func() {
      tSeconds := dt.Unix()
      tMilliseconds := dt.UnixNano() / 1e6
      tNanoSeconds := dt.UnixNano()
      var nilPointer *int64

      fixtures := make([]interface{}, 7)
      fixtures[0] = tSeconds
      fixtures[1] = &tSeconds
      fixtures[2] = tMilliseconds
      fixtures[3] = &tMilliseconds
      fixtures[4] = tNanoSeconds
      fixtures[5] = &tNanoSeconds
      fixtures[6] = nilPointer

      for i := range fixtures {
        ConvertSqlTimeColumnToEpochMs(fixtures, i)
      }

      Convey("When converting them should return epoch time with millisecond precision ", func() {
        So(fixtures[0].(int64), ShouldEqual, tSeconds*1e3)
        So(fixtures[1].(int64), ShouldEqual, tSeconds*1e3)
        So(fixtures[2].(int64), ShouldEqual, tMilliseconds)
        So(fixtures[3].(int64), ShouldEqual, tMilliseconds)
        So(fixtures[4].(int64), ShouldEqual, tMilliseconds)
        So(fixtures[5].(int64), ShouldEqual, tMilliseconds)
        So(fixtures[6], ShouldBeNil)
        So(fixtures[7], ShouldBeNil)
      })
    })

    Convey("Given row values with uint64 as time columns", func() {
      tSeconds := uint64(dt.Unix())
      tMilliseconds := uint64(dt.UnixNano() / 1e6)
      tNanoSeconds := uint64(dt.UnixNano())
      var nilPointer *uint64

      fixtures := make([]interface{}, 7)
      fixtures[0] = tSeconds
      fixtures[1] = &tSeconds
      fixtures[2] = tMilliseconds
      fixtures[3] = &tMilliseconds
      fixtures[4] = tNanoSeconds
      fixtures[5] = &tNanoSeconds
      fixtures[6] = nilPointer

      for i := range fixtures {
        ConvertSqlTimeColumnToEpochMs(fixtures, i)
      }

      Convey("When converting them should return epoch time with millisecond precision ", func() {
        So(fixtures[0].(int64), ShouldEqual, tSeconds*1e3)
        So(fixtures[1].(int64), ShouldEqual, tSeconds*1e3)
        So(fixtures[2].(int64), ShouldEqual, tMilliseconds)
        So(fixtures[3].(int64), ShouldEqual, tMilliseconds)
        So(fixtures[4].(int64), ShouldEqual, tMilliseconds)
        So(fixtures[5].(int64), ShouldEqual, tMilliseconds)
        So(fixtures[6], ShouldBeNil)
      })
    })

    Convey("Given row values with int32 as time columns", func() {
      tSeconds := int32(dt.Unix())
      var nilInt *int32

      fixtures := make([]interface{}, 3)
      fixtures[0] = tSeconds
      fixtures[1] = &tSeconds
      fixtures[2] = nilInt

      for i := range fixtures {
        ConvertSqlTimeColumnToEpochMs(fixtures, i)
      }

      Convey("When converting them should return epoch time with millisecond precision ", func() {
        So(fixtures[0].(int64), ShouldEqual, dt.Unix()*1e3)
        So(fixtures[1].(int64), ShouldEqual, dt.Unix()*1e3)
        So(fixtures[2], ShouldBeNil)
      })
    })

    Convey("Given row values with uint32 as time columns", func() {
      tSeconds := uint32(dt.Unix())
      var nilInt *uint32

      fixtures := make([]interface{}, 3)
      fixtures[0] = tSeconds
      fixtures[1] = &tSeconds
      fixtures[2] = nilInt

      for i := range fixtures {
        ConvertSqlTimeColumnToEpochMs(fixtures, i)
      }

      Convey("When converting them should return epoch time with millisecond precision ", func() {
        So(fixtures[0].(int64), ShouldEqual, dt.Unix()*1e3)
        So(fixtures[1].(int64), ShouldEqual, dt.Unix()*1e3)
        So(fixtures[2], ShouldBeNil)
      })
    })

    Convey("Given row values with float64 as time columns", func() {
      tSeconds := float64(dt.UnixNano()) / float64(time.Second)
      tMilliseconds := float64(dt.UnixNano()) / float64(time.Millisecond)
      tNanoSeconds := float64(dt.UnixNano())
      var nilPointer *float64

      fixtures := make([]interface{}, 7)
      fixtures[0] = tSeconds
      fixtures[1] = &tSeconds
      fixtures[2] = tMilliseconds
      fixtures[3] = &tMilliseconds
      fixtures[4] = tNanoSeconds
      fixtures[5] = &tNanoSeconds
      fixtures[6] = nilPointer

      for i := range fixtures {
        ConvertSqlTimeColumnToEpochMs(fixtures, i)
      }

      Convey("When converting them should return epoch time with millisecond precision ", func() {
        So(fixtures[0].(float64), ShouldEqual, tMilliseconds)
        So(fixtures[1].(float64), ShouldEqual, tMilliseconds)
        So(fixtures[2].(float64), ShouldEqual, tMilliseconds)
        So(fixtures[3].(float64), ShouldEqual, tMilliseconds)
        fmt.Println(fixtures[4].(float64))
        fmt.Println(tMilliseconds)
        So(fixtures[4].(float64), ShouldEqual, tMilliseconds)
        So(fixtures[5].(float64), ShouldEqual, tMilliseconds)
        So(fixtures[6], ShouldBeNil)
      })
    })

    Convey("Given row values with float32 as time columns", func() {
      tSeconds := float32(dt.Unix())
      var nilInt *float32

      fixtures := make([]interface{}, 3)
      fixtures[0] = tSeconds
      fixtures[1] = &tSeconds
      fixtures[2] = nilInt

      for i := range fixtures {
        ConvertSqlTimeColumnToEpochMs(fixtures, i)
      }

      Convey("When converting them should return epoch time with millisecond precision ", func() {
        So(fixtures[0].(float64), ShouldEqual, float32(dt.Unix()*1e3))
        So(fixtures[1].(float64), ShouldEqual, float32(dt.Unix()*1e3))
        So(fixtures[2], ShouldBeNil)
      })
    })
  })

@@ -15,6 +15,14 @@ func NewTimeRange(from, to string) *TimeRange {
  }
}

func NewFakeTimeRange(from, to string, now time.Time) *TimeRange {
  return &TimeRange{
    From: from,
    To:   to,
    now:  now,
  }
}

type TimeRange struct {
  From string
  To   string
@@ -25,10 +33,26 @@ func (tr *TimeRange) GetFromAsMsEpoch() int64 {
  return tr.MustGetFrom().UnixNano() / int64(time.Millisecond)
}

func (tr *TimeRange) GetFromAsSecondsEpoch() int64 {
  return tr.GetFromAsMsEpoch() / 1000
}

func (tr *TimeRange) GetFromAsTimeUTC() time.Time {
  return tr.MustGetFrom().UTC()
}

func (tr *TimeRange) GetToAsMsEpoch() int64 {
  return tr.MustGetTo().UnixNano() / int64(time.Millisecond)
}

func (tr *TimeRange) GetToAsSecondsEpoch() int64 {
  return tr.GetToAsMsEpoch() / 1000
}

func (tr *TimeRange) GetToAsTimeUTC() time.Time {
  return tr.MustGetTo().UTC()
}

func (tr *TimeRange) MustGetFrom() time.Time {
  if res, err := tr.ParseFrom(); err != nil {
    return time.Unix(0, 0)

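A short sketch of why the fake constructor exists; usage mirrors the macro engine tests above, where injecting "now" makes relative ranges deterministic:

  now := time.Date(2018, 4, 12, 18, 5, 0, 0, time.UTC)
  tr := tsdb.NewFakeTimeRange("5m", "now", now)

  tr.GetFromAsTimeUTC()    // 2018-04-12T18:00:00Z, since "5m" is resolved against the injected now
  tr.GetToAsSecondsEpoch() // 1523556300, the injected now as an epoch in seconds
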
@@ -92,9 +116,14 @@ func (tr *TimeRange) ParseTo() (time.Time, error) {
// EpochPrecisionToMs converts epoch precision to millisecond, if needed.
// Only seconds to milliseconds supported right now
func EpochPrecisionToMs(value float64) float64 {
  if int64(value)/1e10 == 0 {
    return float64(value * 1e3)
  s := strconv.FormatFloat(value, 'e', -1, 64)
  if strings.HasSuffix(s, "e+09") {
    return value * float64(1e3)
  }

  return float64(value)
  if strings.HasSuffix(s, "e+18") {
    return value / float64(time.Millisecond)
  }

  return value
}
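The rewritten heuristic classifies an epoch by the exponent of its scientific-notation rendering: ten-digit magnitudes ("e+09") are treated as seconds and scaled up, nineteen-digit magnitudes ("e+18") as nanoseconds and scaled down, and everything else (including millisecond inputs) passes through. A few illustrative calls, with hand-checked results:

  tsdb.EpochPrecisionToMs(1.523180406e9)  // 1.523180406e12: "1.523180406e+09", seconds, multiplied by 1e3
  tsdb.EpochPrecisionToMs(1.523180406e18) // 1.523180406e12: "1.523180406e+18", nanoseconds, divided by 1e6
  tsdb.EpochPrecisionToMs(1.523180406e12) // unchanged: already milliseconds
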
Some files were not shown because too many files have changed in this diff.