diff --git a/.editorconfig b/.editorconfig
index 3701d80b453..386c27fceb8 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -3,7 +3,7 @@ root = true

 [*.go]
 indent_style = tab
-indent_size = 2
+indent_size = 4
 charset = utf-8
 trim_trailing_whitespace = true
 insert_final_newline = true
diff --git a/.gitignore b/.gitignore
index 721a2a71ad4..1ab7068c96a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,7 @@ public/css/*.min.css
 *.swp
 .idea/
 *.iml
+*.tmp
 .vscode/
 /data/*
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 024bc39d2dd..f2e27dade77 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,25 +1,117 @@
-# 4.3.0 (unreleased)
+# 4.4.0 (unreleased)
+
+## New Features
+**Dashboard History**: View dashboard version history, compare any two versions (summary & json diffs), restore to a previous version. This big feature
+was contributed by **Walmart Labs**. Big thanks to them for this massive contribution!
+Initial feature request: [#4638](https://github.com/grafana/grafana/issues/4638)
+Pull Request: [#8472](https://github.com/grafana/grafana/pull/8472)
+
+## Enhancements
+* **Elasticsearch**: Added filter aggregation label [#8420](https://github.com/grafana/grafana/pull/8420), thx [@tianzk](https://github.com/tianzk)
+* **Sensu**: Added option for source and handler [#8405](https://github.com/grafana/grafana/pull/8405), thx [@joemiller](https://github.com/joemiller)
+* **CSV**: Configurable csv export datetime format [#8058](https://github.com/grafana/grafana/issues/8058), thx [@cederigo](https://github.com/cederigo)
+
+# 4.3.2 (2017-05-31)
+
+## Bug fixes
+
+* **InfluxDB**: Fixed issue with query editor not showing ALIAS BY input field when in text editor mode [#8459](https://github.com/grafana/grafana/issues/8459)
+* **Graph Log Scale**: Fixed issue with log scale going below x-axis [#8244](https://github.com/grafana/grafana/issues/8244)
+* **Playlist**: Fixed dashboard play order issue [#7688](https://github.com/grafana/grafana/issues/7688)
+* **Elasticsearch**: Fixed table query issue with ES 2.x [#8467](https://github.com/grafana/grafana/issues/8467), thx [@goldeelox](https://github.com/goldeelox)
+
+## Changes
+* **Lazy Loading Of Panels**: Panels are no longer loaded as they are scrolled into view. This was reverted due to a Chrome bug and might be reintroduced when Chrome fixes its JS blocking behavior on scroll. [#8500](https://github.com/grafana/grafana/issues/8500)
+
+# 4.3.1 (2017-05-23)
+
+## Bug fixes
+
+* **S3 image upload**: Fixed image url issue for us-east-1 (us standard) region. If you were missing Slack images for alert notifications this should fix it. [#8444](https://github.com/grafana/grafana/issues/8444)
+
+# 4.3.0-stable (2017-05-23)
+
+## Bug fixes
+
+* **Gzip**: Fixed crash when gzip was enabled [#8380](https://github.com/grafana/grafana/issues/8380)
+* **Graphite**: Fixed issue with Toggle edit mode in query editor [#8377](https://github.com/grafana/grafana/issues/8377)
+* **Alerting**: Fixed issue with state history not showing query execution errors [#8412](https://github.com/grafana/grafana/issues/8412)
+* **Alerting**: Fixed issue with missing state history events/annotations when using sqlite3 database [#7992](https://github.com/grafana/grafana/issues/7992)
+* **Sqlite**: Fixed issue with database table locked when using sqlite3 database [#7992](https://github.com/grafana/grafana/issues/7992)
+* **Alerting**: Fixed issue with annotations showing up in unsaved dashboards, new graph & alert panel. [#8361](https://github.com/grafana/grafana/issues/8361)
+* **webdav**: Fixed http proxy env variable support for webdav image upload [#7922](https://github.com/grafana/grafana/issues/7922), thx [@berghauz](https://github.com/berghauz)
+* **Prometheus**: Fixed issue with hiding query [#8413](https://github.com/grafana/grafana/issues/8413)
+
+## Enhancements
+
+* **VictorOps**: Now supports panel image & auto resolve [#8431](https://github.com/grafana/grafana/pull/8431), thx [@davidmscott](https://github.com/davidmscott)
+* **Alerting**: Alert annotations now provide more info [#8421](https://github.com/grafana/grafana/pull/8421)
+
+# 4.3.0-beta1 (2017-05-12)

 ## Enhancements
 * **InfluxDB**: influxdb query builder support for ORDER BY and LIMIT (allows TOPN queries) [#6065](https://github.com/grafana/grafana/issues/6065) Support influxdb's SLIMIT Feature [#7232](https://github.com/grafana/grafana/issues/7232) thx [@thuck](https://github.com/thuck)
-* **InfluxDB**: Small fix for the "glow" when focus the field for LIMIT and SLIMIT [#7799](https://github.com/grafana/grafana/pull/7799) thx [@thuck](https://github.com/thuck)
 * **Panels**: Delay loading & Lazy load panels as they become visible (scrolled into view) [#5216](https://github.com/grafana/grafana/issues/5216) thx [@jifwin](https://github.com/jifwin)
 * **Graph**: Support auto grid min/max when using log scale [#3090](https://github.com/grafana/grafana/issues/3090), thx [@bigbenhur](https://github.com/bigbenhur)
-* **Elasticsearch**: Support histogram aggregations [#3164](https://github.com/grafana/grafana/issues/3164)
+* **Graph**: Support for histograms [#600](https://github.com/grafana/grafana/issues/600)
+* **Prometheus**: Support table response formats (column per label) [#6140](https://github.com/grafana/grafana/issues/6140), thx [@mtanda](https://github.com/mtanda)
+* **Single Stat Panel**: support for non time series data [#6564](https://github.com/grafana/grafana/issues/6564)
+* **Server**: Monitoring Grafana (health check endpoint) [#3302](https://github.com/grafana/grafana/issues/3302)
+* **Heatmap**: Heatmap Panel [#7934](https://github.com/grafana/grafana/pull/7934)
+* **Elasticsearch**: histogram aggregation [#3164](https://github.com/grafana/grafana/issues/3164)

-## Minor Enchancements
+## Minor Enhancements
+* **InfluxDB**: Small fix for the "glow" when focusing the field for LIMIT and SLIMIT [#7799](https://github.com/grafana/grafana/pull/7799) thx [@thuck](https://github.com/thuck)
 * **Prometheus**: Make Prometheus query field a textarea [#7663](https://github.com/grafana/grafana/issues/7663), thx [@hagen1778](https://github.com/hagen1778)
+* **Prometheus**: Step parameter changed semantics to min step to reduce the load on Prometheus and rendering in the browser [#8073](https://github.com/grafana/grafana/pull/8073), thx [@bobrik](https://github.com/bobrik)
 * **Templating**: Should not be possible to create self-referencing (recursive) template variable definitions [#7614](https://github.com/grafana/grafana/issues/7614) thx [@thuck](https://github.com/thuck)
 * **Cloudwatch**: Correctly obtain IAM roles within ECS container tasks [#7892](https://github.com/grafana/grafana/issues/7892) thx [@gomlgs](https://github.com/gomlgs)
 * **Units**: New number format: Scientific notation [#7781](https://github.com/grafana/grafana/issues/7781) thx [@cadnce](https://github.com/cadnce)
 * **Oauth**: Add common type for oauth authorization errors [#6428](https://github.com/grafana/grafana/issues/6428) thx [@amenzhinsky](https://github.com/amenzhinsky)
 * **Templating**: Data source variable now supports multi value and panel repeats [#7030](https://github.com/grafana/grafana/issues/7030) thx [@mtanda](https://github.com/mtanda)
+* **Telegram**: Telegram alert is not sending metric and legend. [#8110](https://github.com/grafana/grafana/issues/8110), thx [@bashgeek](https://github.com/bashgeek)
+* **Graph**: Support dashed lines [#514](https://github.com/grafana/grafana/issues/514), thx [@smalik03](https://github.com/smalik03)
+* **Table**: Support to change column header text [#3551](https://github.com/grafana/grafana/issues/3551)
+* **Alerting**: Better error when SMTP is not configured [#8093](https://github.com/grafana/grafana/issues/8093)
+* **Pushover**: Add an option to attach graph image link in Pushover notification [#8043](https://github.com/grafana/grafana/issues/8043) thx [@devkid](https://github.com/devkid)
+* **WebDAV**: Allow to set different ImageBaseUrl for WebDAV upload and image link [#7914](https://github.com/grafana/grafana/issues/7914)
+* **Panels**: type-ahead mixed datasource selection [#7697](https://github.com/grafana/grafana/issues/7697) thx [@mtanda](https://github.com/mtanda)
+* **Security**: User enumeration problem [#7619](https://github.com/grafana/grafana/issues/7619)
+* **InfluxDB**: Register new queries available in InfluxDB - Holt Winters [#5619](https://github.com/grafana/grafana/issues/5619) thx [@rikkuness](https://github.com/rikkuness)
+* **Server**: Support listening on a UNIX socket [#4030](https://github.com/grafana/grafana/issues/4030), thx [@mitjaziv](https://github.com/mitjaziv)
+* **Graph**: Support log scaling for values smaller than 1 [#5278](https://github.com/grafana/grafana/issues/5278)
+* **InfluxDB**: Slow 'select measurement' rendering for InfluxDB [#2524](https://github.com/grafana/grafana/issues/2524), thx [@sbhenderson](https://github.com/sbhenderson)
+* **Config**: Configurable signout menu activation [#7968](https://github.com/grafana/grafana/pull/7968), thx [@seuf](https://github.com/seuf)

 ## Fixes
 * **Table Panel**: Fixed annotation display in table panel, [#8023](https://github.com/grafana/grafana/issues/8023)
 * **Dashboard**: If refresh is blocked due to tab not visible, then refresh when it becomes visible [#8076](https://github.com/grafana/grafana/issues/8076) thanks [@SimenB](https://github.com/SimenB)
+* **Snapshots**: Fixed problem with annotations & snapshots [#7659](https://github.com/grafana/grafana/issues/7659)
+* **Graph**: MetricSegment loses type when value is an asterisk [#8277](https://github.com/grafana/grafana/issues/8277), thx [@Gordiychuk](https://github.com/Gordiychuk)
+* **Alerting**: Alert notifications do not show charts when using a non public S3 bucket [#8250](https://github.com/grafana/grafana/issues/8250) thx [@rogerswingle](https://github.com/rogerswingle)
+* **Graph**: 100% client CPU usage on red alert glow animation [#8222](https://github.com/grafana/grafana/issues/8222)
+* **InfluxDB**: Templating: "All" query does match too much [#8165](https://github.com/grafana/grafana/issues/8165)
+* **Dashboard**: Description tooltip is not fully displayed [#7970](https://github.com/grafana/grafana/issues/7970)
+* **Proxy**: Redirect after switching Org does not obey sub path in root_url (using reverse proxy) [#8089](https://github.com/grafana/grafana/issues/8089)
+* **Templating**: Restoration of ad-hoc variable from URL does not work correctly [#8056](https://github.com/grafana/grafana/issues/8056) thx [@tamayika](https://github.com/tamayika)
+* **InfluxDB**: timeFilter cannot be used twice in alerts [#7969](https://github.com/grafana/grafana/issues/7969)
+* **MySQL**: 4-byte UTF8 not supported when using MySQL database (allows Emojis) [#7958](https://github.com/grafana/grafana/issues/7958)
+* **Alerting**: api/alerts and api/alert/:id hold previous data for "message" and "Message" field when field value is changed from "some string" to empty string. [#7927](https://github.com/grafana/grafana/issues/7927)
+* **Graph**: Cannot add fill below to series override [#7916](https://github.com/grafana/grafana/issues/7916)
+* **InfluxDB**: InfluxDB Datasource test passes even if the Database doesn't exist [#7864](https://github.com/grafana/grafana/issues/7864)
+* **Prometheus**: Displaying Prometheus annotations is incredibly slow [#7750](https://github.com/grafana/grafana/issues/7750), thx [@mtanda](https://github.com/mtanda)
+* **Graphite**: Grafana generates an empty find query to Graphite -> 422 Unprocessable Entity [#7740](https://github.com/grafana/grafana/issues/7740)
+* **Admin**: Make organisation filter case-insensitive [#8194](https://github.com/grafana/grafana/issues/8194), thx [@Alexander-N](https://github.com/Alexander-N)
+
+## Changes
+* **Elasticsearch**: Changed Elasticsearch Terms aggregation to default Min Doc Count to 1 and sort order to Top [#8321](https://github.com/grafana/grafana/issues/8321)
+
+## Tech
+
+* **Library Upgrade**: inconshreveable/log15 outdated - no support for solaris [#8262](https://github.com/grafana/grafana/issues/8262)
+* **Library Upgrade**: Upgrade Macaron [#7600](https://github.com/grafana/grafana/issues/7600)

 # 4.2.0 (2017-03-22)
 ## Minor Enhancements
diff --git a/LICENSE.md b/LICENSE.md
index 2699d589e8e..4c6a79691f0 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -1,4 +1,4 @@
-Copyright 2014-2016 Torkel Ödegaard, Raintank Inc.
+Copyright 2014-2017 Torkel Ödegaard, Raintank Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License. You may
diff --git a/README.md b/README.md
index 35bfad353fc..9d2aabebbf3 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[Grafana](https://grafana.com) [![Circle CI](https://circleci.com/gh/grafana/grafana.svg?style=svg)](https://circleci.com/gh/grafana/grafana)
+[Grafana](https://grafana.com) [![Circle CI](https://circleci.com/gh/grafana/grafana.svg?style=svg)](https://circleci.com/gh/grafana/grafana)
 ================
 [Website](https://grafana.com) |
 [Twitter](https://twitter.com/grafana) |
@@ -17,14 +17,9 @@ Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.
 - [What's New in Grafana 4.0](http://docs.grafana.org/guides/whats-new-in-v4/)
 - [What's New in Grafana 4.1](http://docs.grafana.org/guides/whats-new-in-v4-1/)
 - [What's New in Grafana 4.2](http://docs.grafana.org/guides/whats-new-in-v4-2/)
+- [What's New in Grafana 4.3](http://docs.grafana.org/guides/whats-new-in-v4-3/)

 ## Features
-### Graphite Target Editor
-- Graphite target expression parser
-- Feature rich query composer
-- Quickly add and edit functions & parameters
-- Templated queries
-- [See it in action](http://docs.grafana.org/datasources/graphite/)

 ### Graphing
 - Fast rendering, even over large timespans
@@ -48,16 +43,23 @@
 - [Time range controls](http://docs.grafana.org/reference/timerange/)
 - [Share snapshots publicly](http://docs.grafana.org/v2.0/reference/sharing/)

-### Elasticsearch
-- Feature rich query editor UI
-
 ### InfluxDB
 - Use InfluxDB as a metric data source, annotation source
-- Query editor with series and column typeahead, easy group by and function selection
+- Query editor with field and tag typeahead, easy group by and function selection

-### OpenTSDB
-- Use as metric data source
-- Query editor with metric name typeahead and tag filtering
+### Graphite
+- Graphite target expression parser
+- Feature rich query composer
+- Quickly add and edit functions & parameters
+- Templated queries
+- [See it in action](http://docs.grafana.org/datasources/graphite/)
+
+### Elasticsearch, Prometheus & OpenTSDB
+- Feature rich query editor UI
+
+### Alerting
+- Define alert rules using graphs & query conditions
+- Schedule & evaluate alert rules, send notifications to Slack, HipChat, Email, PagerDuty, etc.

 ## Requirements
 There are no dependencies except an external time series data store. For dashboards and user accounts Grafana can use an embedded
@@ -78,8 +80,8 @@ the latest master builds [here](https://grafana.com/grafana/download)

 ### Dependencies

-- Go 1.8
-- NodeJS v4+
+- Go 1.8.1
+- NodeJS LTS

 ### Get Code

@@ -144,8 +146,7 @@ Create a custom.ini in the conf directory to override default configuration options.
 You only need to add the options you want to override. Config files are applied in the order of:

 1. grafana.ini
-2. dev.ini (if found)
-3. custom.ini
+1. custom.ini

 ## Create a pull request
 Before or after you create a pull request, sign the [contributor license agreement](http://docs.grafana.org/project/cla/).
diff --git a/ROADMAP.md b/ROADMAP.md
new file mode 100644
index 00000000000..260c4151442
--- /dev/null
+++ b/ROADMAP.md
@@ -0,0 +1,31 @@
+# Roadmap (2017-04-23)
+
+This roadmap is a tentative plan for the core development team. Things change constantly as PRs come in and priorities change.
+But it will give you an idea of our current vision and plan.
+
+### Short term (1-4 months)
+
+ - New Heatmap Panel (Implemented and available in master)
+ - Support for MySQL & Postgres as data sources (Work started and an alpha version for MySQL is available in master)
+ - User Groups & Dashboard folders with ACLs (work started, not yet completed, https://github.com/grafana/grafana/issues/1611#issuecomment-287742633)
+ - Improve new user UX
+ - Improve docs
+ - Support for alerting for Elasticsearch (can be tested in [branch](https://github.com/grafana/grafana/tree/alerting-elasticsearch) but needs more work)
+ - Graph annotations (create from grafana, region annotations, better annotation viz)
+ - Improve alerting (clustering, silence rules)
+
+### Long term
+
+- Improved dashboard panel layout engine (to make it easier and enable more flexible layouts)
+- Backend plugins to support more Auth options, Alerting data sources & notifications
+- Universal time series transformations for any data source (meta queries)
+- Reporting
+- Web socket & live data streams
+- Migrate to Angular2
+
+
+### Outside contributions
+We know these are being worked on right now by contributors (and we hope to merge them when they're ready).
+
+- Dashboard revisions (be able to revert dashboard changes)
+- Clustering for alert engine (load distribution)
diff --git a/build.go b/build.go
index a706842dfec..f60a53e5ddb 100644
--- a/build.go
+++ b/build.go
@@ -235,7 +235,7 @@ func createRpmPackages() {
 		defaultFileSrc: "packaging/rpm/sysconfig/grafana-server",
 		systemdFileSrc: "packaging/rpm/systemd/grafana-server.service",

-		depends: []string{"/sbin/service", "fontconfig"},
+		depends: []string{"/sbin/service", "fontconfig", "freetype", "urw-fonts"},
 	})
 }
diff --git a/conf/defaults.ini b/conf/defaults.ini
index 9955c22ca17..61c9f7fb5b3 100644
--- a/conf/defaults.ini
+++ b/conf/defaults.ini
@@ -25,7 +25,7 @@ plugins = data/plugins

 #################################### Server ##############################
 [server]
-# Protocol (http or https)
+# Protocol (http, https, socket)
 protocol = http

 # The ip address to bind to, empty will bind to all interfaces
@@ -57,6 +57,9 @@ enable_gzip = false
 cert_file =
 cert_key =

+# Unix socket path
+socket = /tmp/grafana.sock
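+#
+# For illustration only, a socket-based setup could look like the following
+# (the path is an example; the grafana-server user must be able to create
+# the socket file at the chosen location):
+#   protocol = socket
+#   socket = /var/run/grafana/grafana.sock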
+

 #################################### Database ############################
 [database]
 # You can configure the database connection by specifying type, host, name, user and password
@@ -246,6 +249,7 @@ allowed_domains =
 hosted_domain =

 #################################### Grafana.com Auth ####################
+# legacy key names (so they work in env variables)
 [auth.grafananet]
 enabled = false
 allow_sign_up = true
 client_id = some_id
 client_secret = some_secret
 scopes = user:email
 allowed_organizations =

+[auth.grafana_com]
+enabled = false
+allow_sign_up = true
+client_id = some_id
+client_secret = some_secret
+scopes = user:email
+allowed_organizations =
+
 #################################### Generic OAuth #######################
 [auth.generic_oauth]
 name = OAuth
@@ -430,6 +442,9 @@ prefix = prod.grafana.%(instance_name)s.
 [grafana_net]
 url = https://grafana.com

+[grafana_com]
+url = https://grafana.com
+
 #################################### External Image Storage ##############
 [external_image_storage]
 # You can choose between (s3, webdav)
@@ -444,3 +459,4 @@ url =
 username =
 password =
+public_url =
diff --git a/conf/sample.ini b/conf/sample.ini
index 87505221790..65ada5b9468 100644
--- a/conf/sample.ini
+++ b/conf/sample.ini
@@ -26,7 +26,7 @@ #
 #################################### Server ####################################
 [server]
-# Protocol (http or https)
+# Protocol (http, https, socket)
 ;protocol = http

 # The ip address to bind to, empty will bind to all interfaces
@@ -59,6 +59,9 @@
 ;cert_file =
 ;cert_key =

+# Unix socket path
+;socket =
+
 #################################### Database ####################################
 [database]
 # You can configure the database connection by specifying type, host, name, user and password
@@ -246,7 +249,7 @@
 ;allowed_organizations =

 #################################### Grafana.com Auth ####################
-[auth.grafananet]
+[auth.grafana_com]
 ;enabled = false
 ;allow_sign_up = true
 ;client_id = some_id
 ;client_secret = some_secret
 ;scopes = user:email
 ;allowed_organizations =
@@ -383,7 +386,7 @@
 #################################### Grafana.com integration ##########################
 # Url used to to import dashboards directly from Grafana.com
-[grafana_net]
+[grafana_com]
 ;url = https://grafana.com

 #################################### External image storage ##########################
@@ -399,5 +402,6 @@
 [external_image_storage.webdav]
 ;url =
+;public_url =
 ;username =
 ;password =
diff --git a/docker/blocks/mysql/fig b/docker/blocks/mysql/fig
index 731d0fbbdc5..24cb47b61a7 100644
--- a/docker/blocks/mysql/fig
+++ b/docker/blocks/mysql/fig
@@ -10,3 +10,5 @@ mysql:
   volumes:
     - /etc/localtime:/etc/localtime:ro
     - /etc/timezone:/etc/timezone:ro
+  command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]
+
diff --git a/docker/blocks/mysql_opendata/Dockerfile b/docker/blocks/mysql_opendata/Dockerfile
new file mode 100644
index 00000000000..c1086d19b82
--- /dev/null
+++ b/docker/blocks/mysql_opendata/Dockerfile
@@ -0,0 +1,20 @@
+## MySQL with Open Data Set from NYC Open Data (https://data.cityofnewyork.us)
+
+FROM mysql:latest
+
+ENV MYSQL_DATABASE="testdata" \
+    MYSQL_ROOT_PASSWORD="rootpass" \
+    MYSQL_USER="grafana" \
+    MYSQL_PASSWORD="password"
+
+# Install requirements (wget and unzip)
+RUN apt-get update && apt-get install -y wget unzip
+
+# Fetch NYC Data Set
+RUN wget https://data.cityofnewyork.us/download/57g5-etyj/application%2Fzip -O /tmp/data.zip && \
+    unzip -j /tmp/data.zip 311_Service_Requests_from_2015.csv -d /var/lib/mysql-files && \
+    rm /tmp/data.zip
+
+ADD import_csv.sql /docker-entrypoint-initdb.d/
+
+EXPOSE 3306
diff --git a/docker/blocks/mysql_opendata/fig b/docker/blocks/mysql_opendata/fig
new file mode 100644
index 00000000000..a374fbd0931
--- /dev/null
+++ b/docker/blocks/mysql_opendata/fig
@@ -0,0 +1,9 @@
+mysql_opendata:
+  build: blocks/mysql_opendata
+  environment:
+    MYSQL_ROOT_PASSWORD: rootpass
+    MYSQL_DATABASE: testdata
+    MYSQL_USER: grafana
+    MYSQL_PASSWORD: password
+  ports:
+    - "3307:3306"
diff --git a/docker/blocks/mysql_opendata/import_csv.sql b/docker/blocks/mysql_opendata/import_csv.sql
new file mode 100644
index 00000000000..d77361f3b9d
--- /dev/null
+++ b/docker/blocks/mysql_opendata/import_csv.sql
@@ -0,0 +1,80 @@
+use testdata;
+DROP TABLE IF EXISTS `nyc_open_data`;
+CREATE TABLE IF NOT EXISTS `nyc_open_data` (
+  UniqueKey bigint(255),
+  `CreatedDate` varchar(255),
+  `ClosedDate` varchar(255),
+  Agency varchar(255),
+  AgencyName varchar(255),
+  ComplaintType varchar(255),
+  Descriptor varchar(255),
+  LocationType varchar(255),
+  IncidentZip varchar(255),
+  IncidentAddress varchar(255),
+  StreetName varchar(255),
+  CrossStreet1 varchar(255),
+  CrossStreet2 varchar(255),
+  IntersectionStreet1 varchar(255),
+  IntersectionStreet2 varchar(255),
+  AddressType varchar(255),
+  City varchar(255),
+  Landmark varchar(255),
+  FacilityType varchar(255),
+  Status varchar(255),
+  `DueDate` varchar(255),
+  ResolutionDescription varchar(2048),
+  `ResolutionActionUpdatedDate` varchar(255),
+  CommunityBoard varchar(255),
+  Borough varchar(255),
+  XCoordinateStatePlane varchar(255),
+  YCoordinateStatePlane varchar(255),
+  ParkFacilityName varchar(255),
+  ParkBorough varchar(255),
+  SchoolName varchar(255),
+  SchoolNumber varchar(255),
+  SchoolRegion varchar(255),
+  SchoolCode varchar(255),
+  SchoolPhoneNumber varchar(255),
+  SchoolAddress varchar(255),
+  SchoolCity varchar(255),
+  SchoolState varchar(255),
+  SchoolZip varchar(255),
+  SchoolNotFound varchar(255),
+  SchoolOrCitywideComplaint varchar(255),
+  VehicleType varchar(255),
+  TaxiCompanyBorough varchar(255),
+  TaxiPickUpLocation varchar(255),
+  BridgeHighwayName varchar(255),
+  BridgeHighwayDirection varchar(255),
+  RoadRamp varchar(255),
+  BridgeHighwaySegment varchar(255),
+  GarageLotName varchar(255),
+  FerryDirection varchar(255),
+  FerryTerminalName varchar(255),
+  Latitude varchar(255),
+  Longitude varchar(255),
+  Location varchar(255)
+);
+LOAD DATA INFILE '/var/lib/mysql-files/311_Service_Requests_from_2015.csv' INTO TABLE nyc_open_data FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' IGNORE 1 LINES;
+UPDATE nyc_open_data SET CreatedDate = STR_TO_DATE(CreatedDate, '%m/%d/%Y %r') WHERE CreatedDate <> '';
+UPDATE nyc_open_data SET ClosedDate = STR_TO_DATE(ClosedDate, '%m/%d/%Y %r') WHERE ClosedDate <> '';
+UPDATE nyc_open_data SET DueDate = STR_TO_DATE(DueDate, '%m/%d/%Y %r') WHERE DueDate <> '';
+UPDATE nyc_open_data SET ResolutionActionUpdatedDate = STR_TO_DATE(ResolutionActionUpdatedDate, '%m/%d/%Y %r') WHERE ResolutionActionUpdatedDate <> '';
+
+UPDATE nyc_open_data SET CreatedDate=null WHERE CreatedDate = '';
+UPDATE nyc_open_data SET ClosedDate=null WHERE ClosedDate = '';
+UPDATE nyc_open_data SET DueDate=null WHERE DueDate = '';
+UPDATE nyc_open_data SET ResolutionActionUpdatedDate=null WHERE ResolutionActionUpdatedDate = '';
+
+ALTER TABLE nyc_open_data modify CreatedDate datetime NULL;
+ALTER TABLE nyc_open_data modify ClosedDate datetime NULL;
+ALTER TABLE nyc_open_data modify DueDate datetime NULL;
+ALTER TABLE nyc_open_data modify ResolutionActionUpdatedDate datetime NULL;
+
+ALTER TABLE `nyc_open_data` ADD INDEX `IX_ComplaintType` (`ComplaintType`);
+ALTER TABLE `nyc_open_data` ADD INDEX `IX_CreatedDate` (`CreatedDate`);
+ALTER TABLE `nyc_open_data` ADD INDEX `IX_LocationType` (`LocationType`);
+ALTER TABLE `nyc_open_data` ADD INDEX `IX_AgencyName` (`AgencyName`);
+ALTER TABLE `nyc_open_data` ADD INDEX `IX_City` (`City`);
+
+SYSTEM rm /var/lib/mysql-files/311_Service_Requests_from_2015.csv
diff --git a/docker/blocks/smtp/fig b/docker/blocks/smtp/fig
index c2d37e01c21..3aa25e01311 100644
--- a/docker/blocks/smtp/fig
+++ b/docker/blocks/smtp/fig
@@ -1,4 +1,4 @@
 snmpd:
-  build: blocks/snmpd
+  image: namshi/smtp
   ports:
-    - "161:161"
+    - "25:25"
diff --git a/docs/Makefile b/docs/Makefile
index e5c555f8019..c79d3087492 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -1,4 +1,4 @@
-.PHONY: all default docs docs-build docs-shell shell test
+.PHONY: all default docs docs-build docs-shell shell checkvars

 # to allow `make DOCSPORT=9000 docs`
 DOCSPORT := 3004
@@ -11,23 +11,24 @@
 DOCS_MOUNT := -v $(SOURCES_HOST_DIR):/site/content

 DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e NOCACHE -p 3004:3004 -p 3005:3005
+VERSION := $(shell head -n 1 VERSION)

 default: docs

+checkvars:
+ifndef ENV
+	$(error ENV is undefined; set it via ENV=staging or ENV=prod as an argument to make)
+endif
+
 docs: docs-build
 	$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "grunt --env=dev-docs && grunt connect --port=3004"

-test: docs-build
-	$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "ls -la /site/content"
-
-docs-watch: docs-build
+watch: docs-build
 	$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "grunt --env=dev-docs && grunt connect --port=3004 & grunt watch --port=3004 --env=dev-docs"

-publish: docs-build
-	$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "./publish.sh staging-docs root"
-
-publish-prod: docs-build
-	$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "./publish.sh prod-docs root"
+publish: checkvars docs-build
+	$(info Publishing ENV=${ENV} and VERSION=${VERSION})
+	$(DOCKER_RUN_DOCS) $(DOCS_MOUNT) -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" /bin/bash -c "./publish.sh ${ENV}-docs ${VERSION}"

 docs-build:
 	docker build -t "$(DOCKER_DOCS_IMAGE)" --no-cache .
diff --git a/docs/README.md b/docs/README.md
index ea3f8394ace..7aeab15888b 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,8 +1,7 @@
 # Building The Docs

 To build the docs locally, you need to have docker installed. The
-docs are built using a custom [docker](https://www.docker.com/) image
-and the [mkdocs](http://www.mkdocs.org/) tool.
+docs are built using [Hugo](http://gohugo.io/) - a static site generator.

 **Prepare the Docker Image**:

@@ -11,19 +10,23 @@
 when running ``make docs-build`` depending on how your system's docker
 service is configured):

 ```
-$ git clone https://github.com/grafana/grafana.org
-$ cd grafana.org
-$ make docs-build
+git clone https://github.com/grafana/grafana.org
+cd grafana.org
+make docs-build
 ```

 **Build the Documentation**:

 Now that the docker image has been prepared we can build the
-docs. Switch your working directory back to the directory this file
-(README.md) is in and run (possibly with ``sudo``):
+Grafana docs and start a docs server. Switch your working directory back to the directory this file
+(README.md) is in.
+
+An AWS config file is required to build the docs Docker image and to publish the site to AWS. If you are building locally only and do not have any AWS credentials for docs.grafana.org then create an empty file named `awsconfig` in the current directory.
+
+Then run (possibly with ``sudo``):

 ```
-$ make docs
+make watch
 ```

 This command will not return control of the shell to the user. Instead
@@ -32,4 +35,21 @@
 we created in the previous step.

 Open [localhost:3004](http://localhost:3004) to view the docs.

+### Images & Content
+
+All markdown files are located in this repo (main grafana repo). But all images are added to the https://github.com/grafana/grafana.org repo. So the process of adding images is a bit complicated.
+
+First you need to create a feature (PR) branch of https://github.com/grafana/grafana.org so you can make changes.
+Then add the image to the `/static/img/docs` directory. Then make a commit that adds the image.
+
+Then run:
+```
+make docs-build
+```
+
+This will rebuild the docs docker container.
+
+To be able to use the image you have to quit (CTRL-C) the `make watch` command (that you run in the same directory as this README). Then simply rerun `make watch`; it will restart the docs server, now with access to your image.
+
+### Editing content
+
+Changes to the markdown files should automatically cause a docs rebuild and live reload should reload the page in your browser.
diff --git a/docs/VERSION b/docs/VERSION
index fd2a01863fd..2fa2c5705ad 100644
--- a/docs/VERSION
+++ b/docs/VERSION
@@ -1 +1 @@
-3.1.0
+v4.2
diff --git a/docs/publish.sh b/docs/publish.sh
new file mode 100755
index 00000000000..4b72f892179
--- /dev/null
+++ b/docs/publish.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+make publish ENV=prod VERSION=root
diff --git a/docs/sources/administration/cli.md b/docs/sources/administration/cli.md
index 8c7755506e8..645f75ab412 100644
--- a/docs/sources/administration/cli.md
+++ b/docs/sources/administration/cli.md
@@ -27,6 +27,24 @@ To show all admin commands:

 ### Reset admin password

-You can reset the password for the admin user using the CLI.
+You can reset the password for the admin user using the CLI. The use case for this command is when you have lost the admin password.

 `grafana-cli admin reset-admin-password ...`
+
+If running the command returns this error:
+
+> Could not find config defaults, make sure homepath command line parameter is set or working directory is homepath
+
+then there are two flags that can be used to set homepath and the config file path.
+
+`grafana-cli admin reset-admin-password --homepath "/usr/share/grafana" newpass`
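+
+A hypothetical invocation that passes both flags, assuming the second flag is named `--config` (adjust both paths to your install), might look like:
+
+```
+grafana-cli admin reset-admin-password --homepath "/usr/share/grafana" --config "/etc/grafana/grafana.ini" newpass
+```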
+
+If you have not lost the admin password then it is better to change it in the Grafana UI. If you need to set the password in a script then the [Grafana API](http://docs.grafana.org/http_api/user/#change-password) can be used. Here is an example with curl using basic auth:
+
+```
+curl -X PUT -H "Content-Type: application/json" -d '{
+  "oldPassword": "admin",
+  "newPassword": "newpass",
+  "confirmNew": "newpass"
+}' http://admin:admin@localhost:3000/api/user/password
+```
diff --git a/docs/sources/alerting/notifications.md b/docs/sources/alerting/notifications.md
index d0dd19cbcdc..cd1316eb479 100644
--- a/docs/sources/alerting/notifications.md
+++ b/docs/sources/alerting/notifications.md
@@ -12,18 +12,18 @@ weight = 2

 # Alert Notifications

-{{< imgbox max-width="40%" img="/img/docs/v4/alert_notifications_menu.png" caption="Alerting notifications" >}}
-
 > Alerting is only available in Grafana v4.0 and above.

 When an alert changes state it sends out notifications. Each alert rule can have
 multiple notifications. But in order to add a notification to an alert rule you first need
-to add and configure a `notification` object. This is done from the Alerting/Notifications page.
+to add and configure a `notification` channel (can be email, PagerDuty or other integration). This is done from the Notification Channels page.

-## Notification Setup
+## Notification Channel Setup

-On the notifications list page hit the `New Notification` button to go the the page where you
-can configure and setup a new notification.
+{{< imgbox max-width="40%" img="/img/docs/v43/alert_notifications_menu.png" caption="Alerting Notification Channels" >}}
+
+On the Notification Channels page hit the `New Channel` button to go to the page where you
+can configure and set up a new Notification Channel.

 You specify name and type, and type specific options. You can also test the notification to make
 sure it's working and setup correctly.

@@ -32,15 +32,15 @@
 When checked this option will make this notification used for all alert rules, existing and new.

-## Supported notification types
+## Supported Notification Types

-Grafana ships with a set of notification types. More will be added in future releases.
+Grafana ships with the following set of notification types:

 ### Email

 To enable email notification you have to setup [SMTP settings](/installation/configuration/#smtp)
 in the Grafana config. Email notification will upload an image of the alert graph to an
-external image destination if available or fallback on attaching the image in the email.
+external image destination if available or fall back to attaching the image in the email.

 ### Slack

@@ -55,19 +55,29 @@
 Setting | Description
 ---------- | -----------
 Recipient | allows you to override the slack recipient.
 Mention | make it possible to include a mention in the slack notification sent by Grafana. Ex @here or @channel

+### PagerDuty
+
+To set up PagerDuty, all you have to do is provide an API key.
+
+Setting | Description
+---------- | -----------
+Integration Key | Integration key for PagerDuty.
+Auto resolve incidents | Resolve incidents in PagerDuty once the alert goes back to ok
+
 ### Webhook

 The webhook notification is a simple way to send information about an state change over HTTP to a custom endpoint.
-Using this notification you could integrated Grafana into any system you choose, by yourself.
+Using this notification you could integrate Grafana into any system you choose.

 Example json body:
+
 ```json
 {
   "title": "My alert",
   "ruleId": 1,
   "ruleName": "Load peaking!",
   "ruleUrl": "http://url.to.grafana/db/dashboard/my_dashboard?panelId=2",
-  "state": "Alerting",
+  "state": "alerting",
   "imageUrl": "http://s3.image.url",
   "message": "Load is peaking. Make sure the traffic is real and spin up more webfronts",
   "evalMatches": [
@@ -80,30 +90,38 @@
 }
 ```

-### PagerDuty
+- **state** - The possible values for alert state are: `ok`, `paused`, `alerting`, `pending`, `no_data`.
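+
+As an illustration only (this receiver is not shipped with Grafana; the endpoint path and port are arbitrary examples), a minimal Go endpoint that accepts this payload could look like:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"net/http"
+)
+
+// alertPayload mirrors the fields of the example json body above.
+type alertPayload struct {
+	Title    string `json:"title"`
+	RuleName string `json:"ruleName"`
+	RuleURL  string `json:"ruleUrl"`
+	State    string `json:"state"`
+	Message  string `json:"message"`
+}
+
+func main() {
+	http.HandleFunc("/grafana-alerts", func(w http.ResponseWriter, r *http.Request) {
+		var p alertPayload
+		if err := json.NewDecoder(r.Body).Decode(&p); err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		// React to the state change, e.g. forward it to an internal system.
+		log.Printf("alert %q is now %s: %s", p.RuleName, p.State, p.Message)
+	})
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```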

-To set up PagerDuty, all you have to do is to provide an api key.
+### Other Supported Notification Channels

-Setting | Description
----------- | -----------
-Integration Key | Integration key for pagerduty.
-Auto resolve incidents | Resolve incidents in pagerduty once the alert goes back to ok
+Grafana also supports the following Notification Channels:
+
+- HipChat
+- VictorOps
+- Sensu
+- OpsGenie
+- Threema
+- Pushover
+- Telegram
+- LINE

 # Enable images in notifications {#external-image-store}

-Grafana can render the panel associated with the alert rule and include that in the notification. Some types
-of notifications require that this image be publicly accessable (Slack for example). In order to support
-images in notifications like Slack Grafana can upload the image to an image store. It currently supports
-Amazon S3 for this and Webdav. So to set that up you need to configure the
-[external image uploader](/installation/configuration/#external-image-storage) in your grafana-server ini
-config file.
+Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessible (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
+Amazon S3 and Webdav for this. So to set that up you need to configure the [external image uploader](/installation/configuration/#external-image-storage) in your grafana-server ini config file.

-This is an optional requirement, you can get slack and email notifications without setting this up.
+Currently only the Email Channel attaches images if no external image store is specified. To include images in alert notifications for other channels you need to set up an external image store.
+
+This is an optional requirement; you can get Slack and email notifications without setting this up.

 # Configure the link back to Grafana from alert notifications

-All alert notifications contains a link back to the triggered alert in the Grafana instance.
-This url is based on the [domain](/installation/configuration/#domain) setting in Grafana.
-
-
+All alert notifications contain a link back to the triggered alert in the Grafana instance.
+This URL is based on the [domain](/installation/configuration/#domain) setting in Grafana.
diff --git a/docs/sources/alerting/rules.md b/docs/sources/alerting/rules.md
index a2688de3100..c4a3a012c46 100644
--- a/docs/sources/alerting/rules.md
+++ b/docs/sources/alerting/rules.md
@@ -52,12 +52,22 @@ Here you can specify the name of the alert rule and how often the scheduler should evaluate it.

 ### Conditions

 Currently the only condition type that exists is a `Query` condition that allows you to
-specify a query letter, time range and an aggregation function. The letter refers to
-a query you already have added in the **Metrics** tab. The result from the query and the aggregation function is
-a single value that is then used in the threshold check. The query used in an alert rule cannot
-contain any template variables. Currently we only support `AND` and `OR` operators between conditions and they are executed serially.
+specify a query letter, time range and an aggregation function.
+
+### Query condition example
+
+```sql
+avg() OF query(A, 5m, now) IS BELOW 14
+```
+
+- `avg()` Controls how the values for **each** series should be reduced to a value that can be compared against the threshold. Click on the function to change it to another aggregation function.
+- `query(A, 5m, now)` The letter defines what query to execute from the **Metrics** tab. The second two parameters define the time range: `5m, now` means a range from 5 minutes ago until now. You can also do `10m, now-2m` to define a time range from 10 minutes ago until 2 minutes ago. This is useful if you want to ignore the last 2 minutes of data.
+- `IS BELOW 14` Defines the type of threshold and the threshold value. You can click on `IS BELOW` to change the type of threshold.
+
+The query used in an alert rule cannot contain any template variables. Currently we only support `AND` and `OR` operators between conditions and they are executed serially.
 For example, we have 3 conditions in the following order:
-`condition:A(evaluates to: TRUE) OR condition:B(evaluates to: FALSE) AND condition:C(evaluates to: TRUE)`
+*condition:A(evaluates to: TRUE) OR condition:B(evaluates to: FALSE) AND condition:C(evaluates to: TRUE)*
 so the result will be calculated as ((TRUE OR FALSE) AND TRUE) = TRUE.

 We plan to add other condition types in the future, like `Other Alert`, where you can include the state
diff --git a/docs/sources/archive.md b/docs/sources/archive.md
index af4ab5e7a8f..7102395f4c7 100644
--- a/docs/sources/archive.md
+++ b/docs/sources/archive.md
@@ -12,10 +12,7 @@ weight = 200

 Here you can find links to older versions of the documentation that might be better suited for your version
 of Grafana.

-- [Latest](/)
-- [Version 3.1](/v3.1)
-- [Version 3.0](/v3.0)
-- [Version 2.6](/v2.6)
-- [Version 2.5](/v2.5)
-- [Version 2.1](/v2.1)
-- [Version 2.0](/v2.0)
+- [Latest](http://docs.grafana.org)
+- [Version 4.2](http://docs.grafana.org/v4.2)
+- [Version 3.1](http://docs.grafana.org/v3.1)
+- [Version 3.0](http://docs.grafana.org/v3.0)
diff --git a/docs/sources/features/datasources/cloudwatch.md b/docs/sources/features/datasources/cloudwatch.md
index c2896c5b172..8d77e5c59c0 100644
--- a/docs/sources/features/datasources/cloudwatch.md
+++ b/docs/sources/features/datasources/cloudwatch.md
@@ -13,29 +13,26 @@ weight = 10

 # Using AWS CloudWatch in Grafana

-Grafana ships with built in support for CloudWatch. You just have to add it as a data source and you will
-be ready to build dashboards for you CloudWatch metrics.
+Grafana ships with built-in support for CloudWatch. You just have to add it as a data source and you will be ready to build dashboards for your CloudWatch metrics.

-## Adding the data source
-![](/img/docs/cloudwatch/cloudwatch_add.png)
+## Adding the data source to Grafana

-1. Open the side menu by clicking the the Grafana icon in the top header.
+1. Open the side menu by clicking the Grafana icon in the top header.
 2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
+3. Click the `+ Add data source` button in the top header.
+4. Select `CloudWatch` from the *Type* dropdown.

-   > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
-
-3. Click the `Add new` link in the top header.
-4. Select `CloudWatch` from the dropdown.
-   > NOTE: If at any moment you have issues with getting this datasource to work and grafana is giving you undescriptive errors then dont forget to check your log file (try looking in /var/log/grafana/).
+> NOTE: If at any moment you have issues with getting this datasource to work and Grafana is giving you undescriptive errors then don't
+forget to check your log file (try looking in /var/log/grafana/grafana.log).

 Name | Description
 ------------ | -------------
-Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
-Default | Default data source means that it will be pre-selected for new panels.
-Credentials profile name | Specify the name of the profile to use (if you use `~/aws/credentials` file), leave blank for default. This option was introduced in Grafana 2.5.1
-Default Region | Used in query editor to set region (can be changed on per query basis)
-Custom Metrics namespace | Specify the CloudWatch namespace of Custom metrics
-Assume Role Arn | Specify the ARN of the role to assume
+*Name* | The data source name. This is how you refer to the data source in panels & queries.
+*Default* | Default data source means that it will be pre-selected for new panels.
+*Credentials profile name* | Specify the name of the profile to use (if you use the `~/.aws/credentials` file); leave blank for default.
+*Default Region* | Used in query editor to set region (can be changed on per query basis)
+*Custom Metrics namespace* | Specify the CloudWatch namespace of Custom metrics
+*Assume Role Arn* | Specify the ARN of the role to assume

 ## Authentication

@@ -61,49 +58,64 @@ Example content:

 ## Metric Query Editor

-![](/img/docs/cloudwatch/query_editor.png)
+![](/img/docs/v43/cloudwatch_editor.png)

 You need to specify a namespace, metric, at least one stat, and at least one dimension.

 ## Templated queries
-CloudWatch Datasource Plugin provides the following functions in `Variables values query` field in Templating Editor to query `region`, `namespaces`, `metric names` and `dimension keys/values` on the CloudWatch.
+
+Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
+Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
+being displayed in your dashboard.
+
+Check out the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
+types of template variables.
+
+### Query variable
+
+CloudWatch Datasource Plugin provides the following queries you can specify in the `Query` field in the Variable
+edit view. They allow you to fill a variable's options list with things like `region`, `namespaces`, `metric names`
+and `dimension keys/values`.

 Name | Description
 ------- | --------
-`regions()` | Returns a list of regions AWS provides their service.
-`namespaces()` | Returns a list of namespaces CloudWatch support.
-`metrics(namespace, [region])` | Returns a list of metrics in the namespace. (specify region for custom metrics)
-`dimension_keys(namespace)` | Returns a list of dimension keys in the namespace.
-`dimension_values(region, namespace, metric, dimension_key)` | Returns a list of dimension values matching the specified `region`, `namespace`, `metric` and `dimension_key`.
-`ebs_volume_ids(region, instance_id)` | Returns a list of volume id matching the specified `region`, `instance_id`.
-`ec2_instance_attribute(region, attribute_name, filters)` | Returns a list of attribute matching the specified `region`, `attribute_name`, `filters`.
+*regions()* | Returns a list of regions where AWS provides their services.
+*namespaces()* | Returns a list of namespaces CloudWatch supports.
+*metrics(namespace, [region])* | Returns a list of metrics in the namespace. (specify region for custom metrics)
+*dimension_keys(namespace)* | Returns a list of dimension keys in the namespace.
+*dimension_values(region, namespace, metric, dimension_key)* | Returns a list of dimension values matching the specified `region`, `namespace`, `metric` and `dimension_key`.
+*ebs_volume_ids(region, instance_id)* | Returns a list of volume ids matching the specified `region`, `instance_id`.
+*ec2_instance_attribute(region, attribute_name, filters)* | Returns a list of attributes matching the specified `region`, `attribute_name`, `filters`.

 For details about the metrics CloudWatch provides, please refer to the [CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
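+
+Variables can also be chained. For example (the variable names here are illustrations only), a `region` variable populated with *regions()* can be reused inside another variable's query:
+
+```
+dimension_values($region, AWS/EC2, CPUUtilization, InstanceId)
+```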

-## Example templated Queries
+#### Example templated Queries

 Example dimension queries which will return list of resources for individual AWS Services:

-Service | Query
+Query | Service
 ------- | -----
-ELB | `dimension_values(us-east-1,AWS/ELB,RequestCount,LoadBalancerName)`
-ElastiCache | `dimension_values(us-east-1,AWS/ElastiCache,CPUUtilization,CacheClusterId)`
-RedShift | `dimension_values(us-east-1,AWS/Redshift,CPUUtilization,ClusterIdentifier)`
-RDS | `dimension_values(us-east-1,AWS/RDS,CPUUtilization,DBInstanceIdentifier)`
-S3 | `dimension_values(us-east-1,AWS/S3,BucketSizeBytes,BucketName)`
+*dimension_values(us-east-1,AWS/ELB,RequestCount,LoadBalancerName)* | ELB
+*dimension_values(us-east-1,AWS/ElastiCache,CPUUtilization,CacheClusterId)* | ElastiCache
+*dimension_values(us-east-1,AWS/Redshift,CPUUtilization,ClusterIdentifier)* | RedShift
+*dimension_values(us-east-1,AWS/RDS,CPUUtilization,DBInstanceIdentifier)* | RDS
+*dimension_values(us-east-1,AWS/S3,BucketSizeBytes,BucketName)* | S3

-## ec2_instance_attribute JSON filters
+#### ec2_instance_attribute JSON filters

 The `ec2_instance_attribute` query take `filters` in JSON format.
 You can specify [pre-defined filters of ec2:DescribeInstances](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html).
-Specify like `{ filter_name1: [ filter_value1 ], filter_name2: [ filter_value2 ] }`
+
+Filters syntax:
+
+```javascript
+{ filter_name1: [ filter_value1 ], filter_name2: [ filter_value2 ] }
+```

 Example `ec2_instance_attribute()` query

     ec2_instance_attribute(us-east-1, InstanceId, { "tag:Environment": [ "production" ] })

-![](/img/docs/v2/cloudwatch_templating.png)
-
 ## Cost

 Amazon provides 1 million CloudWatch API requests each month at no additional charge. Past this,
diff --git a/docs/sources/features/datasources/elasticsearch.md b/docs/sources/features/datasources/elasticsearch.md
index 4f8bfaf968c..25cdb98c8c5 100644
--- a/docs/sources/features/datasources/elasticsearch.md
+++ b/docs/sources/features/datasources/elasticsearch.md
@@ -12,34 +12,29 @@ weight = 3

 # Using Elasticsearch in Grafana

-Grafana ships with advanced support for Elasticsearch. You can do many types of
-simple or complex elasticsearch queries to visualize logs or metrics stored in elasticsearch. You can
-also annotate your graphs with log events stored in elasticsearch.
+Grafana ships with advanced support for Elasticsearch. You can do many types of simple or complex Elasticsearch queries to
+visualize logs or metrics stored in Elasticsearch. You can also annotate your graphs with log events stored in Elasticsearch.

 ## Adding the data source

-![](/img/docs/v2/add_Graphite.jpg)
-
-1. Open the side menu by clicking the the Grafana icon in the top header.
+1. Open the side menu by clicking the Grafana icon in the top header.
 2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
+3. Click the `+ Add data source` button in the top header.
+4. Select *Elasticsearch* from the *Type* dropdown.

-   > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
-
-3. Click the `Add new` link in the top header.
-4. Select `Elasticsearch` from the dropdown.
+> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.

 Name | Description
 ------------ | -------------
-Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
-Default | Default data source means that it will be pre-selected for new panels.
-Url | The http protocol, ip and port of you elasticsearch server.
-Access | Proxy = access via Grafana backend, Direct = access directly from browser.
+*Name* | The data source name. This is how you refer to the data source in panels & queries.
+*Default* | Default data source means that it will be pre-selected for new panels.
+*Url* | The HTTP protocol, IP, and port of your Elasticsearch server.
+*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.

-Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser.
-
-Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source.
+Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross-Origin Resource Sharing) issues, as well as eliminate the need to disseminate authentication to the browser.

 ### Direct access
+
 If you select direct access you must update your Elasticsearch configuration to allow other
 domains to access Elasticsearch from the browser. You do this by specifying these to options in your **elasticsearch.yml** config file.

@@ -50,46 +45,94 @@
 ![](/img/docs/elasticsearch/elasticsearch_ds_details.png)

-Here you can specify a default for the `time field` and specify the name of your elasticsearch index. You can use
+Here you can specify a default for the `time field` and specify the name of your Elasticsearch index. You can use
 a time pattern for the index name or a wildcard.

+### Elasticsearch version
+
+Be sure to specify your Elasticsearch version in the version selection dropdown. This is very important as there are differences in how queries are composed. Currently only 2.x and 5.x
+are supported.
+
 ## Metric Query editor

 ![](/img/docs/elasticsearch/query_editor.png)

-The Elasticsearch query editor allows you to select multiple metrics and group by multiple terms or filters. Use the plus and minus icons to the right to add / remove
-metrics or group bys. Some metrics and group by have options, click the option text to expand the the row to view and edit metric or group by options.
+The Elasticsearch query editor allows you to select multiple metrics and group by multiple terms or filters. Use the plus and minus icons to the right to add/remove
+metrics or group by clauses. Some metrics and group by clauses have options; click the option text to expand the row to view and edit metric or group by options.
+
+## Series naming & alias patterns
+
+You can control the name for time series via the `Alias` input field.
+
+Pattern | Description
+------------ | -------------
+*{{term fieldname}}* | replaced with the value of a term group by
+*{{metric}}* | replaced with metric name (ex. Average, Min, Max)
+*{{field}}* | replaced with the metric field name
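+
+For example, with a *Terms* group by on `@hostname` and an *Average* metric, an alias pattern like the following would produce series names such as `server1: Average` (the field name here is illustrative):
+
+```
+{{term @hostname}}: {{metric}}
+```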

 ## Pipeline metrics

-If you have Elasticsearch 2.x and Grafana 2.6 or above then you can use pipeline metric aggregations like
-**Moving Average** and **Derivative**. Elasticsearch pipeline metrics require another metric to be based on. Use the eye icon next to the metric
-to hide metrics from appearing in the graph. This is useful for metrics you only have in the query to be used
-in a pipeline metric.
+Some metric aggregations are called Pipeline aggregations, for example, *Moving Average* and *Derivative*. Elasticsearch pipeline metrics require another metric to be based on. Use the eye icon next to the metric to hide metrics from appearing in the graph. This is useful for metrics you only have in the query for use in a pipeline metric.

 ![](/img/docs/elasticsearch/pipeline_metrics_editor.png)

 ## Templating

-The Elasticsearch datasource supports two types of queries you can use to fill template variables with values.
+Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
+Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
+being displayed in your dashboard.

-### Possible values for a field
+Check out the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
+types of template variables.

-```json
-{"find": "terms", "field": "@hostname"}
+### Query variable
+
+The Elasticsearch data source supports two types of queries you can use in the *Query* field of *Query* variables. The query is written using a custom JSON string.
+
+Query | Description
+------------ | -------------
+*{"find": "fields", "type": "keyword"}* | Returns a list of field names with the index type `keyword`.
+*{"find": "terms", "field": "@hostname", "size": 1000}* | Returns a list of values for a field using term aggregation. The query will use the current dashboard time range as the time range for the query.
+*{"find": "terms", "field": "@hostname", "query": ''}* | Returns a list of values for a field using term aggregation and a specified lucene query filter. The query will use the current dashboard time range as the time range for the query.
+
+There is a default size limit of 500 on terms queries. Set the size property in your query to set a custom limit.
+You can use other variables inside the query. Example query definition for a variable named `$host`.
+
+```
+{"find": "terms", "field": "@hostname", "query": "@source:$source"}
+```

-### Fields filtered by type
-```json
-{"find": "fields", "type": "string"}
-```
+In the above example, we use another variable named `$source` inside the query definition. Whenever you change, via the dropdown, the current value of the `$source` variable, it will trigger an update of the `$host` variable so that it only contains hostnames filtered, in this case, by the
+`@source` document property.

-### Fields filtered by type, with filter
-```json
-{"find": "fields", "type": "string", "query": }
-```
+### Using variables in queries

-### Multi format / All format
-Use lucene format.
+There are two syntaxes:
+
+- `$varname` Example: @hostname:$hostname
+- `[[varname]]` Example: @hostname:[[hostname]]
+
+Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word.
+When the *Multi-value* or *Include all value*
+options are enabled, Grafana converts the labels from plain text to a lucene compatible condition.
+
+![](/img/docs/v43/elastic_templating_query.png)
+
+In the above example, we have a lucene query that filters documents based on the `@hostname` property using a variable named `$hostname`. It is also using
+a variable in the *Terms* group by field input box. This allows you to use a variable to quickly change how the data is grouped.
+
+Example dashboard:
+[Elasticsearch Templated Dashboard](http://play.grafana.org/dashboard/db/elasticsearch-templated)
+
+## Annotations
+
+[Annotations]({{< relref "reference/annotations.md" >}}) allow you to overlay rich event information on top of graphs. You add annotation
+queries via the Dashboard menu / Annotations view. Grafana can query any Elasticsearch index
+for annotation events.
+
+Name | Description
+------------ | -------------
+Query | You can leave the search query blank or specify a lucene query
+Time | The name of the time field, needs to be a date field.
+Title | The name of the field to use for the event title.
+Tags | Optional field name to use for event tags (can be an array or a CSV string).
+Text | Optional field name to use for the event text body.
diff --git a/docs/sources/features/datasources/graphite.md b/docs/sources/features/datasources/graphite.md
index 11e15ab503e..a625b93e8d8 100644
--- a/docs/sources/features/datasources/graphite.md
+++ b/docs/sources/features/datasources/graphite.md
@@ -18,28 +18,22 @@ change function parameters and much more. The editor can handle all types of gra
 queries through the use of query references.

 ## Adding the data source
-![](/img/docs/v2/add_Graphite.jpg)

-1. Open the side menu by clicking the the Grafana icon in the top header.
+1. Open the side menu by clicking the Grafana icon in the top header.
 2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
+3. Click the `+ Add data source` button in the top header.
+4. Select `Graphite` from the *Type* dropdown.

-    > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
-
-3. Click the `Add new` link in the top header.
-4. Select `Graphite` from the dropdown.
+> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.

 Name | Description
 ------------ | -------------
-Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
-Default | Default data source means that it will be pre-selected for new panels.
-Url | The http protocol, ip and port of your graphite-web or graphite-api install.
-Access | Proxy = access via Grafana backend, Direct = access directly from browser.
-
-
-Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser.
-
-Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source.
+*Name* | The data source name. This is how you refer to the data source in panels & queries.
+*Default* | Default data source means that it will be pre-selected for new panels.
+*Url* | The HTTP protocol, IP, and port of your graphite-web or graphite-api install.
+*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.

+Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the browser.

 ## Metric editor

@@ -50,6 +44,7 @@ or keyboard arrow keys. You can select a wildcard and still continue.

 ![](/img/docs/animated_gifs/graphite_query1.gif)

 ### Functions
+
 Click the plus icon to the right to add a function. You can search for the function or select it from the menu. Once
 a function is selected it will be added and your focus will be in the text box of the first parameter. To later change
 a parameter just click on it and it will turn into a text box. To delete a function click the function name followed
@@ -57,32 +52,61 @@ by the x icon.

 ![](/img/docs/animated_gifs/graphite_query2.gif)

-
 ### Optional parameters
+
 Some functions like aliasByNode support an optional second argument. To add this parameter specify for example 3,-2 as the first parameter and the function editor will adapt and move the -2 to a second parameter. To remove the second optional parameter just click on it and leave it blank and the editor will remove it.

 ![](/img/docs/animated_gifs/func_editor_optional_params.gif)

+### Nested Queries
+
+You can reference queries by the row “letter” that they’re on (similar to Microsoft Excel). If you add a second query to a graph, you can reference the first query simply by typing in #A. This provides an easy and convenient way to build compounded queries.
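+
+A minimal sketch, assuming a dashboard where rows A and B each contain a query: a third query row could combine them with a Graphite function such as `asPercent`:
+
+```
+asPercent(#A, #B)
+```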

 ## Point consolidation

-All Graphite metrics are consolidated so that Graphite doesn't return more data points than there are pixels in the graph. By default
+All Graphite metrics are consolidated so that Graphite doesn't return more data points than there are pixels in the graph. By default,
 this consolidation is done using the `avg` function. You can change how Graphite consolidates metrics by adding the Graphite consolidateBy function.

 > *Notice* This means that legend summary values (max, min, total) cannot be all correct at the same time. They are calculated
 > client side by Grafana. And depending on your consolidation function only one or two can be correct at the same time.
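+
+For example (using a hypothetical metric path), wrapping a query in `consolidateBy` with `max` makes the drawn line and the legend's max summary agree:
+
+```
+consolidateBy(prod.servers.*.cpu, 'max')
+```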

 ## Templating

-You can create a template variable in Grafana and have that variable filled with values from any Graphite metric exploration query.
-You can then use this variable in your Graphite queries, either as part of a metric path or as arguments to functions.
-For example a query like `prod.servers.*` will fill the variable with all possible
-values that exists in the wildcard position.
+Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
+Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
+being displayed in your dashboard.
+
+Check out the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
+types of template variables.
+
+### Query variable
+
+The query you specify in the query field should be a metric find type of query. For example, a query like `prod.servers.*` will fill the
+variable with all possible values that exist in the wildcard position.

 You can also create nested variables that use other variables in their definition. For example
 `apps.$app.servers.*` uses the variable `$app` in its query definition.

+### Variable usage
+
+You can use a variable in a metric node path or as a parameter to a function.

 ![](/img/docs/v2/templated_variable_parameter.png)

+There are two syntaxes:

-## Query Reference
-You can reference queries by the row “letter” that they’re on (similar to Microsoft Excel). If you add a second query to graph, you can reference the first query simply by typing in #A. This provides an easy and convenient way to build compounded queries.
+- `$varname` Example: apps.frontend.$server.requests.count
+- `[[varname]]` Example: apps.frontend.[[server]].requests.count
+
+Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. Use
+the second syntax in expressions like `my.server[[serverNumber]].count`.
+
+Example:
+[Graphite Templated Dashboard](http://play.grafana.org/dashboard/db/graphite-templated-nested)
+
+## Annotations
+
+[Annotations]({{< relref "reference/annotations.md" >}}) allow you to overlay rich event information on top of graphs. You add annotation
+queries via the Dashboard menu / Annotations view.
+
+Graphite supports two ways to query annotations: a regular metric query, for which you use the `Graphite query` textbox, and a Graphite events query, for which you use the `Graphite event tags` textbox to
+specify a tag or wildcard (leaving it empty should also work).
diff --git a/docs/sources/features/datasources/influxdb.md b/docs/sources/features/datasources/influxdb.md
index 03dc26bcc26..2837363e145 100644
--- a/docs/sources/features/datasources/influxdb.md
+++ b/docs/sources/features/datasources/influxdb.md
@@ -15,29 +15,29 @@ weight = 3

 Grafana ships with a very feature rich data source plugin for InfluxDB, supporting a rich query editor, annotation and templating queries.

 ## Adding the data source
-![](/img/docs/v2/add_Influx.jpg)

-1. Open the side menu by clicking the the Grafana icon in the top header.
+1. Open the side menu by clicking the Grafana icon in the top header.
 2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
+3. Click the `+ Add data source` button in the top header.
+4. Select *InfluxDB* from the *Type* dropdown.

-    > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
-
-3. Click the `Add new` link in the top header.
+> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.

 Name | Description
 ------------ | -------------
-Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
-Default | Default data source means that it will be pre-selected for new panels.
-Url | The http protocol, ip and port of you influxdb api (influxdb api port is by default 8086)
-Access | Proxy = access via Grafana backend, Direct = access directly from browser.
-Database | Name of your influxdb database
-User | Name of your database user
-Password | Database user's password
+*Name* | The data source name. This is how you refer to the data source in panels & queries.
+*Default* | Default data source means that it will be pre-selected for new panels.
+*Url* | The HTTP protocol, IP, and port of your InfluxDB API (the InfluxDB API port is 8086 by default)
+*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
+*Database* | Name of your InfluxDB database
+*User* | Name of your database user
+*Password* | Database user's password

-    > Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser.
-
-    > Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source.
+### Proxy vs Direct access
+
+Proxy access means that the Grafana backend will proxy all requests from the browser. So requests to InfluxDB will be channeled through
+`grafana-server`. This means that the URL you specify needs to be accessible from the server you are running Grafana on. Proxy access
+mode is also more secure as the username & password will never reach the browser.

 ## Query Editor

@@ -100,11 +100,21 @@ change the option `Format As` to `Table` if you want to show raw data in the `Ta

 ## Templating

-You can create a template variable in Grafana and have that variable filled with values from any InfluxDB metric exploration query.
-You can then use this variable in your InfluxDB metric queries.
-For example you can have a variable that contains all values for tag `hostname` if you specify a query like this
-in the templating edit view.
+Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
+Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
+being displayed in your dashboard.
+
+Check out the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
+types of template variables.
+
+### Query variable
+
+If you add a template variable of the type `Query` you can write an InfluxDB exploration (metadata) query. These queries can
+return things like measurement names, key names or key values.
+
+For example you can have a variable that contains all values for tag `hostname` if you specify a query like this in the templating variable *Query* setting.
+
 ```sql
 SHOW TAG VALUES WITH KEY = "hostname"
 ```
@@ -116,12 +126,46 @@ the hosts variable only show hosts from the current selected region with a query

 SHOW TAG VALUES WITH KEY = "hostname" WHERE region =~ /$region/

-> Always use `regex values` or `regex wildcard` for All format or multi select format.
+You can fetch key names for a given measurement.

-![](/img/docs/influxdb/templating_simple_ex1.png)
+```sql
+SHOW TAG KEYS [FROM <measurement_name>]
+```
+
+If you have a variable with key names you can use this variable in a group by clause. This will allow you to change the group by using the variable dropdown at the top
+of the dashboard.
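+
+A sketch of that usage, assuming a variable named `$groupby` that was populated with a `SHOW TAG KEYS` query (the measurement and variable names are hypothetical):
+
+```sql
+SELECT mean("value") FROM "cpu" WHERE $timeFilter GROUP BY time($__interval), "$groupby"
+```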
+
+### Using variables in queries
+
+There are two syntaxes:
+
+`$varname` Example:
+
+```sql
+SELECT mean("value") FROM "logins" WHERE "hostname" =~ /^$host$/ AND $timeFilter GROUP BY time($__interval), "hostname"
+```
+
+`[[varname]]` Example:
+
+```sql
+SELECT mean("value") FROM "logins" WHERE "hostname" =~ /^[[host]]$/ AND $timeFilter GROUP BY time($__interval), "hostname"
+```
+
+Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. When the *Multi-value* or *Include all value*
+options are enabled, Grafana converts the labels from plain text to a regex compatible string, which means you have to use `=~` instead of `=`.
+
+Example Dashboard:
+[InfluxDB Templated Dashboard](http://play.grafana.org/dashboard/db/influxdb-templated-queries)
+
+### Ad hoc filters variable
+
+InfluxDB supports the special `Ad hoc filters` variable type. This variable allows you to specify any number of key/value filters on the fly. These filters will automatically
+be applied to all your InfluxDB queries.

 ## Annotations
-Annotations allows you to overlay rich event information on top of graphs.
+
+[Annotations]({{< relref "reference/annotations.md" >}}) allow you to overlay rich event information on top of graphs. You add annotation
+queries via the Dashboard menu / Annotations view.

 An example query:

@@ -129,4 +173,8 @@ An example query:
 SELECT title, description from events WHERE $timeFilter order asc

+For InfluxDB you need to enter a query like the one in the above example. The `where $timeFilter`
+part is required. If you only select one column you will not need to enter anything in the column mapping fields. The
+Tags field can be a comma separated string.
+
diff --git a/docs/sources/features/datasources/mysql.md b/docs/sources/features/datasources/mysql.md
new file mode 100644
index 00000000000..2072e6a4f7b
--- /dev/null
+++ b/docs/sources/features/datasources/mysql.md
@@ -0,0 +1,118 @@
++++
+title = "Using MySQL in Grafana"
+description = "Guide for using MySQL in Grafana"
+keywords = ["grafana", "mysql", "guide"]
+type = "docs"
+[menu.docs]
+name = "MySQL"
+parent = "datasources"
+weight = 7
++++
+
+# Using MySQL in Grafana
+
+> Only available in Grafana v4.3+. This data source is not ready for
+> production use, currently in development (alpha state).
+
+Grafana ships with a built-in MySQL data source plugin that allows you to query and visualize
+data from a MySQL compatible database.
+
+## Adding the data source
+
+1. Open the side menu by clicking the Grafana icon in the top header.
+2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
+3. Click the `+ Add data source` button in the top header.
+4. Select *MySQL* from the *Type* dropdown.
+
+### Database User Permissions (Important!)
+
+The database user you specify when you add the data source should only be granted SELECT permissions on
+the specified database & tables you want to query. Grafana does not validate that the query is safe. The query
+could include any SQL statement. For example, statements like `USE otherdb;` and `DROP TABLE user;` would be
+executed. To protect against this we **highly** recommend you create a specific MySQL user with
+restricted permissions.
+
+Example:
+
+```sql
+ CREATE USER 'grafanaReader' IDENTIFIED BY 'password';
+ GRANT SELECT ON mydatabase.mytable TO 'grafanaReader';
+```
+
+You can use wildcards (`*`) in place of database or table if you want to grant access to more databases and tables, as in the sketch below.
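+
+A minimal sketch of such a wildcard grant (the database name is hypothetical):
+
+```sql
+GRANT SELECT ON mydatabase.* TO 'grafanaReader';
+```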
+
+## Macros
+
+To simplify syntax and to allow for dynamic parts, like date range filters, the query can contain macros.
+
+Macro example | Description
+------------ | -------------
+*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn > FROM_UNIXTIME(1494410783) AND dateColumn < FROM_UNIXTIME(1494497183)*
+
+We plan to add many more macros. If you have suggestions for what macros you would like to see, please
+[open an issue](https://github.com/grafana/grafana) in our GitHub repo.
+
+The query editor has a link named `Generated SQL` that shows up after a query has been executed, while in panel edit mode. Click
+on it and it will expand and show the raw interpolated SQL string that was executed.
+
+## Table queries
+
+If the `Format as` query option is set to `Table` then you can basically write any type of SQL query. The table panel will automatically show the results of whatever columns & rows your query returns.
+
+Query editor with example query:
+
+![](/img/docs/v43/mysql_table_query.png)
+
+The query:
+
+```sql
+SELECT
+  title as 'Title',
+  user.login as 'Created By',
+  dashboard.created as 'Created On'
+FROM dashboard
+INNER JOIN user on user.id = dashboard.created_by
+WHERE $__timeFilter(dashboard.created)
+```
+
+You can control the name of the Table panel columns by using the regular `AS` SQL column selection syntax.
+
+The resulting table panel:
+
+![](/img/docs/v43/mysql_table.png)
+
+### Time series queries
+
+If you set `Format as` to `Time series`, for use in the Graph panel for example, then there are some requirements for
+what your query returns.
+
+- There must be a column named `time_sec` representing a unix epoch in seconds.
+- There must be a column named `value` representing the time series value.
+- There must be a column named `metric` representing the time series name.
+
+Example:
+
+```sql
+SELECT
+  min(UNIX_TIMESTAMP(time_date_time)) as time_sec,
+  max(value_double) as value,
+  metric1 as metric
+FROM test_data
+WHERE $__timeFilter(time_date_time)
+GROUP BY metric1, UNIX_TIMESTAMP(time_date_time) DIV 300
+ORDER BY time_sec asc
+```
+
+Currently, there is no support for a dynamic group by time based on time range & panel width.
+This is something we plan to add.
+
+## Templating
+
+You can use variables in your queries, but there is currently no support for defining `Query` variables
+that target a MySQL data source. A sketch of how a dashboard variable can still be used in a query follows below.
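+
+A minimal sketch, assuming a dashboard variable named `$hostname` and hypothetical table and column names (the variable is interpolated into the SQL before it is sent to MySQL):
+
+```sql
+SELECT
+  min(UNIX_TIMESTAMP(created_at)) as time_sec,
+  max(cpu_load) as value,
+  hostname as metric
+FROM server_metrics
+WHERE hostname = '$hostname' AND $__timeFilter(created_at)
+GROUP BY hostname, UNIX_TIMESTAMP(created_at) DIV 300
+```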
+
+## Alerting
+
+Time series queries should work in alerting conditions. Table formatted queries are not yet supported in alert rule
+conditions.
diff --git a/docs/sources/features/datasources/opentsdb.md b/docs/sources/features/datasources/opentsdb.md
index 2b1c7e5c3f5..03795473ff7 100644
--- a/docs/sources/features/datasources/opentsdb.md
+++ b/docs/sources/features/datasources/opentsdb.md
@@ -12,59 +12,79 @@ weight = 5

 # Using OpenTSDB in Grafana

-{{< docs-imagebox img="/img/docs/v2/add_OpenTSDB.png" max-width="14rem" >}}
+Grafana ships with advanced support for OpenTSDB.

-The newest release of Grafana adds additional functionality when using an OpenTSDB Data source.
+## Adding the data source

-1. Open the side menu by clicking the the Grafana icon in the top header.
+1. Open the side menu by clicking the Grafana icon in the top header.
 2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
+3. Click the `+ Add data source` button in the top header.
+4. Select *OpenTSDB* from the *Type* dropdown.

-    > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
-
-3. Click the `Add new` link in the top header.
-4. Select `OpenTSDB` from the dropdown.
+> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.

 Name | Description
 ------------ | -------------
-Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
-Default | Default data source means that it will be pre-selected for new panels.
-Url | The http protocol, ip and port of you opentsdb server (default port is usually 4242)
-Access | Proxy = access via Grafana backend, Direct = access directly from browser.
-Version | Version = opentsdb version, either <=2.1 or 2.2
-Resolution | Metrics from opentsdb may have datapoints with either second or millisecond resolution.
+*Name* | The data source name. This is how you refer to the data source in panels & queries.
+*Default* | Default data source means that it will be pre-selected for new panels.
+*Url* | The HTTP protocol, IP, and port of your OpenTSDB server (the default port is usually 4242)
+*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
+*Version* | The OpenTSDB version, either <=2.1 or 2.2
+*Resolution* | Metrics from OpenTSDB may have datapoints with either second or millisecond resolution.

 ## Query editor

-Open a graph in edit mode by click the title. Query editor will differ if the datasource has version <=2.1 or = 2.2. In the former version, only tags can be used to query opentsdb. But in the latter version, filters as well as tags can be used to query opentsdb. Fill Policy is also introduced in opentsdb 2.2.
-    > Note: While using Opentsdb 2.2 datasource, make sure you use either Filters or Tags as they are mutually exclusive. If used together, might give you weird results.
+Open a graph in edit mode by clicking the title. The query editor will differ depending on whether the data source has version <=2.1 or 2.2.
+In the former version, only tags can be used to query OpenTSDB. But in the latter version, filters as well as tags
+can be used to query OpenTSDB. Fill Policy is also introduced in OpenTSDB 2.2.

-![](/img/docs/v2/opentsdb_query_editor.png)
+![](/img/docs/v43/opentsdb_query_editor.png)

+> Note: While using an OpenTSDB 2.2 data source, make sure you use either Filters or Tags as they are mutually exclusive. If used together, they might give you weird results.

 ### Auto complete suggestions

-As soon as you start typing metric names, tag names and tag values , you should see highlighted auto complete suggestions for them.
-    > Note: This is required for the OpenTSDB `suggest` api to work.
+As soon as you start typing metric names, tag names and tag values, you should see highlighted auto complete suggestions for them.
+The autocomplete only works if the OpenTSDB suggest api is enabled.

 ## Templating queries

-Grafana's OpenTSDB data source now supports template variable values queries. This means you can create template variables that fetch the values from OpenTSDB (for example metric names, tag names, or tag values). The query editor is also enhanced to limiting tags by metric.
+Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
+Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
+being displayed in your dashboard.
+
+Check out the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
+types of template variables.
+
+### Query variable
+
+Grafana's OpenTSDB data source supports template variable queries. This means you can create template variables
+that fetch the values from OpenTSDB. For example, metric names, tag names, or tag values.

 When using OpenTSDB with a template variable of `query` type you can use the following syntax for lookup.

-    metrics(prefix) // returns metric names with specific prefix (can be empty)
-    tag_names(cpu) // return tag names (i.e. keys) for a specific cpu metric
-    tag_values(cpu, hostname) // return tag values for metric cpu and tag key hostname
-    suggest_tagk(prefix) // return tag names (i.e. keys) for all metrics with specific prefix (can be empty)
-    suggest_tagv(prefix) // return tag values for all metrics with specific prefix (can be empty)
+Query | Description
+------------ | -------------
+*metrics(prefix)* | Returns metric names with specific prefix (can be empty)
+*tag_names(cpu)* | Returns tag names (i.e. keys) for a specific cpu metric
+*tag_values(cpu, hostname)* | Returns tag values for metric cpu and tag key hostname
+*suggest_tagk(prefix)* | Returns tag names (i.e. keys) for all metrics with specific prefix (can be empty)
+*suggest_tagv(prefix)* | Returns tag values for all metrics with specific prefix (can be empty)

-If you do not see template variables being populated in `Preview of values` section, you need to enable `tsd.core.meta.enable_realtime_ts` in the OpenTSDB server settings. Also, to populate metadata of the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` on the OpenTSDB server.
+If you do not see template variables being populated in the `Preview of values` section, you need to enable
+`tsd.core.meta.enable_realtime_ts` in the OpenTSDB server settings. Also, to populate metadata of
+the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` on the OpenTSDB server.

 ### Nested Templating

-One template variable can be used to filter tag values for another template varible. Very importantly, the order of the parameters matter in tag_values function. First parameter is the metric name, second parameter is the tag key for which you need to find tag values, and after that all other dependent template variables. Some examples are mentioned below to make nested template queries work successfully.
+One template variable can be used to filter tag values for another template variable. The first parameter is the metric name,
+the second parameter is the tag key for which you need to find tag values, and after that come all other dependent template variables.
+Some examples are mentioned below to make nested template queries work successfully.

-    tag_values(cpu, hostname, env=$env) // return tag values for cpu metric, selected env tag value and tag key hostname
-    tag_values(cpu, hostanme, env=$env, region=$region) // return tag values for cpu metric, selected env tag value, selected region tag value and tag key hostname
+Query | Description
+------------ | -------------
+*tag_values(cpu, hostname, env=$env)* | Returns tag values for cpu metric, selected env tag value and tag key hostname
+*tag_values(cpu, hostname, env=$env, region=$region)* | Returns tag values for cpu metric, selected env tag value, selected region tag value and tag key hostname

-> Note: This is required for the OpenTSDB `lookup` api to work.
-
-For details on opentsdb metric queries checkout the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)
+For details on OpenTSDB metric queries check out the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)
diff --git a/docs/sources/features/datasources/prometheus.md b/docs/sources/features/datasources/prometheus.md
index 947d7426230..4b5c3f115e8 100644
--- a/docs/sources/features/datasources/prometheus.md
+++ b/docs/sources/features/datasources/prometheus.md
@@ -10,74 +10,86 @@ parent = "datasources"
 weight = 2
 +++

-
 # Using Prometheus in Grafana

-Grafana includes support for Prometheus Datasources. While the process of adding the datasource is similar to adding a Graphite or OpenTSDB datasource type, Prometheus does have a few different options for building queries.
+Grafana includes built-in support for Prometheus.

 ## Adding the data source to Grafana

-![](/img/docs/v2/add_Prometheus.png)
-
-1. Open the side menu by clicking the the Grafana icon in the top header.
+1. Open the side menu by clicking the Grafana icon in the top header.
 2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
+3. Click the `+ Add data source` button in the top header.
+4. Select `Prometheus` from the *Type* dropdown.

-    > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
+> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.

-3. Click the `Add new` link in the top header.
-4. Select `Prometheus` from the dropdown.
+## Data source options

 Name | Description
 ------------ | -------------
-Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
-Default | Default data source means that it will be pre-selected for new panels.
-Url | The http protocol, ip and port of you Prometheus server (default port is usually 9090)
-Access | Proxy = access via Grafana backend, Direct = access directly from browser.
-Basic Auth | Enable basic authentication to the Prometheus datasource.
-User | Name of your Prometheus user
-Password | Database user's password
-
-    > Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser.
-
-    > Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source.
+*Name* | The data source name. This is how you refer to the data source in panels & queries.
+*Default* | Default data source means that it will be pre-selected for new panels.
+*Url* | The HTTP protocol, IP, and port of your Prometheus server (the default port is usually 9090)
+*Access* | Proxy = access via Grafana backend, Direct = access directly from browser.
+*Basic Auth* | Enable basic authentication to the Prometheus data source.
+*User* | Name of your Prometheus user
+*Password* | Database user's password

 ## Query editor

-Open a graph in edit mode by click the title.
-![](/img/v2/prometheus_editor.png)
+Open a graph in edit mode by clicking the title > Edit (or by pressing the `e` key while hovering over the panel).

-For details on Prometheus metric queries check out the Prometheus documentation
-- [Query Metrics - Prometheus documentation](http://prometheus.io/docs/querying/basics/).
-
-## Templated queries
-
-Prometheus Datasource Plugin provides the following functions in `Variables values query` field in Templating Editor to query `metric names` and `labels names` on the Prometheus server.
+![](/img/docs/v43/prometheus_query_editor.png)

 Name | Description
 ------- | --------
-`label_values(label)` | Returns a list of label values for the `label` in every metric.
-`label_values(metric, label)` | Returns a list of label values for the `label` in the specified metric.
-`metrics(metric)` | Returns a list of metrics matching the specified `metric` regex.
-`query_result(query)` | Returns a list of Prometheus query result for the `query`.
+*Query expression* | Prometheus query expression, check out the [Prometheus documentation](http://prometheus.io/docs/querying/basics/).
+*Legend format* | Controls the name of the time series, using a name or pattern. For example `{{hostname}}` will be replaced with the label value for the label `hostname`.
+*Min step* | Set a lower limit for the Prometheus step option. Step controls how big the jumps are when the Prometheus query engine performs range queries. Sadly there is no official Prometheus documentation to link to for this very important option.
+*Resolution* | Controls the step option. Small steps create high-resolution graphs but can be slow over larger time ranges; lowering the resolution can speed things up. `1/2` will try to set the step option to generate 1 data point for every other pixel. A value of `1/10` will try to set the step option so there is a data point every 10 pixels.
+*Metric lookup* | Search for metric names in this input field.
+*Format as* | **(New in v4.3)** Switch between Table & Time series. Table format will only work in the Table panel.

-For details of `metric names` & `label names`, and `label values`, please refer to the [Prometheus documentation](http://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
+## Templating

-> Note: The part of queries is incompatible with the version before 2.6, if you specify like `foo.*`, please change like `metrics(foo.*)`.
+Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
+Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
+being displayed in your dashboard.

-You can create a template variable in Grafana and have that variable filled with values from any Prometheus metric exploration query.
-You can then use this variable in your Prometheus metric queries.
+Check out the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
+types of template variables.

-For example you can have a variable that contains all values for label `hostname` if you specify a query like this in the templating edit view.
+### Query variable

-```sql
-label_values(hostname)
-```
+Variables of the type *Query* allow you to query Prometheus for a list of metrics, labels or label values. The Prometheus data source plugin
+provides the following functions you can use in the `Query` input field.

-You can also use raw queries & regular expressions to extract anything you might need.
+Name | Description
+---- | --------
+*label_values(label)* | Returns a list of label values for the `label` in every metric.
+*label_values(metric, label)* | Returns a list of label values for the `label` in the specified metric.
+*metrics(metric)* | Returns a list of metrics matching the specified `metric` regex.
+*query_result(query)* | Returns a list of Prometheus query results for the `query`.
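+
+A short sketch of how this fits together (the metric and variable names are examples): a variable named `instance` defined with the query `label_values(up, instance)` could then be used in a query expression such as:
+
+```
+rate(http_requests_total{instance=~"$instance"}[5m])
+```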
-### Using templated variables in queries
+For details of *metric names*, *label names* and *label values*, please refer to the [Prometheus documentation](http://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).

-When the `Include All` option or `Multi-Value` option is enabled, Grafana converts the labels from plain text to a regex compatible string.
-Which means you have to use `=~` instead of `=` in your Prometheus queries. For example `ALERTS{instance=~$instance}` instead of `ALERTS{instance=$instance}`.
+### Using variables in queries

-![](/img/docs/v2/prometheus_templating.png)
+There are two syntaxes:
+
+- `$varname` Example: rate(http_requests_total{job=~"$job"}[5m])
+- `[[varname]]` Example: rate(http_requests_total{job="my[[job]]"}[5m])
+
+Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. When the *Multi-value* or *Include all value*
+options are enabled, Grafana converts the labels from plain text to a regex compatible string, which means you have to use `=~` instead of `=`.
+
+## Annotations
+
+[Annotations]({{< relref "reference/annotations.md" >}}) allow you to overlay rich event information on top of graphs. You add annotation
+queries via the Dashboard menu / Annotations view.
+
+Prometheus supports two ways to query annotations.
+
+- A regular metric query
+- A Prometheus query for pending and firing alerts (for details see [Inspecting alerts during runtime](https://prometheus.io/docs/alerting/rules/#inspecting-alerts-during-runtime))
+
+The step option is useful to limit the number of events returned from your query.
diff --git a/docs/sources/features/datasources/testdata.md b/docs/sources/features/datasources/testdata.md
index 3e3f0909700..3475980d2b4 100644
--- a/docs/sources/features/datasources/testdata.md
+++ b/docs/sources/features/datasources/testdata.md
@@ -11,26 +11,20 @@ weight = 20

 # Grafana TestData

-    > NOTE: This plugin is disable by default.
-
 The purpose of this data source is to make it easier to create fake data for any panel.
 Using `Grafana TestData` you can build your own time series and have any panel render it.
 This makes it much easier to verify functionality since the data can be shared very easily.

 ## Enable

-`Grafana TestData` is not enabled by default. To enable it you have to go to `/plugins/testdata/edit` and click the enable button to enable it for each server.
+`Grafana TestData` is not enabled by default. To enable it you have to go to `/plugins/testdata/edit` and click the enable button.

 ## Create mock data.

-Once `Grafana TestData` is enabled you use it as a datasource in the metric panel.
+Once `Grafana TestData` is enabled you can use it as a data source in any metric panel.

 ![](/img/docs/v41/test_data_add.png)

-## Scenarios
-
-You can now choose different scenario that you want rendered in the drop down menu. If you have scenarios that you think should be added, please add them to `` and submit a pull request.
-
 ## CSV

 The comma separated values scenario is the most powerful one since it lets you create any kind of graph you like.
@@ -38,7 +32,6 @@ Once you provided the numbers `Grafana TestData` will distribute them evenly based

 ![](/img/docs/v41/test_data_csv_example.png)

-
 ## Dashboards

 `Grafana TestData` also contains some dashboards with examples. `/plugins/testdata/edit`
diff --git a/docs/sources/features/panels/dashlist.md b/docs/sources/features/panels/dashlist.md
index 5377c7c1941..b6f7d21c1c8 100644
--- a/docs/sources/features/panels/dashlist.md
+++ b/docs/sources/features/panels/dashlist.md
@@ -2,6 +2,7 @@
 title = "Dashboard List"
 keywords = ["grafana", "dashboard list", "documentation", "panel", "dashlist"]
 type = "docs"
+aliases = ["/reference/dashlist/"]
 [menu.docs]
 name = "Dashboard list"
 parent = "panels"
diff --git a/docs/sources/features/panels/graph.md b/docs/sources/features/panels/graph.md
index 7fbebcfeb97..fdd3f271184 100644
--- a/docs/sources/features/panels/graph.md
+++ b/docs/sources/features/panels/graph.md
@@ -2,6 +2,7 @@
 title = "Graph Panel"
 keywords = ["grafana", "graph panel", "documentation", "guide", "graph"]
 type = "docs"
+aliases = ["/reference/graph/"]
 [menu.docs]
 name = "Graph"
 parent = "panels"
@@ -18,7 +19,8 @@ Clicking the title for a panel exposes a menu. The `edit` option opens additional
 options for the panel.

 ## General
-![](/img/docs/v2/graph_general.png)
+
+![](/img/docs/v43/graph_general.png)

 The general tab allows customization of a panel's appearance and menu options.

@@ -31,14 +33,14 @@ The general tab allows customization of a panel's appearance and menu options.
 ### Drilldown / detail link

 The drilldown section allows adding dynamic links to the panel that can link to other dashboards
-or URLs
+or URLs.

 Each link has a title, a type and params. A link can be either a ``dashboard`` or ``absolute`` link.

-If it is a dashboard links, the `dashboard` value must be the name of a dashboard. If it's an
-`absolute` link, the URL is the URL to link.
+If it is a dashboard link, the `dashboard` value must be the name of a dashboard. If it is an
+`absolute` link, the URL is the URL to link to.

 ``params`` allows adding additional URL params to the links. The format is ``name=value`` with
-multiple params separate by ``&``. Template variables can be added as values using ``$myvar``.
+multiple params separated by ``&``. Template variables can be added as values using ``$myvar``.

 When linking to another dashboard that uses template variables, you can use ``var-myvar=value`` to
 populate the template variable to a desired value from the link.

@@ -50,7 +52,7 @@ options.

 ## Axes & Grid

-![](/img/docs/v2/graph_axes_grid_options.png)
+![](/img/docs/v43/graph_axes_grid_options.png)

 The Axes & Grid tab controls the display of axes, grids and legend.

@@ -74,7 +76,6 @@ values can be hidden from the legend using the ``Hide empty`` checkbox.

 ### Legend Values

 Additional values can be shown alongside the legend names:
-
 - ``Total`` - Sum of all values returned from metric query
 - ``Current`` - Last value returned from the metric query
 - ``Min`` - Minimum of all values returned from metric query
 - ``Max`` - Maximum of all values returned from metric query
 - ``Decimals`` - Controls how many decimals are displayed for legend values (and graph hover tooltips)

 The legend values are calculated client side by Grafana and depend on what type of
-aggregation or point consolidation you metric query is using. All the above legend values cannot
+aggregation or point consolidation your metric query is using. All the above legend values cannot
 be correct at the same time.
 For example, if you plot a rate like requests/second, this is probably using average as the aggregator. In that case the Total in the legend will not
 represent the total number of requests; it is just the sum of all data points received by Grafana.

 ## Display styles

-![](/img/docs/v2/graph_display_styles.png)
+![](/img/docs/v43/graph_display_styles.png)

-Display styles controls properties of the graph.
+Display styles control visual properties of the graph.

 ### Thresholds

 Thresholds allow you to add arbitrary lines or sections to the graph to make it easier to see when
 the graph crosses a particular threshold.

 ### Line Options

-- ``Line Fill`` - Amount of color fill for a series.  0 is none.
+- ``Line Fill`` - Amount of color fill for a series. 0 is none.
 - ``Line Width`` - The width of the line for a series.
 - ``Null point mode`` - How null values are displayed
 - ``Staircase line`` - Draws adjacent points as staircase

 ### Multiple Series

-If there are multiple series, they can be display as a group.
+If there are multiple series, they can be displayed as a group.

 - ``Stack`` - Each series is stacked on top of another
-- ``Percent`` - Each series is draw as a percent of the total of all series
+- ``Percent`` - Each series is drawn as a percentage of the total of all series

-If you have stack enabled you can select what the mouse hover feature should show.
+If you have stack enabled, you can select what the mouse hover feature should show.

 - Cumulative - Sum of series below plus the series you hover over
 - Individual - Just the value for the series you hover over

@@ -134,11 +135,15 @@ If you have stack enabled you can select what the mouse hover feature should sho

 - ``All series`` - Show all series on the same tooltip and an x crosshair to help follow all series

-### Series specific overrides
+### Series Specific Overrides

-The section allows a series to be render different from the rest. For example, one series can be given
-a thicker line width to make it standout.
+The section allows a series to be rendered differently from the others. For example, one series can be given
+a thicker line width to make it stand out.

+#### Dashes Drawing Style
+
+There is an option under Series overrides to draw lines as dashes. Set Dashes to the value True to override the line draw setting for a specific series.
+
-## Time range
+## Time Range

 ![](/img/docs/v2/graph_time_range.png)
diff --git a/docs/sources/features/panels/heatmap.md b/docs/sources/features/panels/heatmap.md
new file mode 100644
index 00000000000..e44527f8695
--- /dev/null
+++ b/docs/sources/features/panels/heatmap.md
@@ -0,0 +1,108 @@
++++
+title = "Heatmap Panel"
+description = "Heatmap panel documentation"
+keywords = ["grafana", "heatmap", "panel", "documentation"]
+type = "docs"
+[menu.docs]
+name = "Heatmap"
+parent = "panels"
+weight = 3
++++
+
+# Heatmap Panel
+
+![](/img/docs/v43/heatmap_panel_cover.jpg)
+
+> New panel only available in Grafana v4.3+
+
+The Heatmap panel allows you to view histograms over time. To fully understand and use this panel you need
+to understand what histograms are and how they are created. Read on below for a quick introduction to the
+term Histogram.
+
+## Histograms and buckets
+
+A histogram is a graphical representation of the distribution of numerical data. You group values into buckets
+(sometimes also called bins) and then count how many values fall into each bucket. Instead
+of graphing the actual values you then graph the buckets. Each bar represents a bucket,
+and the bar height represents the frequency (i.e. count) of values that fell into that bucket's interval.
+
+Example Histogram:
+
+![](/img/docs/v43/heatmap_histogram.png)
+
+The above histogram shows the value distribution of a couple of time series. We can easily see that
+most values land between 240-300, with a peak between 260-280. Histograms just look at value distributions
+over a specific time range. So you cannot see any trend or changes in the distribution over time;
+this is where heatmaps become useful.
+
+## Heatmap
+
+A Heatmap is like a histogram, but over time, where each time slice represents its own
+histogram. Instead of using bar height as a representation of frequency, you use cells, and color
+the cell proportional to the number of values in the bucket.
+
+Example:
+
+![](/img/docs/v43/heatmap_histogram_over_time.png)
+
+Here we can clearly see what values are more common and how they trend over time.
+
+## Data Options
+
+Data and bucket options can be found in the `Axes` tab.
+
+### Data Formats
+
+Data format | Description
+------------ | -------------
+*Time series* | Grafana does the bucketing by going through all time series values. The bucket sizes & intervals will be determined using the Buckets options.
+*Time series buckets* | Each time series already represents a Y-Axis bucket. The time series name (alias) needs to be a numeric value representing the upper interval for the bucket. Grafana does no bucketing, so the bucket size options are hidden.
+
+### Bucket Size
+
+The Bucket count & size options are used by Grafana to calculate how big each cell in the heatmap is. You can
+define the bucket size either by count (the first input box) or by specifying a size interval. For the Y-Axis
+the size interval is just a value, but for the X-bucket you can specify a time range in the *Size* input, for example,
+the time range `1h`. This will make the cells 1h wide on the X-axis.
+
+### Pre-bucketed data
+
+If you have data that is already organized into buckets you can use the `Time series buckets` data format. This format requires that your metric query return regular time series and that each time series has a numeric name
+that represents the upper or lower bound of the interval.
+
+The only data source that supports histograms over time is Elasticsearch. You do this by adding a *Histogram*
+bucket aggregation before the *Date Histogram*.
+
+![](/img/docs/v43/elastic_histogram.png)
+
+You control the size of the buckets using the Histogram interval (Y-Axis) and the Date Histogram interval (X-axis).
+
+## Display Options
+
+In the heatmap *Display* tab you define how the cells are rendered and what color they are assigned.
+
+### Color Mode & Spectrum
+
+{{< imgbox max-width="40%" img="/img/docs/v43/heatmap_scheme.png" caption="Color spectrum" >}}
+
+The color spectrum controls the mapping between value count (in each bucket) and the color assigned to each bucket.
+The leftmost color on the spectrum represents the minimum count and the color on the rightmost side represents the
+maximum count. Some color schemes are automatically inverted when using the light theme.
+
+You can also change the color mode to `Opacity`. In this case, the color will not change but the amount of opacity will
+change with the bucket count.
+
+## Raw data vs aggregated
+
+If you use the heatmap with regular time series data (not pre-bucketed), it's important to keep in mind that your data
+is often already aggregated by your time series backend.
+Most time series queries do not return raw sample data
+but include a group by time interval or maxDataPoints limit coupled with an aggregation function (usually average).
+
+This all depends on the time range of your query of course. But the important point is to know that the Histogram bucketing
+that Grafana performs may be done on already aggregated and averaged data. To get more accurate heatmaps it is better
+to do the bucketing during metric collection or store the data in Elasticsearch, which is currently the only data source
+that supports doing Histogram bucketing on the raw data.
+
+If you remove or lower the group by time (or raise maxDataPoints) in your query to return more data points, your heatmap will be
+more accurate, but this can also be very CPU & memory taxing for your browser and could cause hangs and crashes if the number of
+data points becomes unreasonably large.
diff --git a/docs/sources/features/panels/singlestat.md b/docs/sources/features/panels/singlestat.md
index 9f2edb7df05..eb9b2f26ea5 100644
--- a/docs/sources/features/panels/singlestat.md
+++ b/docs/sources/features/panels/singlestat.md
@@ -2,6 +2,7 @@
 title = "Singlestat Panel"
 keywords = ["grafana", "dashboard", "documentation", "panels", "singlestat"]
 type = "docs"
+aliases = ["/reference/singlestat/"]
 [menu.docs]
 name = "Singlestat"
 parent = "panels"
diff --git a/docs/sources/features/panels/table_panel.md b/docs/sources/features/panels/table_panel.md
index 249886e1f57..69cd02fdbcc 100644
--- a/docs/sources/features/panels/table_panel.md
+++ b/docs/sources/features/panels/table_panel.md
@@ -2,6 +2,7 @@
 title = "Table Panel"
 keywords = ["grafana", "dashboard", "documentation", "panels", "table panel"]
 type = "docs"
+aliases = ["/reference/table/"]
 [menu.docs]
 name = "Table"
 parent = "panels"
@@ -84,8 +85,9 @@ The column styles allow you to control how dates and numbers are formatted.

 1. `Name or regex`: The Name or Regex field controls what columns the rule should be applied to. The regex or name filter will be matched against the column name, not against column values.
 2. `Type`: The three supported types are `Number`, `String` and `Date`.
-3. `Format`: Specify date format. Only available when `Type` is set to `Date`.
-4. `Coloring` and `Thresholds`: Specify color mode and thresholds limits.
-5. `Unit` and `Decimals`: Specify unit and decimal precision for numbers.
-6. `Add column style rule`: Add new column rule.
+3. `Title`: Title for the column, when using a Regex the title can include replacement strings like `$1`.
+4. `Format`: Specify date format. Only available when `Type` is set to `Date`.
+5. `Coloring` and `Thresholds`: Specify color mode and thresholds limits.
+6. `Unit` and `Decimals`: Specify unit and decimal precision for numbers.
+7. `Add column style rule`: Add new column rule.
diff --git a/docs/sources/guides/whats-new-in-v4-2.md b/docs/sources/guides/whats-new-in-v4-2.md
index 44aa3a45dc0..4b140a9027e 100644
--- a/docs/sources/guides/whats-new-in-v4-2.md
+++ b/docs/sources/guides/whats-new-in-v4-2.md
@@ -12,7 +12,7 @@ weight = -1

 ## What's new in Grafana v4.2

-Grafana v4.2 Beta is now [available for download](/download/4_2_0/).
+Grafana v4.2 Beta is now [available for download](https://grafana.com/grafana/download/4.2.0).

 Just like the last release this one contains lots of bug fixes and minor improvements. We are very happy to say that 27 of 40 issues were closed by pull requests from the community. Big thumbs up!
diff --git a/docs/sources/guides/whats-new-in-v4-3.md b/docs/sources/guides/whats-new-in-v4-3.md
new file mode 100644
index 00000000000..3290f3cd990
--- /dev/null
+++ b/docs/sources/guides/whats-new-in-v4-3.md
@@ -0,0 +1,105 @@
++++
+title = "What's New in Grafana v4.3"
+description = "Feature & improvement highlights for Grafana v4.3"
+keywords = ["grafana", "new", "documentation", "4.3.0"]
+type = "docs"
+[menu.docs]
+name = "Version 4.3"
+identifier = "v4.3"
+parent = "whatsnew"
+weight = -2
++++
+
+## What's New in Grafana v4.3
+
+Grafana v4.3 Beta is now [available for download](https://grafana.com/grafana/download/4.3.0-beta1).
+
+## Release Highlights
+
+- New [Heatmap Panel](http://docs.grafana.org/features/panels/heatmap/)
+- Graph Panel Histogram Mode
+- Elasticsearch Histogram Aggregation
+- Prometheus Table data format
+- New [MySQL Data Source](http://docs.grafana.org/features/datasources/mysql/) (alpha version to get some early feedback)
+- 60+ small fixes and improvements, most of them contributed by our fantastic community!
+
+Check out the [New Features in v4.3 Dashboard](http://play.grafana.org/dashboard/db/new-features-in-v4-3?orgId=1) on the Grafana Play site for a showcase of these new features.
+
+## Histogram Support
+
+A Histogram is a kind of bar chart that groups numbers into ranges, often called buckets or bins. Taller bars show that more data falls in that range.
+
+The Graph Panel now supports Histograms.
+
+![](/img/docs/v43/heatmap_histogram.png)
+
+## Histogram Aggregation Support for Elasticsearch
+
+Elasticsearch is the only supported data source that can return pre-bucketed data (data that is already grouped into ranges). With other data sources there is a risk of returning inaccurate data in a histogram due to using already aggregated data rather than raw data. This release adds support for Elasticsearch pre-bucketed data that can be visualized with the new [Heatmap Panel](http://docs.grafana.org/features/panels/heatmap/).
+
+## Heatmap Panel
+
+The Histogram support in the Graph Panel does not show changes over time - it aggregates all the data together for the chosen time range. To visualize a histogram over time, we have built a new [Heatmap Panel](http://docs.grafana.org/features/panels/heatmap/).
+
+Every column in a Heatmap is a histogram snapshot. Instead of visualizing higher values with higher bars, a heatmap visualizes higher values with color. The histogram shown above is equivalent to one column in the heatmap shown below.
+
+![](/img/docs/v43/heatmap_histogram_over_time.png)
+
+The Heatmap panel also works with Elasticsearch Histogram Aggregations for more accurate server side bucketing.
+
+![](/assets/img/blog/v4/elastic_heatmap.jpg)
+
+## MySQL Data Source (alpha)
+
+This release includes a [new core data source for MySQL](http://docs.grafana.org/features/datasources/mysql/). You can write any possible MySQL query and format it as either Time Series or Table Data, allowing it to be used with the Graph Panel, Table Panel and SingleStat Panel.
+
+We are still working on the MySQL data source. As it's missing some important features, like templating and macros, and future changes could be breaking, we are
+labeling the state of the data source as Alpha. Instead of holding up the release of v4.3 we are including it in its current shape to get some early feedback.
+So please try it out and let us know what you think on [twitter](https://twitter.com/intent/tweet?text=.%40grafana&source=4_3_beta_blog&related=blog) or on our [community forum](https://community.grafana.com/c/releases). Is this a feature that you would use? How can we make it better?
+
+**The query editor can show the generated and interpolated SQL that is sent to the MySQL server.**
+
+![](/img/docs/v43/mysql_table_query.png)
+
+**The query editor will also show any errors that resulted from running the query (very useful when you have a syntax error!).**
+
+![](/img/docs/v43/mysql_query_error.png)
+
+## Health Check Endpoint
+
+Now you can monitor the monitoring with the Health Check Endpoint! The new `/api/health` endpoint returns HTTP 200 OK if everything is up and HTTP 503 Error if the Grafana database cannot be pinged.
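+
+A quick way to try it, assuming a default local install listening on port 3000:
+
+```
+curl http://localhost:3000/api/health
+```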
+ diff --git a/docs/sources/http_api/admin.md b/docs/sources/http_api/admin.md index 950ea4c8202..1efe073e94b 100644 --- a/docs/sources/http_api/admin.md +++ b/docs/sources/http_api/admin.md @@ -237,12 +237,14 @@ Change password for specific user Accept: application/json Content-Type: application/json + {"password":"userpassword"} + **Example Response**: HTTP/1.1 200 Content-Type: application/json - {"password":"userpassword"} + {"message": "User password updated"} ## Permissions @@ -254,6 +256,8 @@ Change password for specific user Accept: application/json Content-Type: application/json + {"isGrafanaAdmin": true} + **Example Response**: HTTP/1.1 200 @@ -280,14 +284,22 @@ Change password for specific user ## Pause all alerts -`DELETE /api/admin/pause-all-alerts` +`POST /api/admin/pause-all-alerts` **Example Request**: - DELETE /api/admin/pause-all-alerts HTTP/1.1 + POST /api/admin/pause-all-alerts HTTP/1.1 Accept: application/json Content-Type: application/json + { + "paused": true + } + +JSON Body schema: + +- **paused** – If true, all alerts are paused; if false, all alerts are unpaused. + **Example Response**: HTTP/1.1 200 diff --git a/docs/sources/http_api/alerting.md b/docs/sources/http_api/alerting.md index cc7ce4c2650..1aab7253373 100644 --- a/docs/sources/http_api/alerting.md +++ b/docs/sources/http_api/alerting.md @@ -12,8 +12,8 @@ parent = "http_api" # Alerting API -You can use the Alerting API to get information about alerts and their states but this API cannot be used to modify the alert. -To create new alerts or modify them you need to update the dashboard json that contains the alerts. +You can use the Alerting API to get information about alerts and their states, but this API cannot be used to modify the alert. +To create new alerts or modify them you need to update the dashboard json that contains the alerts. This API can also be used to create, update and delete alert notifications. @@ -28,6 +28,17 @@ This API can also be used to create, update and delete alert notifications. Content-Type: application/json Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + Querystring Parameters: + + These parameters are used as querystring parameters. For example: + + `/api/alerts?dashboardId=1` + + - **dashboardId** – Return alerts for a specified dashboard. + - **panelId** – Return alerts for a specified panel on a dashboard. + - **limit** - Limit response to x number of alerts. + - **state** - Return alerts with one or more of the following alert states: `ALL`, `no_data`, `paused`, `alerting`, `ok`, `pending`. To specify multiple states use the following format: `?state=paused&state=alerting` + **Example Response**: HTTP/1.1 200 @@ -40,6 +51,13 @@ This API can also be used to create, update and delete alert notifications. "name": "fire place sensor", "message": "Someone is trying to break in through the fire place", "state": "alerting", + "evalDate": "0001-01-01T00:00:00Z", + "evalData": [ + { + "metric": "fire", + "tags": null, + "value": 5.349999999999999 + } + ], "newStateDate": "2016-12-25", "executionError": "", "dashboardUri": "http://grafana.com/dashboard/db/sensors" @@ -73,7 +91,6 @@ This API can also be used to create, update and delete alert notifications. "dashboardUri": "http://grafana.com/dashboard/db/sensors" } - ## Pause alert `POST /api/alerts/:id/pause` @@ -86,10 +103,15 @@ This API can also be used to create, update and delete alert notifications. **Example Request**: POST /api/alerts/1/pause HTTP/1.1 Accept: application/json Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk { - "alertId": 1, "paused": true } +The `:id` path parameter is the id of the alert to be paused or unpaused. + +JSON Body Schema: + +- **paused** – Can be `true` or `false`. True to pause an alert. False to unpause an alert. + **Example Response**: HTTP/1.1 200 @@ -111,11 +133,13 @@ This API can also be used to create, update and delete alert notifications. Content-Type: application/json Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + **Example Response**: HTTP/1.1 200 Content-Type: application/json - + { "id": 1, "name": "Team A", @@ -127,11 +151,11 @@ This API can also be used to create, update and delete alert notifications. ## Create alert notification -`POST /api/alerts-notifications` +`POST /api/alert-notifications` **Example Request**: - POST /api/alerts-notifications HTTP/1.1 + POST /api/alert-notifications HTTP/1.1 Accept: application/json Content-Type: application/json Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk @@ -144,29 +168,29 @@ This API can also be used to create, update and delete alert notifications. "addresses": "carl@grafana.com;dev@grafana.com" } } - + **Example Response**: HTTP/1.1 200 Content-Type: application/json { - "id": 1, + "id": 1, "name": "new alert notification", "type": "email", "isDefault": false, "settings": { "addresses": "carl@grafana.com;dev@grafana.com" }, - "created": "2017-01-01 12:34", + "created": "2017-01-01 12:34", "updated": "2017-01-01 12:34" } ## Update alert notification -`PUT /api/alerts-notifications/1` +`PUT /api/alert-notifications/1` **Example Request**: - PUT /api/alerts-notifications/1 HTTP/1.1 + PUT /api/alert-notifications/1 HTTP/1.1 Accept: application/json Content-Type: application/json Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk @@ -176,33 +200,33 @@ This API can also be used to create, update and delete alert notifications. "name": "new alert notification", //Required "type": "email", //Required "isDefault": false, - "settings": { + "settings": { "addresses": "carl@grafana.com;dev@grafana.com" } } - + **Example Response**: HTTP/1.1 200 Content-Type: application/json { - "id": 1, + "id": 1, "name": "new alert notification", "type": "email", "isDefault": false, "settings": { "addresses": "carl@grafana.com;dev@grafana.com" }, - "created": "2017-01-01 12:34", + "created": "2017-01-01 12:34", "updated": "2017-01-01 12:34" } ## Delete alert notification -`DELETE /api/alerts-notifications/:notificationId` +`DELETE /api/alert-notifications/:notificationId` **Example Request**: - DELETE /api/alerts-notifications/1 HTTP/1.1 + DELETE /api/alert-notifications/1 HTTP/1.1 Accept: application/json Content-Type: application/json Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk diff --git a/docs/sources/http_api/auth.md b/docs/sources/http_api/auth.md index ef62f271715..d8ded124ac5 100644 --- a/docs/sources/http_api/auth.md +++ b/docs/sources/http_api/auth.md @@ -41,3 +41,80 @@ You use the token in all requests in the `Authorization` header, like this: Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk The `Authorization` header value should be `Bearer <your api key>`.
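As a concrete illustration of the `Authorization` header usage just described, a request to the HTTP API could look like the following sketch (the host is an assumption and the key is the placeholder token used throughout these docs):

```bash
# Minimal sketch: call the HTTP API with an API key (placeholder values).
GRAFANA_URL="http://localhost:3000"
API_KEY="eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk"
curl -H "Authorization: Bearer $API_KEY" \
     -H "Accept: application/json" \
     "$GRAFANA_URL/api/dashboards/home"
```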
+ +# Auth HTTP resources / actions + +## Api Keys + +`GET /api/auth/keys` + +**Example Request**: + + GET /api/auth/keys HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + +**Example Response**: + + HTTP/1.1 200 + Content-Type: application/json + + [ + { + "id": 3, + "name": "API", + "role": "Admin" + }, + { + "id": 1, + "name": "TestAdmin", + "role": "Admin" + } + ] + +## Create API Key + +`POST /api/auth/keys` + +**Example Request**: + + POST /api/auth/keys HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "name": "mykey", + "role": "Admin" + } + +JSON Body schema: + +- **name** – The key name +- **role** – Sets the access level/Grafana Role for the key. Can be one of the following values: `Viewer`, `Editor`, `Read Only Editor` or `Admin`. + +**Example Response**: + + HTTP/1.1 200 + Content-Type: application/json + + {"name":"mykey","key":"eyJrIjoiWHZiSWd3NzdCYUZnNUtibE9obUpESmE3bzJYNDRIc0UiLCJuIjoibXlrZXkiLCJpZCI6MX1="} + +## Delete API Key + +`DELETE /api/auth/keys/:id` + +**Example Request**: + + DELETE /api/auth/keys/3 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + +**Example Response**: + + HTTP/1.1 200 + Content-Type: application/json + + {"message":"API key deleted"} diff --git a/docs/sources/http_api/dashboard.md b/docs/sources/http_api/dashboard.md index 12d46dc6aa4..a7b7ae87a62 100644 --- a/docs/sources/http_api/dashboard.md +++ b/docs/sources/http_api/dashboard.md @@ -19,26 +19,28 @@ Creates a new dashboard or updates an existing dashboard. **Example Request for new dashboard**: - POST /api/dashboards/db HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk +```http +POST /api/dashboards/db HTTP/1.1 +Accept: application/json +Content-Type: application/json +Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - { - "dashboard": { - "id": null, - "title": "Production Overview", - "tags": [ "templated" ], - "timezone": "browser", - "rows": [ - { - } - ], - "schemaVersion": 6, - "version": 0 - }, - "overwrite": false - } +{ + "dashboard": { + "id": null, + "title": "Production Overview", + "tags": [ "templated" ], + "timezone": "browser", + "rows": [ + { + } + ], + "schemaVersion": 6, + "version": 0 + }, + "overwrite": false +} +``` JSON Body schema: @@ -47,15 +49,17 @@ JSON Body schema: **Example Response**: - HTTP/1.1 200 OK - Content-Type: application/json; charset=UTF-8 - Content-Length: 78 +```http +HTTP/1.1 200 OK +Content-Type: application/json; charset=UTF-8 +Content-Length: 78 - { - "slug": "production-overview", - "status": "success", - "version": 1 - } +{ + "slug": "production-overview", + "status": "success", + "version": 1 +} +``` Status Codes: @@ -67,14 +71,16 @@ Status Codes: The **412** status code is used when a newer dashboard already exists (newer, its version is greater than the version that was sent). The same status code is also used if another dashboard exists with the same title. 
The response body will look like this: - HTTP/1.1 412 Precondition Failed - Content-Type: application/json; charset=UTF-8 - Content-Length: 97 +```http +HTTP/1.1 412 Precondition Failed +Content-Type: application/json; charset=UTF-8 +Content-Length: 97 - { - "message": "The dashboard has been changed by someone else", - "status": "version-mismatch" - } +{ + "message": "The dashboard has been changed by someone else", + "status": "version-mismatch" +} +``` If the title already exists, the `status` property will be `name-exists`. @@ -86,34 +92,38 @@ Will return the dashboard given the dashboard slug. Slug is the url friendly ver **Example Request**: - GET /api/dashboards/db/production-overview HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk +```http +GET /api/dashboards/db/production-overview HTTP/1.1 +Accept: application/json +Content-Type: application/json +Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk +``` **Example Response**: - HTTP/1.1 200 - Content-Type: application/json +```http +HTTP/1.1 200 +Content-Type: application/json - { - "meta": { - "isStarred": false, - "slug": "production-overview" - }, - "dashboard": { - "id": null, - "title": "Production Overview", - "tags": [ "templated" ], - "timezone": "browser", - "rows": [ - { - } - ], - "schemaVersion": 6, - "version": 0 +{ + "meta": { + "isStarred": false, + "slug": "production-overview" + }, + "dashboard": { + "id": null, + "title": "Production Overview", + "tags": [ "templated" ], + "timezone": "browser", + "rows": [ + { } - } + ], + "schemaVersion": 6, + "version": 0 + } +} +``` ## Delete dashboard `DELETE /api/dashboards/db/:slug` @@ -123,17 +133,21 @@ The above will delete the dashboard with the specified slug. The slug is the url **Example Request**: - DELETE /api/dashboards/db/test HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk +```http +DELETE /api/dashboards/db/test HTTP/1.1 +Accept: application/json +Content-Type: application/json +Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk +``` **Example Response**: - HTTP/1.1 200 - Content-Type: application/json +```http +HTTP/1.1 200 +Content-Type: application/json - {"title": "Test"} +{"title": "Test"} +``` ## Gets the home dashboard @@ -221,10 +235,6 @@ Get all tags of dashboards } ] -## Dashboard from JSON file - -`GET /file/:file` - ## Search Dashboards `GET /api/search/` diff --git a/docs/sources/installation/configuration.md b/docs/sources/installation/configuration.md index 176f843e86b..20f191b26e1 100644 --- a/docs/sources/installation/configuration.md +++ b/docs/sources/installation/configuration.md @@ -229,6 +229,10 @@ Used for signing keep me logged in / remember me cookies. Set to `true` to disable the use of Gravatar for user profile images. Default is `false`. +### data_source_proxy_whitelist + +Define a whitelist of allowed IPs/domains to use in data sources. Format: `ip_or_domain:port`, separated by spaces. +
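A sketch of how this new option might look in practice (the `grafana.ini` path and the `[security]` section placement are assumptions for illustration; the host names are placeholders):

```bash
# Minimal sketch: append the new whitelist option to grafana.ini
# (config path and [security] section assumed; hosts are placeholders).
sudo tee -a /etc/grafana/grafana.ini <<'EOF'
[security]
data_source_proxy_whitelist = influxdb.internal:8086 prometheus.internal:9090
EOF
```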
## [users] @@ -313,7 +317,6 @@ example: auth_url = https://github.com/login/oauth/authorize token_url = https://github.com/login/oauth/access_token api_url = https://api.github.com/user - allow_sign_up = false team_ids = allowed_organizations = @@ -441,20 +444,29 @@ false only pre-existing Grafana users will be able to login (if ldap authenticat
## [auth.proxy] + This feature allows you to handle authentication in an HTTP reverse proxy. ### enabled + Defaults to `false` ### header_name + Defaults to X-WEBAUTH-USER ### header_property + Defaults to username but can also be set to email ### auto_sign_up + Set to `true` to enable auto sign up of users who do not exist in Grafana DB. Defaults to `true`. +### whitelist + +Limit where auth proxy requests come from by configuring a list of IP addresses. This can be used to prevent users from spoofing the X-WEBAUTH-USER header. +
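Putting the `[auth.proxy]` options above together, a locked-down setup could look like the following sketch (config path assumed; the whitelist IPs are placeholders for your reverse proxy hosts):

```bash
# Minimal sketch: example [auth.proxy] block appended to grafana.ini
# (path assumed; whitelist IPs are placeholders for your proxy hosts).
sudo tee -a /etc/grafana/grafana.ini <<'EOF'
[auth.proxy]
enabled = true
header_name = X-WEBAUTH-USER
header_property = username
auto_sign_up = true
whitelist = 192.168.1.10, 192.168.1.11
EOF
```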
## [session] @@ -644,6 +656,9 @@ Secret key. e.g. AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA ### url Url to where Grafana will send PUT request with images +### public_url +Optional parameter. Url to send to users in notifications, directly appended with the resulting uploaded file name. + ### username basic auth username diff --git a/docs/sources/installation/debian.md b/docs/sources/installation/debian.md index c82b7f1c0e5..b83ebdc4a12 100644 --- a/docs/sources/installation/debian.md +++ b/docs/sources/installation/debian.md @@ -15,15 +15,28 @@ weight = 1 Description | Download ------------ | ------------- -Stable for Debian-based Linux | [4.2.0 (x86-64 deb)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.2.0_amd64.deb) +Stable for Debian-based Linux | [grafana_4.3.1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.3.1_amd64.deb) + +Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing +installation. ## Install Stable +```bash +wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.3.1_amd64.deb +sudo apt-get install -y adduser libfontconfig +sudo dpkg -i grafana_4.3.1_amd64.deb ``` -$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.2.0_amd64.deb -$ sudo apt-get install -y adduser libfontconfig -$ sudo dpkg -i grafana_4.2.0_amd64.deb + + ## APT Repository @@ -40,18 +53,24 @@ candidates. Then add the [Package Cloud](https://packagecloud.io/grafana) key. This allows you to install signed packages. - $ curl https://packagecloud.io/gpg.key | sudo apt-key add - +```bash +curl https://packagecloud.io/gpg.key | sudo apt-key add - +``` Update your Apt repositories and install Grafana - $ sudo apt-get update - $ sudo apt-get install grafana +```bash +sudo apt-get update +sudo apt-get install grafana +``` On some older versions of Ubuntu and Debian you may need to install the `apt-transport-https` package which is needed to fetch packages over HTTPS. - $ sudo apt-get install -y apt-transport-https +```bash +sudo apt-get install -y apt-transport-https +``` ## Package details @@ -67,7 +86,9 @@ HTTPS. Start Grafana by running: - $ sudo service grafana-server start +```bash +sudo service grafana-server start +``` This will start the `grafana-server` process as the `grafana` user, which was created during the package installation. The default HTTP port @@ -75,19 +96,25 @@ is `3000` and default user and group is `admin`. To configure the Grafana server to start at boot time: - $ sudo update-rc.d grafana-server defaults +```bash +sudo update-rc.d grafana-server defaults +``` ## Start the server (via systemd) To start the service using systemd: - $ systemctl daemon-reload - $ systemctl start grafana-server - $ systemctl status grafana-server +```bash +systemctl daemon-reload +systemctl start grafana-server +systemctl status grafana-server +``` Enable the systemd service so that Grafana starts at boot. 
- sudo systemctl enable grafana-server.service +```bash +sudo systemctl enable grafana-server.service +``` ## Environment file diff --git a/docs/sources/installation/ldap.md b/docs/sources/installation/ldap.md index e8e1c8e57bd..769ca3fd1ba 100644 --- a/docs/sources/installation/ldap.md +++ b/docs/sources/installation/ldap.md @@ -73,7 +73,7 @@ email = "email" [[servers.group_mappings]] group_dn = "cn=admins,dc=grafana,dc=org" org_role = "Admin" -# The Grafana organization database id, optional, if left out the default org (id 1) will be used +# The Grafana organization database id, optional, if left out the default org (id 1) will be used. Setting this allows for multiple group_dn's to be assigned to the same org_role provided the org_id differs # org_id = 1 [[servers.group_mappings]] diff --git a/docs/sources/installation/migrating_to2.md b/docs/sources/installation/migrating_to2.md index e7e2e96b6ad..a5d6f793ba6 100644 --- a/docs/sources/installation/migrating_to2.md +++ b/docs/sources/installation/migrating_to2.md @@ -3,9 +3,6 @@ title = "Migrating from older versions" description = "Upgrading & Migrating Grafana from older versions" keywords = ["grafana", "configuration", "documentation", "migration"] type = "docs" -[menu.docs] -parent = "installation" -weight = 10 +++ # Migrating from older versions diff --git a/docs/sources/installation/provisioning.md b/docs/sources/installation/provisioning.md deleted file mode 100644 index c3969bcf32d..00000000000 --- a/docs/sources/installation/provisioning.md +++ /dev/null @@ -1,34 +0,0 @@ -+++ -title = "Installing via provisioning tools" -description = "Guide to install Grafana via provisioning tools like puppet & chef" -keywords = ["grafana", "provisioning", "documentation", "puppet", "chef", "ansible"] -type = "docs" -aliases = ["docs/provisioning"] -[menu.docs] -parent = "installation" -weight = 8 -+++ - - -# Installing via provisioning tools - -Here are links for how to install Grafana (and some include Graphite or -InfluxDB as well) via a provisioning system. These are not maintained by -any core Grafana team member and might be out of date. - -### Puppet - -* [forge.puppetlabs.com/bfraser/grafana](https://forge.puppetlabs.com/bfraser/grafana) - -### Ansible - -* [github.com/picotrading/ansible-grafana](https://github.com/picotrading/ansible-grafana) - -### Docker -* [github.com/grafana/grafana-docker](https://github.com/grafana/grafana-docker) - -### Chef - -* [github.com/JonathanTron/chef-grafana](https://github.com/JonathanTron/chef-grafana) -* [github.com/Nordstrom/grafana2-cookbook](https://github.com/Nordstrom/grafana2-cookbook) - diff --git a/docs/sources/installation/rpm.md b/docs/sources/installation/rpm.md index c0952328cbe..579e04472dc 100644 --- a/docs/sources/installation/rpm.md +++ b/docs/sources/installation/rpm.md @@ -15,25 +15,28 @@ weight = 2 Description | Download ------------ | ------------- -Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.2.0 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0-1.x86_64.rpm) +Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.3.1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.1-1.x86_64.rpm) + +Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing +installation. ## Install Stable You can install Grafana using Yum directly. 
- $ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0-1.x86_64.rpm + $ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.1-1.x86_64.rpm Or install manually using `rpm`. #### On CentOS / Fedora / Redhat: - $ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0-1.x86_64.rpm + $ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.1-1.x86_64.rpm $ sudo yum install initscripts fontconfig - $ sudo rpm -Uvh grafana-4.2.0-1.x86_64.rpm + $ sudo rpm -Uvh grafana-4.3.1-1.x86_64.rpm #### On OpenSuse: - $ sudo rpm -i --nodeps grafana-4.2.0-1.x86_64.rpm + $ sudo rpm -i --nodeps grafana-4.3.1-1.x86_64.rpm ## Install via YUM Repository diff --git a/docs/sources/installation/troubleshooting.md b/docs/sources/installation/troubleshooting.md index 6da8c4f64f8..a5a74c007aa 100644 --- a/docs/sources/installation/troubleshooting.md +++ b/docs/sources/installation/troubleshooting.md @@ -11,48 +11,24 @@ weight = 8 # Troubleshooting -This page is dedicated to helping you solve any problem you have getting -Grafana to work. Please review it before opening a new [GitHub -issue](https://github.com/grafana/grafana/issues/new) or asking a -question in the `#grafana` IRC channel on freenode. +## Visualization & query issues -## General connection issues +The most common problems are related to the query & response from your data source. Even if it looks +like a bug or a visualization issue in Grafana, it is 99% of the time a problem with the data source query or +the data source response. -When setting up Grafana for the first time you might experience issues -with Grafana being unable to query Graphite, OpenTSDB or InfluxDB. You -might not be able to get metric name completion or the graph might show -an error like this: +So make sure to check the query sent and the raw response; learn how in this guide: [How to troubleshoot metric query issues](https://community.grafana.com/t/how-to-troubleshoot-metric-query-issues/50) -![](/img/docs/v1/graph_timestore_error.png) +## Logging -For some types of errors, the `View details` link will show you error -details. For many types of HTTP connection errors, however, there is very -little information. The best way to troubleshoot these issues is use -the [Chrome developer tools](https://developer.chrome.com/devtools/index). -By pressing `F12` you can bring up the chrome dev tools. +If you encounter an error or problem, it is a good idea to check the Grafana server log, usually +located at `/var/log/grafana/grafana.log` on Unix systems or in `/data/log` on +other platforms & manual installs. -![](/img/docs/v1/toubleshooting_chrome_dev_tools.png) +You can enable more logging by changing the log level in your Grafana configuration file. -There are two important tabs in the Chrome developer tools: `Network` -and `Console`. The `Console` tab will show you Javascript errors and -HTTP request errors. In the Network tab you will be able to identify the -request that failed and review request and response parameters. This -information will be of great help in finding the cause of the error. +## FAQ -If you are unable to solve the issue, even after reading the remainder -of this troubleshooting guide, you should open a [GitHub support -issue](https://github.com/grafana/grafana/issues). Before you do that -please search the existing closed or open issues.
Also if you need to -create a support issue, screen shots and or text information about the -chrome console error, request and response information from the -`Network` tab in Chrome developer tools are of great help. - -### Inspecting Grafana metric requests - -![](/img/docs/v1/toubleshooting_chrome_dev_tools_network.png) - -After opening the Chrome developer tools for the first time the -`Network` tab is empty. You will need to refresh the page to get -requests to show. For some type of errors, especially CORS-related, -there might not be a response at all. +Check out the [FAQ](https://community.grafana.com/c/howto/faq) section on our community page for frequently +asked questions. diff --git a/docs/sources/installation/upgrading.md b/docs/sources/installation/upgrading.md new file mode 100644 index 00000000000..846a42b454c --- /dev/null +++ b/docs/sources/installation/upgrading.md @@ -0,0 +1,103 @@ ++++ +title = "Upgrading" +description = "Upgrading Grafana guide" +keywords = ["grafana", "configuration", "documentation", "upgrade"] +type = "docs" +[menu.docs] +name = "Upgrading" +identifier = "upgrading" +parent = "installation" +weight = 10 ++++ + +# Upgrading Grafana + +We recommend that everyone upgrade Grafana often to stay up to date with the latest fixes and enhancements. +In order to make this a reality, Grafana upgrades are backward compatible and the upgrade process is simple & quick. + +Upgrading is generally safe (between many minor versions and one major version) and dashboards and graphs will look the same. There can be minor breaking changes in some edge cases, which are usually outlined in the [Release Notes](https://community.grafana.com/c/releases) and [Changelog](https://github.com/grafana/grafana/blob/master/CHANGELOG.md). + +## Database Backup + +Before upgrading it can be a good idea to back up your Grafana database. This will ensure that you can always roll back to your previous version. During startup, Grafana will automatically migrate the database schema (if there are changes or new tables). Sometimes this can cause issues if you later want to downgrade. + +#### sqlite + +If you use sqlite you only need to make a backup of your `grafana.db` file. This is usually located at `/var/lib/grafana/grafana.db` on Unix systems. +If you are unsure what database you use and where it is stored, check your Grafana configuration file. If you +installed Grafana to a custom location using a binary tar/zip, it is usually in `/data`. + +#### mysql + +``` +backup: +> mysqldump -u root -p[root_password] [grafana] > grafana_backup.sql + +restore: +> mysql -u root -p grafana < grafana_backup.sql +``` + +#### postgres + +``` +backup: +> pg_dump grafana > grafana_backup + +restore: +> psql grafana < grafana_backup +``` + +### Ubuntu / Debian + +If you installed Grafana by downloading a Debian package (`.deb`) you can just follow the same installation guide +and execute the same `dpkg -i` command but with the new package. It will upgrade your Grafana install. + +If you used our APT repository: + +``` +sudo apt-get update +sudo apt-get install grafana +``` + +#### Upgrading from binary tar file + +If you downloaded the binary tar package you can just download and extract a new package +and overwrite all your existing files. But this might overwrite your config changes. We +recommend you place your config changes in a file named `/conf/custom.ini` +as this will make upgrades easier without risking losing your config changes.
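Before following any of the distribution-specific upgrade paths below, a minimal backup sketch for the default sqlite setup (path and service name assumed from the sections above):

```bash
# Minimal sketch: back up the default sqlite database before upgrading.
# Stop the server first so the database file is in a consistent state.
sudo service grafana-server stop
sudo cp /var/lib/grafana/grafana.db /var/lib/grafana/grafana.db.backup
sudo service grafana-server start
```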
+ +### Centos / RHEL + +If you installed Grafana by downloading an rpm package you can just follow the same installation guide +and execute the same `yum install` or `rpm -i` command but with the new package. It will upgrade your Grafana install. + +If you used our YUM repository: + +``` +sudo yum update grafana +``` + +### Docker + +This is just an example; details depend on how you configured your Grafana container. +``` +docker pull grafana/grafana +docker stop my-grafana-container +docker rm my-grafana-container +docker run --name=my-grafana-container --restart=always -v /var/lib/grafana:/var/lib/grafana grafana/grafana +``` + +### Windows + +If you downloaded the Windows binary package you can just download a newer package and extract +to the same location (and overwrite the existing files). This might overwrite your config changes. We +recommend you place your config changes in a file named `/conf/custom.ini` +as this will make upgrades easier without risking losing your config changes. + +## Upgrading from 1.x + +[Migrating from 1.x to 2.x]({{< relref "installation/migrating_to2.md" >}}) + +## Upgrading from 2.x + +We are not aware of any issues upgrading directly from 2.x to 4.x but to be on the safe side, go via 3.x. diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md index 229654ef58e..dac8c560d4a 100644 --- a/docs/sources/installation/windows.md +++ b/docs/sources/installation/windows.md @@ -13,7 +13,10 @@ weight = 3 Description | Download ------------ | ------------- -Latest stable package for Windows | [grafana.4.2.0.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0.windows-x64.zip) +Latest stable package for Windows | [grafana.4.3.1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.1.windows-x64.zip) + +Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing +installation. ## Configure diff --git a/docs/sources/plugins/installation.md b/docs/sources/plugins/installation.md index f136d552f51..977abb055f1 100644 --- a/docs/sources/plugins/installation.md +++ b/docs/sources/plugins/installation.md @@ -99,6 +99,6 @@ To manually install a Plugin via the Grafana.com API: } ``` -4. Download the plugin with `https://grafana.com/api/plugins/<plugin id>/versions/<version number>/download` (for example: https://grafana.com/api/plugins/jdbranham-diagram-panel/versions/1.4.0/download). Unzip the downloaded file into the Grafana Server's `data/plugins` directory. +4. Download the plugin with `https://grafana.com/api/plugins/<plugin id>/versions/<version number>/download` (for example: https://grafana.com/api/plugins/jdbranham-diagram-panel/versions/1.4.0/download). Unzip the downloaded file into the Grafana Server's `plugins` directory. 5. Restart the Grafana Server. diff --git a/docs/sources/project/building_from_source.md b/docs/sources/project/building_from_source.md index 4b5291dc887..3d6673d2fb0 100644 --- a/docs/sources/project/building_from_source.md +++ b/docs/sources/project/building_from_source.md @@ -13,8 +13,8 @@ dev environment. Grafana ships with its own required backend server; also comple ## Dependencies -- [Go 1.8](https://golang.org/dl/) -- [NodeJS](https://nodejs.org/download/) +- [Go 1.8.1](https://golang.org/dl/) +- [NodeJS LTS](https://nodejs.org/download/) ## Get Code Create a directory for the project and set your path accordingly.
Then download and install Grafana into your $GOPATH directory diff --git a/docs/sources/reference/annotations.md b/docs/sources/reference/annotations.md index 8f2170ba145..63b2f5fc327 100644 --- a/docs/sources/reference/annotations.md +++ b/docs/sources/reference/annotations.md @@ -13,42 +13,28 @@ weight = 2 Annotations provide a way to mark points on the graph with rich events. When you hover over an annotation you can get title, tags, and text information for the event. -![](/img/docs/v1/annotated_graph1.png) +![](/img/docs/annotations/toggles.png) -To add an annotation query click dashboard settings icon in top menu and select `Annotations` from the -dropdown. This will open the `Annotations` edit view. Click the `Add` tab to add a new annotation query. +## Queries -> Note: Annotations apply to all graphs in a given dashboard, not on a per-panel basis. +Annotation events are fetched via annotation queries. To add a new annotation query to a dashboard +open the dashboard settings menu, then select `Annotations`. This will open the dashboard annotations +settings view. To create a new annotation query, hit the `New` button. -## Graphite annotations +![](/img/docs/annotations/new_query.png) -Graphite supports two ways to query annotations. +Specify a name for the annotation query. This name is given to the toggle (checkbox) that will allow +you to enable/disable showing annotation events from this query. For example, you might have two +annotation queries named `Deploys` and `Outages`. The toggles will allow you to decide what annotations +to show. -- A regular metric query, use the `Graphite target expression` text input for this -- Graphite events query, use the `Graphite event tags` text input, specify an tag or wildcard (leave empty should also work) +### Annotation query details -## Elasticsearch annotations -![](/img/docs/v2/annotations_es.png) +The annotation query options are different for each data source. -Grafana can query any Elasticsearch index for annotation events. The index name can be the name of an alias or an index wildcard pattern. -You can leave the search query blank or specify a lucene query. +- [Graphite annotation queries]({{< relref "features/datasources/graphite.md#annotations" >}}) +- [Elasticsearch annotation queries]({{< relref "features/datasources/elasticsearch.md#annotations" >}}) +- [InfluxDB annotation queries]({{< relref "features/datasources/influxdb.md#annotations" >}}) +- [Prometheus annotation queries]({{< relref "features/datasources/prometheus.md#annotations" >}}) -If your elasticsearch document has a timestamp field other than `@timestamp` you will need to specify that. As well -as the name for the fields that should be used for the annotation title, tags and text. Tags and text are optional. -> **Note** The annotation timestamp field in elasticsearch need to be in UTC format. - -## InfluxDB Annotations -![](/img/docs/v2/annotations_influxdb.png) - -For InfluxDB you need to enter a query like in the above screenshot. You need to have the ```where $timeFilter``` part. -If you only select one column you will not need to enter anything in the column mapping fields. - -## Prometheus Annotations - -![](/img/docs/v3/annotations_prom.png) - -Prometheus supports two ways to query annotations.
- -- A regular metric query -- A Prometheus query for pending and firing alerts (for details see [Inspecting alerts during runtime](https://prometheus.io/docs/alerting/rules/#inspecting-alerts-during-runtime)) diff --git a/docs/sources/reference/dashboard.md b/docs/sources/reference/dashboard.md index 595a10e5081..3bbecfcef4d 100644 --- a/docs/sources/reference/dashboard.md +++ b/docs/sources/reference/dashboard.md @@ -65,7 +65,7 @@ Each field in the dashboard JSON is explained below with its usage: | **timezone** | timezone of dashboard, i.e. `utc` or `browser` | | **editable** | whether a dashboard is editable or not | | **hideControls** | whether row controls on the left in green are hidden or not | -| **graphTooltip** | TODO | +| **graphTooltip** | 0 for no shared crosshair or tooltip (default), 1 for shared crosshair, 2 for shared crosshair AND shared tooltip | | **rows** | row metadata, see [rows section](#rows) for details | | **time** | time range for dashboard, i.e. last 6 hours, last 7 days, etc | | **timepicker** | timepicker metadata, see [timepicker section](#timepicker) for details | diff --git a/docs/sources/reference/sharing.md b/docs/sources/reference/sharing.md index 5210b2811df..da89319f444 100644 --- a/docs/sources/reference/sharing.md +++ b/docs/sources/reference/sharing.md @@ -22,7 +22,7 @@ A dashboard snapshot is an instant way to share an interactive dashboard publicl (metric, template and annotation) and panel links, leaving only the visible metric data and series names embedded into your dashboard. Dashboard snapshots can be accessed by anyone who has the link and can reach the URL. -![](/img/docs/v2/dashboard_snapshot_dialog.png) +![](/img/docs/v4/share_panel_modal.png) ### Publish snapshots You can publish snapshots to your local instance or to [snapshot.raintank.io](http://snapshot.raintank.io). The latter is a free service @@ -42,8 +42,8 @@ You can embed a panel using an iframe on another web site. This tab will show yo Example: ```html - + ``` Below there should be an interactive Grafana graph embedded in an iframe: - + diff --git a/docs/sources/reference/templating.md b/docs/sources/reference/templating.md index 636cc62a0c8..0405251f44d 100644 --- a/docs/sources/reference/templating.md +++ b/docs/sources/reference/templating.md @@ -10,76 +10,162 @@ weight = 1 # Templating - +Templating allows for more interactive and dynamic dashboards. Instead of hard-coding things like server, application +and sensor name in your metric queries, you can use variables in their place. Variables are shown as dropdown select boxes at the top of +the dashboard. These dropdowns make it easy to change the data being displayed in your dashboard. -Dashboard Templating allows you to make your Dashboards more interactive and dynamic. + -They’re one of the most powerful and most used features of Grafana, and they’ve recently gotten even more attention in Grafana 2.0 and Grafana 2.1. +## What is a variable? -You can create Dashboard Template variables that can be used practically anywhere in a Dashboard: data queries on individual Panels (within the Query Editor), the names in your legends, or titles in Panels and Rows. +A variable is a placeholder for a value. You can use variables in metric queries and in panel titles. So when you change +the value, using the dropdown at the top of the dashboard, your panel's metric queries will change to reflect the new value. -You can configure Dashboard Templating by clicking the dropdown cog on the top of the Dashboard while viewing it.
+ +### Interpolation + +Panel titles and metric queries can refer to variables using two different syntaxes: + +- `$varname` Example: apps.frontend.$server.requests.count +- `[[varname]]` Example: apps.frontend.[[server]].requests.count + +Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. Use +the second syntax in expressions like `my.server[[serverNumber]].count`. + +Before queries are sent to your data source the query is **interpolated**, meaning the variable is replaced with its current value. During +interpolation the variable value might be **escaped** in order to conform to the syntax of the query language and where it is used. +For example, a variable used in a regex expression in an InfluxDB or Prometheus query will be regex escaped. Read the data source specific +documentation article for details on value escaping during interpolation. + +### Variable options + +A variable is presented as a dropdown select box at the top of the dashboard. It has a current value and a set of **options**. The **options** +are the set of values you can choose from. + +## Adding a variable + + + +You add variables via the Dashboard cog menu > Templating. This opens up a list of variables and a `New` button to create a new variable. + +### Basic variable options + +Option | Description ------- | -------- *Name* | The name of the variable; this is the name you use when you refer to your variable in your metric queries. Must be unique and contain no white-spaces. +*Label* | The name of the dropdown for this variable. +*Hide* | Options to hide the dropdown select box. +*Type* | Defines the variable type. -## Variable types +### Variable types -There are three different types of Template variables: query, custom, and interval. +Type | Description ------- | -------- +*Query* | This variable type allows you to write a data source query that usually returns a list of metric names, tag values or keys. For example, a query that returns a list of server names, sensor ids or data centers. +*Interval* | This variable can represent time spans. Instead of hard-coding a group by time or date histogram interval, use a variable of this type. +*Datasource* | This type allows you to quickly change the data source for an entire Dashboard. Useful if you have multiple instances of a data source in, for example, different environments. +*Custom* | Define the variable options manually using a comma separated list. +*Constant* | Define a hidden constant. Useful for metric path prefixes for dashboards you want to share. During dashboard export, constant variables will be made into an import option. +*Ad hoc filters* | Very special kind of variable that only works with some data sources, currently InfluxDB & Elasticsearch. It allows you to add key/value filters that will automatically be added to all metric queries that use the specified data source. -They can all be used to create dynamic variables that you can use throughout the Dashboard, but they differ in how they get the data for their values. +### Query options +This variable type is the most powerful and complex as it can dynamically fetch its options using a data source query. -### Query +Option | Description ------- | -------- +*Data source* | The data source target for the query. +*Refresh* | Controls when to update the variable option list (values in the dropdown). **On Dashboard Load** will slow down dashboard load as the variable query needs to be completed before the dashboard can be initialized.
Set this only to **On Time Range Change** if your variable options query contains a time range filter or is dependent on the dashboard time range. +*Query* | The data source specific query expression. +*Regex* | Regex to filter or capture specific parts of the names returned by your data source query. Optional. +*Sort* | Define sort order for options in dropdown. **Disabled** means that the order of options returned by your data source query will be used. - > Note: The Query type is Data Source specific. Please consult the appropriate documentation for your particular Data Source. +### Query expressions -Query is the most common type of Template variable. Use the `Query` template type to generate a dynamic list of variables, simply by allowing Grafana to explore your Data Source metric namespace when the Dashboard loads. +The query expressions are different for each data source. -For example a query like `prod.servers.*` will fill the variable with all possible values that exists in that wildcard position (in the case of the Graphite Data Source). +- [Graphite templating queries]({{< relref "features/datasources/graphite.md#templating" >}}) +- [Elasticsearch templating queries]({{< relref "features/datasources/elasticsearch.md#templating" >}}) +- [InfluxDB templating queries]({{< relref "features/datasources/influxdb.md#templating" >}}) +- [Prometheus templating queries]({{< relref "features/datasources/prometheus.md#templating" >}}) +- [OpenTSDB templating queries]({{< relref "features/datasources/opentsdb.md#templating" >}}) -You can even create nested variables that use other variables in their definition. For example `apps.$app.servers.*` uses the variable $app in its own query definition. +One thing to note is that query expressions can contain references to other variables and in effect create linked variables. +Grafana will detect this and automatically refresh a variable when one of the variables it references changes. -You can utilize the special ** All ** value to allow the Dashboard user to query for every single Query variable returned. Grafana will automatically translate ** All ** into the appropriate format for your Data Source. +## Selection Options -#### Multi-select -As of Grafana 2.1, it is now possible to select a subset of Query Template variables (previously it was possible to select an individual value or 'All', not multiple values that were less than All). This is accomplished via the Multi-Select option. If enabled, the Dashboard user will be able to enable and disable individual variables. +Option | Description ------- | -------- +*Multi-value* | If enabled, the variable will support the selection of multiple options at the same time. +*Include All option* | Add a special `All` option whose value includes all options. +*Custom all value* | By default the `All` value will include all options in a combined expression. This can become very long and can cause performance problems. Many times it can be better to specify a custom all value, like a wildcard regex. To make it possible to have custom regex, globs or lucene syntax in the **Custom all value** option, it is never escaped, so you will have to think about what is a valid value for your data source. -The Multi-Select functionality is taken a step further with the introduction of Multi-Select Tagging. This functionality allows you to group individual Template variables together under a Tag or Group name.
+ +### Formatting multiple values -For example, if you were using Templating to list all 20 of your applications, you could use Multi-Select Tagging to group your applications by function or region or criticality, etc. +Interpolating a variable with multiple values selected is tricky, as it is not straightforward how to format the multiple values into a string that +is valid in the given context where the variable is used. Grafana tries to solve this by allowing each data source plugin to +inform the templating interpolation engine what format to use for multiple values. - > Note: Multi-Select Tagging functionality is currently experimental but is part of Grafana 2.1. To enable this feature click the enable icon when editing Template options for a particular variable. +**Graphite**, for example, uses glob expressions. A variable with multiple values would, in this case, be interpolated as `{host1,host2,host3}` if +the current variable value was *host1*, *host2* and *host3*. - +**InfluxDB and Prometheus** use regex expressions, so the same variable +would be interpolated as `(host1|host2|host3)`. Every value would also be regex escaped; if not, a value with a regex control character would +break the regex expression. -Grafana gets the list of tags and the list of values in each tag by performing two queries on your metric namespace. +**Elasticsearch** uses lucene query syntax, so the same variable would, in this case, be formatted as `("host1" OR "host2" OR "host3")`. In this case every value +needs to be escaped so that the value can contain lucene control words and quotation marks. -The Tags query returns a list of Tags. +#### Formatting troubles -The Tag values query returns the values for a given Tag. +Automatic escaping & formatting can cause problems and it can be tricky to grasp the logic behind it. +Especially for InfluxDB and Prometheus, where the use of regex syntax requires that the variable is used in a regex operator context. +If you do not want Grafana to do this automatic regex escaping and formatting, your only option is to disable the *Multi-value* or *Include All option* +options. -Note: a proof of concept shim that translates the metric query into a SQL call is provided. This allows you to maintain your tag:value mapping independently of your Data Source. +### Value groups/tags -Once configured, Multi-Select Tagging provides a convenient way to group and your template variables, and slice your data in the exact way you want. The Tags can be seen on the right side of the template pull-down. +If you have a lot of options in the dropdown for a multi-value variable, you can use this feature to group the values into selectable tags. -![](/img/docs/v2/multi-select.gif) +Option | Description ------- | -------- +*Tags query* | Data source query that should return a list of tags +*Tag values query* | Data source query that should return a list of values for a specified tag key. Use `$tag` in the query to refer to the currently selected tag. -### Interval +![](/img/docs/v4/variable_dropdown_tags.png) -Use the `Interval` type to create Template variables around time ranges (eg. `1m`,`1h`, `1d`). There is also a special `auto` option that will change depending on the current time range, you can specify how many times the current time range should be divided to calculate the current `auto` range. +### Interval variables -![](/img/docs/v2/templated_variable_parameter.png) +Use the `Interval` type to create a variable that represents a time span (e.g. `1m`, `1h`, `1d`).
There is also a special `auto` option that will change depending on the current time range. You can specify how many times the current time range should be divided to calculate the current `auto` timespan. -### Custom +This variable type is useful as a parameter to group by time (for InfluxDB), Date histogram interval (for Elasticsearch) or as a *summarize* function parameter (for Graphite). -Use the `Custom` type to manually create Template variables around explicit values that are hard-coded into the Dashboard, and not dependent on any Data Source. You can specify multiple Custom Template values by separating them with a comma. +## Repeating Panels -## Repeating Panels and Repeating Rows +Template variables can be very useful to dynamically change your queries across a whole dashboard. If you want +Grafana to dynamically create new panels or rows based on what values you have selected, you can use the *Repeat* feature. -Template Variables can be very useful to dynamically change what you're visualizing on a given panel. Sometimes, you might want to create entire new Panels (or Rows) based on what Template Variables have been selected. This is now possible in Grafana 2.1. +If you have a variable with `Multi-value` or `Include all value` options enabled, you can choose one panel or one row and have Grafana repeat that panel or row +for every selected value. You find this option under the General tab in panel edit mode. Select the variable to repeat by, and a `min span`. +The `min span` controls how small Grafana will make the panels (if you have many values selected). Grafana will automatically adjust the width of +each repeated panel so that the whole row is filled. Currently, you cannot mix other panels on a row with a repeated panel. -Once you've got your Template variables (of any type) configured the way you'd like, check out the Repeating Panels and Repeating Row documentation +Only make changes to the first panel (the original template). To have the changes take effect on all panels you need to trigger a dynamic dashboard rebuild. +You can do this either by changing the variable value (that is the basis for the repeat) or by reloading the dashboard. -## Screencast - Templated Graphite Queries +## Repeating Rows - +To repeat a row, open the row options view. Hover over the left side of the row to trigger the row menu; in this menu, click `Row Options`. Here you find a *Repeat* dropdown where you can select the variable to repeat by. + +### URL state + +Variable values are always synced to the URL using the syntax `var-<varname>=value`. + +### Examples + +- [Graphite Templated Dashboard](http://play.grafana.org/dashboard/db/graphite-templated-nested) +- [Elasticsearch Templated Dashboard](http://play.grafana.org/dashboard/db/elasticsearch-templated) +- [InfluxDB Templated Dashboard](http://play.grafana.org/dashboard/db/influxdb-templated-queries) diff --git a/docs/sources/tutorials/screencasts.md b/docs/sources/tutorials/screencasts.md index f92ead64d49..e92a07c51a7 100644 --- a/docs/sources/tutorials/screencasts.md +++ b/docs/sources/tutorials/screencasts.md @@ -9,58 +9,116 @@ weight = 10 # Screencasts - -{{< screencast src="https://www.youtube.com/embed/sKNZMtoSHN4?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}} - -### Episode 7 - Beginners guide to building dashboards - -For newer users of Grafana, this screencast will familiarize you with the general UI and teach you how to build your first Dashboard.
- -{{< screencast src="https://www.youtube.com/embed/9ZCMVNxUf6s?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}} - -### Episode 6 - Adding data sources, users & organizations - -Now that Grafana has been installed, learn about adding data sources and get a closer look at adding and managing Users and Organizations. - -
- -{{< screencast src="https://www.youtube.com/embed/E-gMFv85FE8?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}} - -### Episode 5 - Installation & Configuration on Red Hat / CentOS - -This screencasts shows how to get Grafana 2.0 installed and configured quickly on RPM-based Linux operating systems. - -
-{{< screencast src="https://www.youtube.com/embed/JY22EBOR9hQ?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}} - -### Episode 4 - Installation & Configuration on Ubuntu / Debian - -Learn how to easily install the dependencies and packages to get Grafana 2.0 up and running on Ubuntu or Debian in just a few minutes. - -
- -{{< screencast src="https://www.youtube.com/embed/FC13uhFRsVw?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}} - -### Episode 3 - Whats New In Grafana 2.0 - -This screencast highlights many of the great new features that were included in the Grafana 2.0 release. - -
- -{{< screencast src="//www.youtube.com/embed/FhNUrueWwOk?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}} -### Episode 2 - Templated Graphite Queries - -The screencast focuses on Templating with the Graphite Data Source. Learn how to make dynamic and adaptable Dashboards for your Graphite metrics. - -
- -{{< screencast src="//www.youtube.com/embed/mgcJPREl3CU?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" >}} -### Episode 1 - Building Graphite Queries - -Learn how the Graphite Query Editor works, and how to use different graphing functions. There's also an introduction to graph display settings. - -
- + diff --git a/emails/templates/alert_notification.html b/emails/templates/alert_notification.html index d0d69faa106..e12fb4ea3e6 100644 --- a/emails/templates/alert_notification.html +++ b/emails/templates/alert_notification.html @@ -28,6 +28,29 @@ +[[if ne .Error "" ]] + + + + +
+<tr> + <td> + <table> + <tr> + <td><b>Error message</b></td> + </tr> + <tr> + <td><pre>[[.Error]]</pre></td> + </tr> + </table> + </td> +</tr>
+[[end]] + [[if ne .State "ok" ]] diff --git a/emails/templates/layouts/default.html b/emails/templates/layouts/default.html index f6d729054cd..07eb32874c7 100644 --- a/emails/templates/layouts/default.html +++ b/emails/templates/layouts/default.html @@ -94,8 +94,7 @@ td[class="stack-column-center"] {
-
- +
diff --git a/package.json b/package.json index d38de899bf5..6524892f20a 100644 --- a/package.json +++ b/package.json @@ -4,7 +4,7 @@ "company": "Coding Instinct AB" }, "name": "grafana", - "version": "4.3.0-pre1", + "version": "4.4.0-pre1", "repository": { "type": "git", "url": "http://github.com/grafana/grafana.git" @@ -76,8 +76,8 @@ "systemjs-builder": "^0.15.34", "tether": "^1.4.0", "tether-drop": "https://github.com/torkelo/drop", - "tslint": "^4.0.2", - "typescript": "^2.1.4", + "tslint": "^5.1.0", + "typescript": "^2.2.2", "virtual-scroll": "^1.1.1" } } diff --git a/packaging/deb/init.d/grafana-server b/packaging/deb/init.d/grafana-server index 61e82d4c612..d01778560f7 100755 --- a/packaging/deb/init.d/grafana-server +++ b/packaging/deb/init.d/grafana-server @@ -37,14 +37,8 @@ MAX_OPEN_FILES=10000 PID_FILE=/var/run/$NAME.pid DAEMON=/usr/sbin/$NAME - umask 0027 -if [ `id -u` -ne 0 ]; then - echo "You need root privileges to run this script" - exit 4 -fi - if [ ! -x $DAEMON ]; then echo "Program not installed or not executable" exit 5 @@ -63,9 +57,16 @@ fi DAEMON_OPTS="--pidfile=${PID_FILE} --config=${CONF_FILE} cfg:default.paths.data=${DATA_DIR} cfg:default.paths.logs=${LOG_DIR} cfg:default.paths.plugins=${PLUGINS_DIR}" +function checkUser() { + if [ `id -u` -ne 0 ]; then + echo "You need root privileges to run this script" + exit 4 + fi +} + case "$1" in start) - + checkUser log_daemon_msg "Starting $DESC" pid=`pidofproc -p $PID_FILE grafana` @@ -112,6 +113,7 @@ case "$1" in log_end_msg $return ;; stop) + checkUser log_daemon_msg "Stopping $DESC" if [ -f "$PID_FILE" ]; then diff --git a/packaging/publish/publish_both.sh b/packaging/publish/publish_both.sh index 5edf679b481..07a64046482 100755 --- a/packaging/publish/publish_both.sh +++ b/packaging/publish/publish_both.sh @@ -1,5 +1,5 @@ #! /usr/bin/env bash -version=4.2.0 +version=4.3.1 wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb diff --git a/packaging/publish/publish_testing.sh b/packaging/publish/publish_testing.sh index 76419eb7a7e..8d27a35b826 100755 --- a/packaging/publish/publish_testing.sh +++ b/packaging/publish/publish_testing.sh @@ -1,6 +1,6 @@ #! /usr/bin/env bash -deb_ver=4.2.0-beta1 -rpm_ver=4.2.0-beta1 +deb_ver=4.3.0-beta1 +rpm_ver=4.3.0-beta1 wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${deb_ver}_amd64.deb diff --git a/packaging/rpm/init.d/grafana-server b/packaging/rpm/init.d/grafana-server index cb9bb73de7d..a9e2988bdb7 100755 --- a/packaging/rpm/init.d/grafana-server +++ b/packaging/rpm/init.d/grafana-server @@ -36,11 +36,6 @@ MAX_OPEN_FILES=10000 PID_FILE=/var/run/$NAME.pid DAEMON=/usr/sbin/$NAME -if [ `id -u` -ne 0 ]; then - echo "You need root privileges to run this script" - exit 4 -fi - if [ ! -x $DAEMON ]; then echo "Program not installed or not executable" exit 5 @@ -70,8 +65,16 @@ function isRunning() { status -p $PID_FILE $NAME > /dev/null 2>&1 } +function checkUser() { + if [ `id -u` -ne 0 ]; then + echo "You need root privileges to run this script" + exit 4 + fi +} + case "$1" in start) + checkUser isRunning if [ $? -eq 0 ]; then echo "Already running." @@ -115,6 +118,7 @@ case "$1" in exit $return ;; stop) + checkUser echo -n "Stopping $DESC: ..." 
if [ -f "$PID_FILE" ]; then diff --git a/pkg/api/alerting.go b/pkg/api/alerting.go index 5652196b58c..58ea56cc8a7 100644 --- a/pkg/api/alerting.go +++ b/pkg/api/alerting.go @@ -255,6 +255,9 @@ func NotificationTest(c *middleware.Context, dto dtos.NotificationTestCommand) R } if err := bus.Dispatch(cmd); err != nil { + if err == models.ErrSmtpNotEnabled { + return ApiError(412, err.Error(), err) + } return ApiError(500, "Failed to send alert notifications", err) } diff --git a/pkg/api/annotations.go b/pkg/api/annotations.go index 48bf6c327ad..e07c77f1c1d 100644 --- a/pkg/api/annotations.go +++ b/pkg/api/annotations.go @@ -39,12 +39,53 @@ func GetAnnotations(c *middleware.Context) Response { Text: item.Text, Metric: item.Metric, Title: item.Title, + PanelId: item.PanelId, + RegionId: item.RegionId, + Type: string(item.Type), }) } return Json(200, result) } +func PostAnnotation(c *middleware.Context, cmd dtos.PostAnnotationsCmd) Response { + repo := annotations.GetRepository() + + item := annotations.Item{ + OrgId: c.OrgId, + DashboardId: cmd.DashboardId, + PanelId: cmd.PanelId, + Epoch: cmd.Time / 1000, + Title: cmd.Title, + Text: cmd.Text, + CategoryId: cmd.CategoryId, + NewState: cmd.FillColor, + Type: annotations.EventType, + } + + if err := repo.Save(&item); err != nil { + return ApiError(500, "Failed to save annotation", err) + } + + // handle regions + if cmd.IsRegion { + item.RegionId = item.Id + + if err := repo.Update(&item); err != nil { + return ApiError(500, "Failed set regionId on annotation", err) + } + + item.Id = 0 + item.Epoch = cmd.TimeEnd + + if err := repo.Save(&item); err != nil { + return ApiError(500, "Failed save annotation for region end time", err) + } + } + + return ApiSuccess("Annotation added") +} + func DeleteAnnotations(c *middleware.Context, cmd dtos.DeleteAnnotationsCmd) Response { repo := annotations.GetRepository() diff --git a/pkg/api/api.go b/pkg/api/api.go index 843b68eb915..c3a3728338d 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -223,6 +223,13 @@ func (hs *HttpServer) registerRoutes() { // Dashboard r.Group("/dashboards", func() { r.Combo("/db/:slug").Get(GetDashboard).Delete(DeleteDashboard) + + r.Get("/id/:dashboardId/versions", wrap(GetDashboardVersions)) + r.Get("/id/:dashboardId/versions/:id", wrap(GetDashboardVersion)) + r.Post("/id/:dashboardId/restore", reqEditorRole, bind(dtos.RestoreDashboardVersionCommand{}), wrap(RestoreDashboardVersion)) + + r.Post("/calculate-diff", bind(dtos.CalculateDiffOptions{}), wrap(CalculateDashboardDiff)) + r.Post("/db", reqEditorRole, bind(m.SaveDashboardCommand{}), wrap(PostDashboard)) r.Get("/file/:file", GetDashboardFromJsonFile) r.Get("/home", wrap(GetHomeDashboard)) @@ -253,6 +260,7 @@ func (hs *HttpServer) registerRoutes() { r.Post("/tsdb/query", bind(dtos.MetricRequest{}), wrap(QueryMetrics)) r.Get("/tsdb/testdata/scenarios", wrap(GetTestDataScenarios)) r.Get("/tsdb/testdata/gensql", reqGrafanaAdmin, wrap(GenerateSqlTestData)) + r.Get("/tsdb/testdata/random-walk", wrap(GetTestDataRandomWalk)) // metrics r.Get("/metrics", wrap(GetInternalMetrics)) @@ -277,7 +285,10 @@ func (hs *HttpServer) registerRoutes() { }, reqEditorRole) r.Get("/annotations", wrap(GetAnnotations)) - r.Post("/annotations/mass-delete", reqOrgAdmin, bind(dtos.DeleteAnnotationsCmd{}), wrap(DeleteAnnotations)) + + r.Group("/annotations", func() { + r.Post("/", bind(dtos.PostAnnotationsCmd{}), wrap(PostAnnotation)) + }, reqEditorRole) // error test r.Get("/metrics/error", wrap(GenerateError)) diff --git 
a/pkg/api/cloudwatch/cloudwatch_test.go b/pkg/api/cloudwatch/cloudwatch_test.go index 41050c102ff..35b034b3777 100644 --- a/pkg/api/cloudwatch/cloudwatch_test.go +++ b/pkg/api/cloudwatch/cloudwatch_test.go @@ -16,7 +16,8 @@ func TestECSCredProvider(t *testing.T) { defer os.Clearenv() os.Setenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/abc/123") - provider := remoteCredProvider(&session.Session{}) + sess, _ := session.NewSession() + provider := remoteCredProvider(sess) So(provider, ShouldNotBeNil) @@ -30,7 +31,8 @@ func TestECSCredProvider(t *testing.T) { func TestDefaultEC2RoleProvider(t *testing.T) { Convey("Running outside an ECS container task", t, func() { - provider := remoteCredProvider(&session.Session{}) + sess, _ := session.NewSession() + provider := remoteCredProvider(sess) So(provider, ShouldNotBeNil) diff --git a/pkg/api/cloudwatch/metrics.go b/pkg/api/cloudwatch/metrics.go index 67f35a60aa7..f8b93d361f5 100644 --- a/pkg/api/cloudwatch/metrics.go +++ b/pkg/api/cloudwatch/metrics.go @@ -30,12 +30,13 @@ var customMetricsDimensionsMap map[string]map[string]map[string]*CustomMetricsCa func init() { metricsMap = map[string][]string{ "AWS/ApiGateway": {"4XXError", "5XXError", "CacheHitCount", "CacheMissCount", "Count", "IntegrationLatency", "Latency"}, - "AWS/ApplicationELB": {"ActiveConnectionCount", "ClientTLSNegotiationErrorCount", "HealthyHostCount", "HTTPCode_ELB_4XX_Count", "HTTPCode_ELB_5XX_Count", "HTTPCode_Target_2XX_Count", "HTTPCode_Target_3XX_Count", "HTTPCode_Target_4XX_Count", "HTTPCode_Target_5XX_Count", "NewConnectionCount", "ProcessedBytes", "RejectedConnectionCount", "RequestCount", "TargetConnectionErrorCount", "TargetResponseTime", "TargetTLSNegotiationErrorCount", "UnHealthyHostCount"}, + "AWS/ApplicationELB": {"ActiveConnectionCount", "ClientTLSNegotiationErrorCount", "HealthyHostCount", "HTTPCode_ELB_4XX_Count", "HTTPCode_ELB_5XX_Count", "HTTPCode_Target_2XX_Count", "HTTPCode_Target_3XX_Count", "HTTPCode_Target_4XX_Count", "HTTPCode_Target_5XX_Count", "IPv6ProcessedBytes", "IPv6RequestCount", "NewConnectionCount", "ProcessedBytes", "RejectedConnectionCount", "RequestCount", "TargetConnectionErrorCount", "TargetResponseTime", "TargetTLSNegotiationErrorCount", "UnHealthyHostCount"}, "AWS/AutoScaling": {"GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity", "GroupInServiceInstances", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"}, "AWS/Billing": {"EstimatedCharges"}, "AWS/CloudFront": {"Requests", "BytesDownloaded", "BytesUploaded", "TotalErrorRate", "4xxErrorRate", "5xxErrorRate"}, "AWS/CloudSearch": {"SuccessfulRequests", "SearchableDocuments", "IndexUtilization", "Partitions"}, - "AWS/DynamoDB": {"ConditionalCheckFailedRequests", "ConsumedReadCapacityUnits", "ConsumedWriteCapacityUnits", "OnlineIndexConsumedWriteCapacity", "OnlineIndexPercentageProgress", "OnlineIndexThrottleEvents", "ProvisionedReadCapacityUnits", "ProvisionedWriteCapacityUnits", "ReadThrottleEvents", "ReturnedBytes", "ReturnedItemCount", "ReturnedRecordsCount", "SuccessfulRequestLatency", "SystemErrors", "ThrottledRequests", "UserErrors", "WriteThrottleEvents"}, + "AWS/DMS": {"FreeableMemory", "WriteIOPS", "ReadIOPS", "WriteThroughput", "ReadThroughput", "WriteLatency", "ReadLatency", "SwapUsage", "NetworkTransmitThroughput", "NetworkReceiveThroughput", "FullLoadThroughputBandwidthSource", "FullLoadThroughputBandwidthTarget", "FullLoadThroughputRowsSource", "FullLoadThroughputRowsTarget", "CDCIncomingChanges", "CDCChangesMemorySource", 
"CDCChangesMemoryTarget", "CDCChangesDiskSource", "CDCChangesDiskTarget", "CDCThroughputBandwidthTarget", "CDCThroughputRowsSource", "CDCThroughputRowsTarget", "CDCLatencySource", "CDCLatencyTarget"}, + "AWS/DynamoDB": {"ConditionalCheckFailedRequests", "ConsumedReadCapacityUnits", "ConsumedWriteCapacityUnits", "OnlineIndexConsumedWriteCapacity", "OnlineIndexPercentageProgress", "OnlineIndexThrottleEvents", "ProvisionedReadCapacityUnits", "ProvisionedWriteCapacityUnits", "ReadThrottleEvents", "ReturnedBytes", "ReturnedItemCount", "ReturnedRecordsCount", "SuccessfulRequestLatency", "SystemErrors", "TimeToLiveDeletedItemCount", "ThrottledRequests", "UserErrors", "WriteThrottleEvents"}, "AWS/EBS": {"VolumeReadBytes", "VolumeWriteBytes", "VolumeReadOps", "VolumeWriteOps", "VolumeTotalReadTime", "VolumeTotalWriteTime", "VolumeIdleTime", "VolumeQueueLength", "VolumeThroughputPercentage", "VolumeConsumedReadWriteOps", "BurstBalance"}, "AWS/EC2": {"CPUCreditUsage", "CPUCreditBalance", "CPUUtilization", "DiskReadOps", "DiskWriteOps", "DiskReadBytes", "DiskWriteBytes", "NetworkIn", "NetworkOut", "NetworkPacketsIn", "NetworkPacketsOut", "StatusCheckFailed", "StatusCheckFailed_Instance", "StatusCheckFailed_System"}, "AWS/EC2Spot": {"AvailableInstancePoolsCount", "BidsSubmittedForCapacity", "EligibleInstancePoolCount", "FulfilledCapacity", "MaxPercentCapacityAllocation", "PendingCapacity", "PercentCapacityAllocation", "TargetCapacity", "TerminatingCapacity"}, @@ -68,27 +69,28 @@ func init() { "CoreNodesRunning", "CoreNodesPending", "LiveDataNodes", "MRTotalNodes", "MRActiveNodes", "MRLostNodes", "MRUnhealthyNodes", "MRDecommissionedNodes", "MRRebootedNodes", "S3BytesWritten", "S3BytesRead", "HDFSUtilization", "HDFSBytesRead", "HDFSBytesWritten", "MissingBlocks", "CorruptBlocks", "TotalLoad", "MemoryTotalMB", "MemoryReservedMB", "MemoryAvailableMB", "MemoryAllocatedMB", "PendingDeletionBlocks", "UnderReplicatedBlocks", "DfsPendingReplicationBlocks", "CapacityRemainingGB", "HbaseBackupFailed", "MostRecentBackupDuration", "TimeSinceLastSuccessfulBackup"}, - "AWS/ES": {"ClusterStatus.green", "ClusterStatus.yellow", "ClusterStatus.red", "Nodes", "SearchableDocuments", "DeletedDocuments", "CPUUtilization", "FreeStorageSpace", "JVMMemoryPressure", "AutomatedSnapshotFailure", "MasterCPUUtilization", "MasterFreeStorageSpace", "MasterJVMMemoryPressure", "ReadLatency", "WriteLatency", "ReadThroughput", "WriteThroughput", "DiskQueueLength", "ReadIOPS", "WriteIOPS"}, + "AWS/ES": {"ClusterStatus.green", "ClusterStatus.yellow", "ClusterStatus.red", "ClusterUsedSpace", "Nodes", "SearchableDocuments", "DeletedDocuments", "CPUCreditBalance", "CPUUtilization", "FreeStorageSpace", "JVMMemoryPressure", "AutomatedSnapshotFailure", "MasterCPUCreditBalance", "MasterCPUUtilization", "MasterFreeStorageSpace", "MasterJVMMemoryPressure", "ReadLatency", "WriteLatency", "ReadThroughput", "WriteThroughput", "DiskQueueDepth", "ReadIOPS", "WriteIOPS"}, "AWS/Events": {"Invocations", "FailedInvocations", "TriggeredRules", "MatchedEvents", "ThrottledRules"}, "AWS/Firehose": {"DeliveryToElasticsearch.Bytes", "DeliveryToElasticsearch.Records", "DeliveryToElasticsearch.Success", "DeliveryToRedshift.Bytes", "DeliveryToRedshift.Records", "DeliveryToRedshift.Success", "DeliveryToS3.Bytes", "DeliveryToS3.DataFreshness", "DeliveryToS3.Records", "DeliveryToS3.Success", "IncomingBytes", "IncomingRecords", "DescribeDeliveryStream.Latency", "DescribeDeliveryStream.Requests", "ListDeliveryStreams.Latency", "ListDeliveryStreams.Requests", 
"PutRecord.Bytes", "PutRecord.Latency", "PutRecord.Requests", "PutRecordBatch.Bytes", "PutRecordBatch.Latency", "PutRecordBatch.Records", "PutRecordBatch.Requests", "UpdateDeliveryStream.Latency", "UpdateDeliveryStream.Requests"}, "AWS/IoT": {"PublishIn.Success", "PublishOut.Success", "Subscribe.Success", "Ping.Success", "Connect.Success", "GetThingShadow.Accepted"}, "AWS/Kinesis": {"GetRecords.Bytes", "GetRecords.IteratorAge", "GetRecords.IteratorAgeMilliseconds", "GetRecords.Latency", "GetRecords.Records", "GetRecords.Success", "IncomingBytes", "IncomingRecords", "PutRecord.Bytes", "PutRecord.Latency", "PutRecord.Success", "PutRecords.Bytes", "PutRecords.Latency", "PutRecords.Records", "PutRecords.Success", "ReadProvisionedThroughputExceeded", "WriteProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "OutgoingBytes", "OutgoingRecords"}, "AWS/KinesisAnalytics": {"Bytes", "MillisBehindLatest", "Records", "Success"}, - "AWS/Lambda": {"Invocations", "Errors", "Duration", "Throttles"}, + "AWS/Lambda": {"Invocations", "Errors", "Duration", "Throttles", "IteratorAge"}, "AWS/Logs": {"IncomingBytes", "IncomingLogEvents", "ForwardedBytes", "ForwardedLogEvents", "DeliveryErrors", "DeliveryThrottling"}, "AWS/ML": {"PredictCount", "PredictFailureCount"}, "AWS/OpsWorks": {"cpu_idle", "cpu_nice", "cpu_system", "cpu_user", "cpu_waitio", "load_1", "load_5", "load_15", "memory_buffers", "memory_cached", "memory_free", "memory_swap", "memory_total", "memory_used", "procs"}, "AWS/Redshift": {"CPUUtilization", "DatabaseConnections", "HealthStatus", "MaintenanceMode", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "PercentageDiskSpaceUsed", "ReadIOPS", "ReadLatency", "ReadThroughput", "WriteIOPS", "WriteLatency", "WriteThroughput"}, - "AWS/RDS": {"ActiveTransactions", "AuroraBinlogReplicaLag", "AuroraReplicaLag", "AuroraReplicaLagMaximum", "AuroraReplicaLagMinimum", "BinLogDiskUsage", "BlockedTransactions", "BufferCacheHitRatio", "CommitLatency", "CommitThroughput", "CPUCreditBalance", "CPUCreditUsage", "CPUUtilization", "DatabaseConnections", "DDLLatency", "DDLThroughput", "Deadlocks", "DiskQueueDepth", "DMLLatency", "DMLThroughput", "FailedSqlStatements", "FreeableMemory", "FreeStorageSpace", "LoginFailures", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "ReadIOPS", "ReadLatency", "ReadThroughput", "ReplicaLag", "ResultSetCacheHitRatio", "SelectLatency", "SelectThroughput", "SwapUsage", "TotalConnections", "VolumeReadIOPS", "VolumeWriteIOPS", "WriteIOPS", "WriteLatency", "WriteThroughput"}, - "AWS/Route53": {"HealthCheckStatus", "HealthCheckPercentageHealthy", "ConnectionTime", "SSLHandshakeTime", "TimeToFirstByte"}, + "AWS/RDS": {"ActiveTransactions", "AuroraBinlogReplicaLag", "AuroraReplicaLag", "AuroraReplicaLagMaximum", "AuroraReplicaLagMinimum", "BinLogDiskUsage", "BlockedTransactions", "BufferCacheHitRatio", "CommitLatency", "CommitThroughput", "BinLogDiskUsage", "CPUCreditBalance", "CPUCreditUsage", "CPUUtilization", "DatabaseConnections", "DDLLatency", "DDLThroughput", "Deadlocks", "DeleteLatency", "DeleteThroughput", "DiskQueueDepth", "DMLLatency", "DMLThroughput", "EngineUptime", "FailedSqlStatements", "FreeableMemory", "FreeLocalStorage", "FreeStorageSpace", "InsertLatency", "InsertThroughput", "LoginFailures", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "NetworkThroughput", "Queries", "ReadIOPS", "ReadLatency", "ReadThroughput", "ReplicaLag", "ResultSetCacheHitRatio", "SelectLatency", "SelectThroughput", "SwapUsage", "TotalConnections", "UpdateLatency", 
"UpdateThroughput", "VolumeBytesUsed", "VolumeReadIOPS", "VolumeWriteIOPS", "WriteIOPS", "WriteLatency", "WriteThroughput"}, + "AWS/Route53": {"ChildHealthCheckHealthyCount", "HealthCheckStatus", "HealthCheckPercentageHealthy", "ConnectionTime", "SSLHandshakeTime", "TimeToFirstByte"}, "AWS/S3": {"BucketSizeBytes", "NumberOfObjects", "AllRequests", "GetRequests", "PutRequests", "DeleteRequests", "HeadRequests", "PostRequests", "ListRequests", "BytesDownloaded", "BytesUploaded", "4xxErrors", "5xxErrors", "FirstByteLatency", "TotalRequestLatency"}, "AWS/SES": {"Bounce", "Complaint", "Delivery", "Reject", "Send"}, "AWS/SNS": {"NumberOfMessagesPublished", "PublishSize", "NumberOfNotificationsDelivered", "NumberOfNotificationsFailed"}, - "AWS/SQS": {"NumberOfMessagesSent", "SentMessageSize", "NumberOfMessagesReceived", "NumberOfEmptyReceives", "NumberOfMessagesDeleted", "ApproximateNumberOfMessagesDelayed", "ApproximateNumberOfMessagesVisible", "ApproximateNumberOfMessagesNotVisible"}, + "AWS/SQS": {"NumberOfMessagesSent", "SentMessageSize", "NumberOfMessagesReceived", "NumberOfEmptyReceives", "NumberOfMessagesDeleted", "ApproximateAgeOfOldestMessage", "ApproximateNumberOfMessagesDelayed", "ApproximateNumberOfMessagesVisible", "ApproximateNumberOfMessagesNotVisible"}, "AWS/StorageGateway": {"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "CloudBytesDownloaded", "CloudDownloadLatency", "CloudBytesUploaded", "UploadBufferFree", "UploadBufferPercentUsed", "UploadBufferUsed", "QueuedWrites", "ReadBytes", "ReadTime", "TotalCacheSize", "WriteBytes", "WriteTime", "TimeSinceLastRecoveryPoint", "WorkingStorageFree", "WorkingStoragePercentUsed", "WorkingStorageUsed", "CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "ReadBytes", "ReadTime", "WriteBytes", "WriteTime", "QueuedWrites"}, "AWS/SWF": {"DecisionTaskScheduleToStartTime", "DecisionTaskStartToCloseTime", "DecisionTasksCompleted", "StartedDecisionTasksTimedOutOnClose", "WorkflowStartToCloseTime", "WorkflowsCanceled", "WorkflowsCompleted", "WorkflowsContinuedAsNew", "WorkflowsFailed", "WorkflowsTerminated", "WorkflowsTimedOut", "ActivityTaskScheduleToCloseTime", "ActivityTaskScheduleToStartTime", "ActivityTaskStartToCloseTime", "ActivityTasksCanceled", "ActivityTasksCompleted", "ActivityTasksFailed", "ScheduledActivityTasksTimedOutOnClose", "ScheduledActivityTasksTimedOutOnStart", "StartedActivityTasksTimedOutOnClose", "StartedActivityTasksTimedOutOnHeartbeat"}, + "AWS/VPN": {"TunnelState", "TunnelDataIn", "TunnelDataOut"}, "AWS/WAF": {"AllowedRequests", "BlockedRequests", "CountedRequests"}, "AWS/WorkSpaces": {"Available", "Unhealthy", "ConnectionAttempt", "ConnectionSuccess", "ConnectionFailure", "SessionLaunchTime", "InSessionLatency", "SessionDisconnect"}, "KMS": {"SecondsUntilKeyMaterialExpiration"}, @@ -100,6 +102,7 @@ func init() { "AWS/Billing": {"ServiceName", "LinkedAccount", "Currency"}, "AWS/CloudFront": {"DistributionId", "Region"}, "AWS/CloudSearch": {}, + "AWS/DMS": {"ReplicationInstanceIdentifier", "ReplicationTaskIdentifier"}, "AWS/DynamoDB": {"TableName", "GlobalSecondaryIndexName", "Operation", "StreamLabel"}, "AWS/EBS": {"VolumeId"}, "AWS/EC2": {"AutoScalingGroupName", "ImageId", "InstanceId", "InstanceType"}, @@ -121,14 +124,15 @@ func init() { "AWS/ML": {"MLModelId", "RequestMode"}, "AWS/OpsWorks": {"StackId", "LayerId", "InstanceId"}, "AWS/Redshift": {"NodeID", "ClusterIdentifier"}, - "AWS/RDS": {"DBInstanceIdentifier", "DBClusterIdentifier", "DatabaseClass", "EngineName"}, - "AWS/Route53": 
{"HealthCheckId"}, + "AWS/RDS": {"DBInstanceIdentifier", "DBClusterIdentifier", "DatabaseClass", "EngineName", "Role"}, + "AWS/Route53": {"HealthCheckId", "Region"}, "AWS/S3": {"BucketName", "StorageType", "FilterId"}, "AWS/SES": {}, "AWS/SNS": {"Application", "Platform", "TopicName"}, "AWS/SQS": {"QueueName"}, "AWS/StorageGateway": {"GatewayId", "GatewayName", "VolumeId"}, "AWS/SWF": {"Domain", "WorkflowTypeName", "WorkflowTypeVersion", "ActivityTypeName", "ActivityTypeVersion"}, + "AWS/VPN": {"VpnId", "TunnelIpAddress"}, "AWS/WAF": {"Rule", "WebACL"}, "AWS/WorkSpaces": {"DirectoryId", "WorkspaceId"}, "KMS": {"KeyId"}, diff --git a/pkg/api/dashboard.go b/pkg/api/dashboard.go index 55925c4faf6..df0cbbd745c 100644 --- a/pkg/api/dashboard.go +++ b/pkg/api/dashboard.go @@ -2,12 +2,14 @@ package api import ( "encoding/json" + "fmt" "os" "path" "strings" "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/components/dashdiffs" "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/metrics" @@ -60,6 +62,9 @@ func GetDashboard(c *middleware.Context) { creator = getUserLogin(dash.CreatedBy) } + // make sure db version is in sync with json model version + dash.Data.Set("version", dash.Version) + dto := dtos.DashboardFullWithMeta{ Dashboard: dash.Data, Meta: dtos.DashboardMeta{ @@ -77,6 +82,7 @@ func GetDashboard(c *middleware.Context) { }, } + // TODO(ben): copy this performance metrics logic for the new API endpoints added c.TimeRequest(metrics.M_Api_Dashboard_Get) c.JSON(200, dto) } @@ -114,18 +120,15 @@ func DeleteDashboard(c *middleware.Context) { func PostDashboard(c *middleware.Context, cmd m.SaveDashboardCommand) Response { cmd.OrgId = c.OrgId - - if !c.IsSignedIn { - cmd.UserId = -1 - } else { - cmd.UserId = c.UserId - } + cmd.UserId = c.UserId dash := cmd.GetDashboardModel() + // Check if Title is empty if dash.Title == "" { return ApiError(400, m.ErrDashboardTitleEmpty.Error(), nil) } + if dash.Id == 0 { limitReached, err := middleware.QuotaReached(c, "dashboard") if err != nil { @@ -255,6 +258,135 @@ func GetDashboardFromJsonFile(c *middleware.Context) { c.JSON(200, &dash) } +// GetDashboardVersions returns all dashboard versions as JSON +func GetDashboardVersions(c *middleware.Context) Response { + dashboardId := c.ParamsInt64(":dashboardId") + limit := c.QueryInt("limit") + start := c.QueryInt("start") + + if limit == 0 { + limit = 1000 + } + + query := m.GetDashboardVersionsQuery{ + OrgId: c.OrgId, + DashboardId: dashboardId, + Limit: limit, + Start: start, + } + + if err := bus.Dispatch(&query); err != nil { + return ApiError(404, fmt.Sprintf("No versions found for dashboardId %d", dashboardId), err) + } + + for _, version := range query.Result { + if version.RestoredFrom == version.Version { + version.Message = "Initial save (created by migration)" + continue + } + + if version.RestoredFrom > 0 { + version.Message = fmt.Sprintf("Restored from version %d", version.RestoredFrom) + continue + } + + if version.ParentVersion == 0 { + version.Message = "Initial save" + } + } + + return Json(200, query.Result) +} + +// GetDashboardVersion returns the dashboard version with the given ID. 
+func GetDashboardVersion(c *middleware.Context) Response { + dashboardId := c.ParamsInt64(":dashboardId") + version := c.ParamsInt(":id") + + query := m.GetDashboardVersionQuery{ + OrgId: c.OrgId, + DashboardId: dashboardId, + Version: version, + } + + if err := bus.Dispatch(&query); err != nil { + return ApiError(500, fmt.Sprintf("Dashboard version %d not found for dashboardId %d", version, dashboardId), err) + } + + creator := "Anonymous" + if query.Result.CreatedBy > 0 { + creator = getUserLogin(query.Result.CreatedBy) + } + + dashVersionMeta := &m.DashboardVersionMeta{ + DashboardVersion: *query.Result, + CreatedBy: creator, + } + + return Json(200, dashVersionMeta) +} + +// POST /api/dashboards/calculate-diff performs diffs on two dashboards +func CalculateDashboardDiff(c *middleware.Context, apiOptions dtos.CalculateDiffOptions) Response { + + options := dashdiffs.Options{ + OrgId: c.OrgId, + DiffType: dashdiffs.ParseDiffType(apiOptions.DiffType), + Base: dashdiffs.DiffTarget{ + DashboardId: apiOptions.Base.DashboardId, + Version: apiOptions.Base.Version, + UnsavedDashboard: apiOptions.Base.UnsavedDashboard, + }, + New: dashdiffs.DiffTarget{ + DashboardId: apiOptions.New.DashboardId, + Version: apiOptions.New.Version, + UnsavedDashboard: apiOptions.New.UnsavedDashboard, + }, + } + + result, err := dashdiffs.CalculateDiff(&options) + if err != nil { + if err == m.ErrDashboardVersionNotFound { + return ApiError(404, "Dashboard version not found", err) + } + return ApiError(500, "Unable to compute diff", err) + } + + if options.DiffType == dashdiffs.DiffDelta { + return Respond(200, result.Delta).Header("Content-Type", "application/json") + } else { + return Respond(200, result.Delta).Header("Content-Type", "text/html") + } +} + +// RestoreDashboardVersion restores a dashboard to the given version. 
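+// A request sketch for the restore endpoint (hypothetical ids; the route is
+// registered with reqEditorRole, so an editor login is assumed):
+//
+//	POST /api/dashboards/id/1/restore
+//	{"version": 3}
+//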
+func RestoreDashboardVersion(c *middleware.Context, apiCmd dtos.RestoreDashboardVersionCommand) Response { + dashboardId := c.ParamsInt64(":dashboardId") + + dashQuery := m.GetDashboardQuery{Id: dashboardId, OrgId: c.OrgId} + if err := bus.Dispatch(&dashQuery); err != nil { + return ApiError(404, "Dashboard not found", nil) + } + + versionQuery := m.GetDashboardVersionQuery{DashboardId: dashboardId, Version: apiCmd.Version, OrgId: c.OrgId} + if err := bus.Dispatch(&versionQuery); err != nil { + return ApiError(404, "Dashboard version not found", nil) + } + + dashboard := dashQuery.Result + version := versionQuery.Result + + saveCmd := m.SaveDashboardCommand{} + saveCmd.RestoredFrom = version.Version + saveCmd.OrgId = c.OrgId + saveCmd.UserId = c.UserId + saveCmd.Dashboard = version.Data + saveCmd.Dashboard.Set("version", dashboard.Version) + saveCmd.Message = fmt.Sprintf("Restored from version %d", version.Version) + + return PostDashboard(c, saveCmd) +} + func GetDashboardTags(c *middleware.Context) { query := m.GetDashboardTagsQuery{OrgId: c.OrgId} err := bus.Dispatch(&query) diff --git a/pkg/api/dataproxy.go b/pkg/api/dataproxy.go index 612d013e802..965653c2501 100644 --- a/pkg/api/dataproxy.go +++ b/pkg/api/dataproxy.go @@ -3,6 +3,7 @@ package api import ( "bytes" "io/ioutil" + "net" "net/http" "net/http/httputil" "net/url" @@ -62,6 +63,27 @@ func NewReverseProxy(ds *m.DataSource, proxyPath string, targetUrl *url.URL) *ht // clear cookie headers req.Header.Del("Cookie") req.Header.Del("Set-Cookie") + + // clear X-Forwarded Host/Port/Proto headers + req.Header.Del("X-Forwarded-Host") + req.Header.Del("X-Forwarded-Port") + req.Header.Del("X-Forwarded-Proto") + + // set X-Forwarded-For header + if req.RemoteAddr != "" { + remoteAddr, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + remoteAddr = req.RemoteAddr + } + if req.Header.Get("X-Forwarded-For") != "" { + req.Header.Set("X-Forwarded-For", req.Header.Get("X-Forwarded-For")+", "+remoteAddr) + } else { + req.Header.Set("X-Forwarded-For", remoteAddr) + } + } + + // reqBytes, _ := httputil.DumpRequestOut(req, true); + // log.Trace("Proxying datasource request: %s", string(reqBytes)) } return &httputil.ReverseProxy{Director: director, FlushInterval: time.Millisecond * 200} diff --git a/pkg/api/datasources.go b/pkg/api/datasources.go index ddf8681c3c8..9ffdc4a6d1b 100644 --- a/pkg/api/datasources.go +++ b/pkg/api/datasources.go @@ -20,7 +20,7 @@ func GetDataSources(c *middleware.Context) Response { result := make(dtos.DataSourceList, 0) for _, ds := range query.Result { - dsItem := dtos.DataSource{ + dsItem := dtos.DataSourceListItemDTO{ Id: ds.Id, OrgId: ds.OrgId, Name: ds.Name, @@ -149,8 +149,8 @@ func fillWithSecureJsonData(cmd *m.UpdateDataSourceCommand) error { if err != nil { return err } - secureJsonData := ds.SecureJsonData.Decrypt() + secureJsonData := ds.SecureJsonData.Decrypt() for k, v := range secureJsonData { if _, ok := cmd.SecureJsonData[k]; !ok { @@ -158,6 +158,8 @@ func fillWithSecureJsonData(cmd *m.UpdateDataSourceCommand) error { } } + // set version from db + cmd.Version = ds.Version return nil } diff --git a/pkg/api/dtos/annotations.go b/pkg/api/dtos/annotations.go index 45415978ee1..958fdff89ca 100644 --- a/pkg/api/dtos/annotations.go +++ b/pkg/api/dtos/annotations.go @@ -12,10 +12,25 @@ type Annotation struct { Title string `json:"title"` Text string `json:"text"` Metric string `json:"metric"` + RegionId int64 `json:"regionId"` + Type string `json:"type"` Data *simplejson.Json `json:"data"` } +type 
PostAnnotationsCmd struct { + DashboardId int64 `json:"dashboardId"` + PanelId int64 `json:"panelId"` + CategoryId int64 `json:"categoryId"` + Time int64 `json:"time"` + Title string `json:"title"` + Text string `json:"text"` + + FillColor string `json:"fillColor"` + IsRegion bool `json:"isRegion"` + TimeEnd int64 `json:"timeEnd"` +} + type DeleteAnnotationsCmd struct { AlertId int64 `json:"alertId"` DashboardId int64 `json:"dashboardId"` diff --git a/pkg/api/dtos/dashboard.go b/pkg/api/dtos/dashboard.go new file mode 100644 index 00000000000..9ef9a96edc4 --- /dev/null +++ b/pkg/api/dtos/dashboard.go @@ -0,0 +1,49 @@ +package dtos + +import ( + "time" + + "github.com/grafana/grafana/pkg/components/simplejson" +) + +type DashboardMeta struct { + IsStarred bool `json:"isStarred,omitempty"` + IsHome bool `json:"isHome,omitempty"` + IsSnapshot bool `json:"isSnapshot,omitempty"` + Type string `json:"type,omitempty"` + CanSave bool `json:"canSave"` + CanEdit bool `json:"canEdit"` + CanStar bool `json:"canStar"` + Slug string `json:"slug"` + Expires time.Time `json:"expires"` + Created time.Time `json:"created"` + Updated time.Time `json:"updated"` + UpdatedBy string `json:"updatedBy"` + CreatedBy string `json:"createdBy"` + Version int `json:"version"` +} + +type DashboardFullWithMeta struct { + Meta DashboardMeta `json:"meta"` + Dashboard *simplejson.Json `json:"dashboard"` +} + +type DashboardRedirect struct { + RedirectUri string `json:"redirectUri"` +} + +type CalculateDiffOptions struct { + Base CalculateDiffTarget `json:"base" binding:"Required"` + New CalculateDiffTarget `json:"new" binding:"Required"` + DiffType string `json:"diffType" binding:"Required"` +} + +type CalculateDiffTarget struct { + DashboardId int64 `json:"dashboardId"` + Version int `json:"version"` + UnsavedDashboard *simplejson.Json `json:"unsavedDashboard"` +} + +type RestoreDashboardVersionCommand struct { + Version int `json:"version" binding:"Required"` +} diff --git a/pkg/api/dtos/models.go b/pkg/api/dtos/models.go index 564d86a25bf..d1c346e9539 100644 --- a/pkg/api/dtos/models.go +++ b/pkg/api/dtos/models.go @@ -4,7 +4,6 @@ import ( "crypto/md5" "fmt" "strings" - "time" "github.com/grafana/grafana/pkg/components/simplejson" m "github.com/grafana/grafana/pkg/models" @@ -38,32 +37,6 @@ type CurrentUser struct { HelpFlags1 m.HelpFlags1 `json:"helpFlags1"` } -type DashboardMeta struct { - IsStarred bool `json:"isStarred,omitempty"` - IsHome bool `json:"isHome,omitempty"` - IsSnapshot bool `json:"isSnapshot,omitempty"` - Type string `json:"type,omitempty"` - CanSave bool `json:"canSave"` - CanEdit bool `json:"canEdit"` - CanStar bool `json:"canStar"` - Slug string `json:"slug"` - Expires time.Time `json:"expires"` - Created time.Time `json:"created"` - Updated time.Time `json:"updated"` - UpdatedBy string `json:"updatedBy"` - CreatedBy string `json:"createdBy"` - Version int `json:"version"` -} - -type DashboardFullWithMeta struct { - Meta DashboardMeta `json:"meta"` - Dashboard *simplejson.Json `json:"dashboard"` -} - -type DashboardRedirect struct { - RedirectUri string `json:"redirectUri"` -} - type DataSource struct { Id int64 `json:"id"` OrgId int64 `json:"orgId"` @@ -84,7 +57,23 @@ type DataSource struct { SecureJsonFields map[string]bool `json:"secureJsonFields"` } -type DataSourceList []DataSource +type DataSourceListItemDTO struct { + Id int64 `json:"id"` + OrgId int64 `json:"orgId"` + Name string `json:"name"` + Type string `json:"type"` + TypeLogoUrl string `json:"typeLogoUrl"` + Access m.DsAccess 
`json:"access"` + Url string `json:"url"` + Password string `json:"password"` + User string `json:"user"` + Database string `json:"database"` + BasicAuth bool `json:"basicAuth"` + IsDefault bool `json:"isDefault"` + JsonData *simplejson.Json `json:"jsonData,omitempty"` +} + +type DataSourceList []DataSourceListItemDTO func (slice DataSourceList) Len() int { return len(slice) diff --git a/pkg/api/frontendsettings.go b/pkg/api/frontendsettings.go index 5c3dd19bd87..4d84480a208 100644 --- a/pkg/api/frontendsettings.go +++ b/pkg/api/frontendsettings.go @@ -102,18 +102,15 @@ func getFrontendSettingsMap(c *middleware.Context) (map[string]interface{}, erro datasources[ds.Name] = dsMap } - // add grafana backend data source - grafanaDatasourceMeta, _ := plugins.DataSources["grafana"] - datasources["-- Grafana --"] = map[string]interface{}{ - "type": "grafana", - "name": "-- Grafana --", - "meta": grafanaDatasourceMeta, - } - - // add mixed backend data source - datasources["-- Mixed --"] = map[string]interface{}{ - "type": "mixed", - "meta": plugins.DataSources["mixed"], + // add datasources that are built in (meaning they are not added via data sources page, nor have any entry in datasource table) + for _, ds := range plugins.DataSources { + if ds.BuiltIn { + datasources[ds.Name] = map[string]interface{}{ + "type": ds.Type, + "name": ds.Name, + "meta": plugins.DataSources[ds.Id], + } + } } if defaultDatasource == "" { @@ -169,10 +166,12 @@ func getPanelSort(id string) int { sort = 3 case "text": sort = 4 - case "alertlist": + case "heatmap": sort = 5 - case "dashlist": + case "alertlist": sort = 6 + case "dashlist": + sort = 7 } return sort } diff --git a/pkg/api/gnetproxy.go b/pkg/api/grafana_com_proxy.go similarity index 88% rename from pkg/api/gnetproxy.go rename to pkg/api/grafana_com_proxy.go index 7761729b8af..5db508f4e11 100644 --- a/pkg/api/gnetproxy.go +++ b/pkg/api/grafana_com_proxy.go @@ -13,7 +13,7 @@ import ( "github.com/grafana/grafana/pkg/util" ) -var gNetProxyTransport = &http.Transport{ +var grafanaComProxyTransport = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: false}, Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ @@ -24,7 +24,7 @@ var gNetProxyTransport = &http.Transport{ } func ReverseProxyGnetReq(proxyPath string) *httputil.ReverseProxy { - url, _ := url.Parse(setting.GrafanaNetUrl) + url, _ := url.Parse(setting.GrafanaComUrl) director := func(req *http.Request) { req.URL.Scheme = url.Scheme @@ -45,7 +45,7 @@ func ReverseProxyGnetReq(proxyPath string) *httputil.ReverseProxy { func ProxyGnetRequest(c *middleware.Context) { proxyPath := c.Params("*") proxy := ReverseProxyGnetReq(proxyPath) - proxy.Transport = gNetProxyTransport + proxy.Transport = grafanaComProxyTransport proxy.ServeHTTP(c.Resp, c.Req.Request) c.Resp.Header().Del("Set-Cookie") } diff --git a/pkg/api/http_server.go b/pkg/api/http_server.go index fe7ca21f402..1e143ef876f 100644 --- a/pkg/api/http_server.go +++ b/pkg/api/http_server.go @@ -5,6 +5,7 @@ import ( "crypto/tls" "errors" "fmt" + "net" "net/http" "os" "path" @@ -13,9 +14,12 @@ import ( "github.com/grafana/grafana/pkg/api/live" httpstatic "github.com/grafana/grafana/pkg/api/static" + "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/cmd/grafana-cli/logger" + "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/middleware" + "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/plugins" 
"github.com/grafana/grafana/pkg/setting" ) @@ -46,7 +50,7 @@ func (hs *HttpServer) Start(ctx context.Context) error { hs.streamManager.Run(ctx) listenAddr := fmt.Sprintf("%s:%s", setting.HttpAddr, setting.HttpPort) - hs.log.Info("Initializing HTTP Server", "address", listenAddr, "protocol", setting.Protocol, "subUrl", setting.AppSubUrl) + hs.log.Info("Initializing HTTP Server", "address", listenAddr, "protocol", setting.Protocol, "subUrl", setting.AppSubUrl, "socket", setting.SocketPath) hs.httpSrv = &http.Server{Addr: listenAddr, Handler: hs.macaron} switch setting.Protocol { @@ -62,6 +66,18 @@ func (hs *HttpServer) Start(ctx context.Context) error { hs.log.Debug("server was shutdown gracefully") return nil } + case setting.SOCKET: + ln, err := net.Listen("unix", setting.SocketPath) + if err != nil { + hs.log.Debug("server was shutdown gracefully") + return nil + } + + err = hs.httpSrv.Serve(ln) + if err != nil { + hs.log.Debug("server was shutdown gracefully") + return nil + } default: hs.log.Error("Invalid protocol", "protocol", setting.Protocol) err = errors.New("Invalid Protocol") @@ -147,6 +163,7 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron { Delims: macaron.Delims{Left: "[[", Right: "]]"}, })) + m.Use(hs.healthHandler) m.Use(middleware.GetContextHandler()) m.Use(middleware.Sessioner(&setting.SessionOptions)) m.Use(middleware.RequestMetrics()) @@ -160,6 +177,29 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron { return m } +func (hs *HttpServer) healthHandler(ctx *macaron.Context) { + if ctx.Req.Method != "GET" || ctx.Req.URL.Path != "/api/health" { + return + } + + data := simplejson.New() + data.Set("database", "ok") + data.Set("version", setting.BuildVersion) + data.Set("commit", setting.BuildCommit) + + if err := bus.Dispatch(&models.GetDBHealthQuery{}); err != nil { + data.Set("database", "failing") + ctx.Resp.Header().Set("Content-Type", "application/json; charset=UTF-8") + ctx.Resp.WriteHeader(503) + } else { + ctx.Resp.Header().Set("Content-Type", "application/json; charset=UTF-8") + ctx.Resp.WriteHeader(200) + } + + dataBytes, _ := data.EncodePretty() + ctx.Resp.Write(dataBytes) +} + func (hs *HttpServer) mapStatic(m *macaron.Macaron, rootDir string, dir string, prefix string) { headers := func(c *macaron.Context) { c.Resp.Header().Set("Cache-Control", "public, max-age=3600") diff --git a/pkg/api/login.go b/pkg/api/login.go index d2bbf4e81b9..baec5f5f6c0 100644 --- a/pkg/api/login.go +++ b/pkg/api/login.go @@ -11,7 +11,6 @@ import ( "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" - "github.com/grafana/grafana/pkg/util" ) const ( @@ -79,8 +78,7 @@ func tryLoginUsingRememberCookie(c *middleware.Context) bool { user := userQuery.Result // validate remember me cookie - if val, _ := c.GetSuperSecureCookie( - util.EncodeMd5(user.Rands+user.Password), setting.CookieRememberName); val != user.Login { + if val, _ := c.GetSuperSecureCookie(user.Rands+user.Password, setting.CookieRememberName); val != user.Login { return false } @@ -142,7 +140,7 @@ func loginUserWithUser(user *m.User, c *middleware.Context) { days := 86400 * setting.LogInRememberDays if days > 0 { c.SetCookie(setting.CookieUserName, user.Login, days, setting.AppSubUrl+"/") - c.SetSuperSecureCookie(util.EncodeMd5(user.Rands+user.Password), setting.CookieRememberName, user.Login, days, setting.AppSubUrl+"/") + c.SetSuperSecureCookie(user.Rands+user.Password, setting.CookieRememberName, user.Login, days, setting.AppSubUrl+"/") } 
c.Session.Set(middleware.SESS_KEY_USERID, user.Id) diff --git a/pkg/api/login_oauth.go b/pkg/api/login_oauth.go index 5fbd9fea709..0d9ab83282c 100644 --- a/pkg/api/login_oauth.go +++ b/pkg/api/login_oauth.go @@ -28,6 +28,7 @@ var ( ErrEmailNotAllowed = errors.New("Required email domain not fulfilled") ErrSignUpNotAllowed = errors.New("Signup is not allowed for this adapter") ErrUsersQuotaReached = errors.New("Users quota reached") + ErrNoEmail = errors.New("Login provider didn't return an email address") ) func GenStateString() string { @@ -63,7 +64,7 @@ func OAuthLogin(ctx *middleware.Context) { if setting.OAuthService.OAuthInfos[name].HostedDomain == "" { ctx.Redirect(connect.AuthCodeURL(state, oauth2.AccessTypeOnline)) } else { - ctx.Redirect(connect.AuthCodeURL(state, oauth2.SetParam("hd", setting.OAuthService.OAuthInfos[name].HostedDomain), oauth2.AccessTypeOnline)) + ctx.Redirect(connect.AuthCodeURL(state, oauth2.SetAuthURLParam("hd", setting.OAuthService.OAuthInfos[name].HostedDomain), oauth2.AccessTypeOnline)) } return } @@ -134,6 +135,12 @@ func OAuthLogin(ctx *middleware.Context) { ctx.Logger.Debug("OAuthLogin got user info", "userInfo", userInfo) + // validate that we got at least an email address + if userInfo.Email == "" { + redirectWithError(ctx, ErrNoEmail) + return + } + // validate that the email is allowed to login to grafana if !connect.IsEmailAllowed(userInfo.Email) { redirectWithError(ctx, ErrEmailNotAllowed) diff --git a/pkg/api/metrics.go b/pkg/api/metrics.go index d10491950ef..e35e35cdab6 100644 --- a/pkg/api/metrics.go +++ b/pkg/api/metrics.go @@ -7,6 +7,7 @@ import ( "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" "github.com/grafana/grafana/pkg/models" @@ -50,13 +51,16 @@ func QueryMetrics(c *middleware.Context, reqDto dtos.MetricRequest) Response { return ApiError(500, "Metric request error", err) } + statusCode := 200 for _, res := range resp.Results { if res.Error != nil { res.ErrorString = res.Error.Error() + resp.Message = res.ErrorString + statusCode = 500 } } - return Json(200, &resp) + return Json(statusCode, &resp) } // GET /api/tsdb/testdata/scenarios @@ -141,3 +145,29 @@ func GenerateSqlTestData(c *middleware.Context) Response { return Json(200, &util.DynMap{"message": "OK"}) } + +// GET /api/tsdb/testdata/random-walk +func GetTestDataRandomWalk(c *middleware.Context) Response { + from := c.Query("from") + to := c.Query("to") + intervalMs := c.QueryInt64("intervalMs") + + timeRange := tsdb.NewTimeRange(from, to) + request := &tsdb.Request{TimeRange: timeRange} + + request.Queries = append(request.Queries, &tsdb.Query{ + RefId: "A", + IntervalMs: intervalMs, + Model: simplejson.NewFromAny(&util.DynMap{ + "scenario": "random_walk", + }), + DataSource: &models.DataSource{Type: "grafana-testdata-datasource"}, + }) + + resp, err := tsdb.HandleRequest(context.Background(), request) + if err != nil { + return ApiError(500, "Metric request error", err) + } + + return Json(200, &resp) +} diff --git a/pkg/api/password.go b/pkg/api/password.go index f3c2b0b7058..e71f1317ee4 100644 --- a/pkg/api/password.go +++ b/pkg/api/password.go @@ -12,7 +12,8 @@ func SendResetPasswordEmail(c *middleware.Context, form dtos.SendResetPasswordEm userQuery := m.GetUserByLoginQuery{LoginOrEmail: form.UserOrEmail} if err := bus.Dispatch(&userQuery); err != nil { - return ApiError(404, "User does not exist", err) 
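+	// responding as if the mail was sent (rather than with a 404) likely
+	// prevents this endpoint from being used to probe for valid usernames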
+ c.Logger.Info("Requested password reset for user that was not found", "user", userQuery.LoginOrEmail) + return ApiError(200, "Email sent", err) } emailCmd := m.SendResetPasswordEmailCommand{User: userQuery.Result} diff --git a/pkg/api/playlist_play.go b/pkg/api/playlist_play.go index 780767531a8..29a806ce23d 100644 --- a/pkg/api/playlist_play.go +++ b/pkg/api/playlist_play.go @@ -91,6 +91,6 @@ func LoadPlaylistDashboards(orgId, userId, playlistId int64) (dtos.PlaylistDashb result = append(result, k...) result = append(result, populateDashboardsByTag(orgId, userId, dashboardByTag, dashboardTagOrder)...) - sort.Sort(sort.Reverse(result)) + sort.Sort(result) return result, nil } diff --git a/pkg/api/pluginproxy/pluginproxy.go b/pkg/api/pluginproxy/pluginproxy.go index 21d40ecb948..a5139bb69f7 100644 --- a/pkg/api/pluginproxy/pluginproxy.go +++ b/pkg/api/pluginproxy/pluginproxy.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "net" "net/http" "net/http/httputil" "net/url" @@ -71,7 +72,25 @@ func NewApiPluginProxy(ctx *middleware.Context, proxyPath string, route *plugins req.Header.Del("Cookie") req.Header.Del("Set-Cookie") - //Create a HTTP header with the context in it. + // clear X-Forwarded Host/Port/Proto headers + req.Header.Del("X-Forwarded-Host") + req.Header.Del("X-Forwarded-Port") + req.Header.Del("X-Forwarded-Proto") + + // set X-Forwarded-For header + if req.RemoteAddr != "" { + remoteAddr, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + remoteAddr = req.RemoteAddr + } + if req.Header.Get("X-Forwarded-For") != "" { + req.Header.Set("X-Forwarded-For", req.Header.Get("X-Forwarded-For")+", "+remoteAddr) + } else { + req.Header.Set("X-Forwarded-For", remoteAddr) + } + } + + // Create a HTTP header with the context in it. 
ctxJson, err := json.Marshal(ctx.SignedInUser) if err != nil { ctx.JsonApiErr(500, "failed to marshal context to json.", err) @@ -93,6 +112,8 @@ func NewApiPluginProxy(ctx *middleware.Context, proxyPath string, route *plugins } } + // reqBytes, _ := httputil.DumpRequestOut(req, true); + // log.Trace("Proxying plugin request: %s", string(reqBytes)) } return &httputil.ReverseProxy{Director: director} diff --git a/pkg/api/pluginproxy/pluginproxy_test.go b/pkg/api/pluginproxy/pluginproxy_test.go index f14ca6975e6..424c3fd670c 100644 --- a/pkg/api/pluginproxy/pluginproxy_test.go +++ b/pkg/api/pluginproxy/pluginproxy_test.go @@ -23,9 +23,14 @@ func TestPluginProxy(t *testing.T) { setting.SecretKey = "password" bus.AddHandler("test", func(query *m.GetPluginSettingByIdQuery) error { + key, err := util.Encrypt([]byte("123"), "password") + if err != nil { + return err + } + query.Result = &m.PluginSetting{ SecureJsonData: map[string][]byte{ - "key": util.Encrypt([]byte("123"), "password"), + "key": key, }, } return nil diff --git a/pkg/cmd/grafana-cli/commands/commands.go b/pkg/cmd/grafana-cli/commands/commands.go index 8b2ecfcf7f5..d8f01bbdcab 100644 --- a/pkg/cmd/grafana-cli/commands/commands.go +++ b/pkg/cmd/grafana-cli/commands/commands.go @@ -11,22 +11,18 @@ import ( "github.com/grafana/grafana/pkg/setting" ) -var configFile = flag.String("config", "", "path to config file") -var homePath = flag.String("homepath", "", "path to grafana install/home path, defaults to working directory") - func runDbCommand(command func(commandLine CommandLine) error) func(context *cli.Context) { return func(context *cli.Context) { + cmd := &contextCommandLine{context} - flag.Parse() setting.NewConfigContext(&setting.CommandLineArgs{ - Config: *configFile, - HomePath: *homePath, + Config: cmd.String("config"), + HomePath: cmd.String("homepath"), Args: flag.Args(), }) sqlstore.NewEngine() - cmd := &contextCommandLine{context} if err := command(cmd); err != nil { logger.Errorf("\n%s: ", color.RedString("Error")) logger.Errorf("%s\n\n", err) @@ -95,6 +91,16 @@ var adminCommands = []cli.Command{ Name: "reset-admin-password", Usage: "reset-admin-password ", Action: runDbCommand(resetPasswordCommand), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "homepath", + Usage: "path to grafana install/home path, defaults to working directory", + }, + cli.StringFlag{ + Name: "config", + Usage: "path to config file", + }, + }, }, } diff --git a/pkg/cmd/grafana-server/main.go b/pkg/cmd/grafana-server/main.go index 7e8a6061a3f..cd3dd0dbd36 100644 --- a/pkg/cmd/grafana-server/main.go +++ b/pkg/cmd/grafana-server/main.go @@ -8,10 +8,14 @@ import ( "os/signal" "path/filepath" "runtime" + "runtime/trace" "strconv" "syscall" "time" + "net/http" + _ "net/http/pprof" + "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/services/sqlstore" @@ -44,12 +48,33 @@ func init() { func main() { v := flag.Bool("v", false, "prints current version and exits") + profile := flag.Bool("profile", false, "Turn on pprof profiling") + profilePort := flag.Int("profile-port", 6060, "Define custom port for profiling") flag.Parse() if *v { fmt.Printf("Version %s (commit: %s)\n", version, commit) os.Exit(0) } + if *profile { + runtime.SetBlockProfileRate(1) + go func() { + http.ListenAndServe(fmt.Sprintf("localhost:%d", *profilePort), nil) + }() + + f, err := os.Create("trace.out") + if err != nil { + panic(err) + } + defer f.Close() + + err = trace.Start(f) + if err != nil { + panic(err) + } + defer 
trace.Stop() + } + buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64) if buildstampInt64 == 0 { buildstampInt64 = time.Now().Unix() @@ -113,6 +138,8 @@ func listenToSystemSignals(server models.GrafanaServer) { select { case sig := <-signalChan: + // Stop the trace if profiling has been enabled + trace.Stop() server.Shutdown(0, fmt.Sprintf("system signal: %s", sig)) case code = <-exitChan: server.Shutdown(code, "startup error") diff --git a/pkg/components/dashdiffs/compare.go b/pkg/components/dashdiffs/compare.go new file mode 100644 index 00000000000..f5f2104cb92 --- /dev/null +++ b/pkg/components/dashdiffs/compare.go @@ -0,0 +1,149 @@ +package dashdiffs + +import ( + "encoding/json" + "errors" + + "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/components/simplejson" + "github.com/grafana/grafana/pkg/log" + "github.com/grafana/grafana/pkg/models" + diff "github.com/yudai/gojsondiff" + deltaFormatter "github.com/yudai/gojsondiff/formatter" +) + +var ( + // ErrUnsupportedDiffType occurs when an invalid diff type is used. + ErrUnsupportedDiffType = errors.New("dashdiff: unsupported diff type") + + // ErrNilDiff occurs when two compared interfaces are identical. + ErrNilDiff = errors.New("dashdiff: diff is nil") + + diffLogger = log.New("dashdiffs") +) + +type DiffType int + +const ( + DiffJSON DiffType = iota + DiffBasic + DiffDelta +) + +type Options struct { + OrgId int64 + Base DiffTarget + New DiffTarget + DiffType DiffType +} + +type DiffTarget struct { + DashboardId int64 + Version int + UnsavedDashboard *simplejson.Json +} + +type Result struct { + Delta []byte `json:"delta"` +} + +func ParseDiffType(diff string) DiffType { + switch diff { + case "json": + return DiffJSON + case "basic": + return DiffBasic + case "delta": + return DiffDelta + } + return DiffBasic +} + +// CalculateDiff computes the JSON diff of two dashboard versions, +// assigning the delta of the diff to the `Delta` field. +func CalculateDiff(options *Options) (*Result, error) { + baseVersionQuery := models.GetDashboardVersionQuery{ + DashboardId: options.Base.DashboardId, + Version: options.Base.Version, + OrgId: options.OrgId, + } + + if err := bus.Dispatch(&baseVersionQuery); err != nil { + return nil, err + } + + newVersionQuery := models.GetDashboardVersionQuery{ + DashboardId: options.New.DashboardId, + Version: options.New.Version, + OrgId: options.OrgId, + } + + if err := bus.Dispatch(&newVersionQuery); err != nil { + return nil, err + } + + baseData := baseVersionQuery.Result.Data + newData := newVersionQuery.Result.Data + + left, jsonDiff, err := getDiff(baseData, newData) + if err != nil { + return nil, err + } + + result := &Result{} + + switch options.DiffType { + case DiffDelta: + + deltaOutput, err := deltaFormatter.NewDeltaFormatter().Format(jsonDiff) + if err != nil { + return nil, err + } + result.Delta = []byte(deltaOutput) + + case DiffJSON: + jsonOutput, err := NewJSONFormatter(left).Format(jsonDiff) + if err != nil { + return nil, err + } + result.Delta = []byte(jsonOutput) + + case DiffBasic: + basicOutput, err := NewBasicFormatter(left).Format(jsonDiff) + if err != nil { + return nil, err + } + result.Delta = basicOutput + + default: + return nil, ErrUnsupportedDiffType + } + + return result, nil +} + +// getDiff computes the diff of two dashboard versions. 
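+// It returns the decoded left-hand document as a map (the formatters walk it
+// for context) together with the computed gojsondiff.Diff.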
+func getDiff(baseData, newData *simplejson.Json) (interface{}, diff.Diff, error) { + leftBytes, err := baseData.Encode() + if err != nil { + return nil, nil, err + } + + rightBytes, err := newData.Encode() + if err != nil { + return nil, nil, err + } + + jsonDiff, err := diff.New().Compare(leftBytes, rightBytes) + if err != nil { + return nil, nil, err + } + + if !jsonDiff.Modified() { + return nil, nil, ErrNilDiff + } + + left := make(map[string]interface{}) + if err := json.Unmarshal(leftBytes, &left); err != nil { + return nil, nil, err + } + return left, jsonDiff, nil +} diff --git a/pkg/components/dashdiffs/formatter_basic.go b/pkg/components/dashdiffs/formatter_basic.go new file mode 100644 index 00000000000..01c7757112d --- /dev/null +++ b/pkg/components/dashdiffs/formatter_basic.go @@ -0,0 +1,339 @@ +package dashdiffs + +import ( + "bytes" + "html/template" + + diff "github.com/yudai/gojsondiff" +) + +// A BasicDiff holds the stateful values that are used when generating a basic +// diff from JSON tokens. +type BasicDiff struct { + narrow string + keysIdent int + writing bool + LastIndent int + Block *BasicBlock + Change *BasicChange + Summary *BasicSummary +} + +// A BasicBlock represents a top-level element in a basic diff. +type BasicBlock struct { + Title string + Old interface{} + New interface{} + Change ChangeType + Changes []*BasicChange + Summaries []*BasicSummary + LineStart int + LineEnd int +} + +// A BasicChange represents the change from an old to new value. There are many +// BasicChanges in a BasicBlock. +type BasicChange struct { + Key string + Old interface{} + New interface{} + Change ChangeType + LineStart int + LineEnd int +} + +// A BasicSummary represents the changes within a basic block that are too deep +// or verbose to be represented in the top-level BasicBlock element, or in the +// BasicChange. Instead of showing the values in this case, we simply print +// the key and count how many times the given change was applied to that +// element. +type BasicSummary struct { + Key string + Change ChangeType + Count int + LineStart int + LineEnd int +} + +type BasicFormatter struct { + jsonDiff *JSONFormatter + tpl *template.Template +} + +func NewBasicFormatter(left interface{}) *BasicFormatter { + tpl := template.Must(template.New("block").Funcs(tplFuncMap).Parse(tplBlock)) + tpl = template.Must(tpl.New("change").Funcs(tplFuncMap).Parse(tplChange)) + tpl = template.Must(tpl.New("summary").Funcs(tplFuncMap).Parse(tplSummary)) + + return &BasicFormatter{ + jsonDiff: NewJSONFormatter(left), + tpl: tpl, + } +} + +func (b *BasicFormatter) Format(d diff.Diff) ([]byte, error) { + // calling jsonDiff.Format(d) populates the JSON diff's "Lines" value, + // which we use to compute the basic diff + _, err := b.jsonDiff.Format(d) + if err != nil { + return nil, err + } + + bd := &BasicDiff{} + blocks := bd.Basic(b.jsonDiff.Lines) + buf := &bytes.Buffer{} + + err = b.tpl.ExecuteTemplate(buf, "block", blocks) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// Basic builds the basic diff blocks from the JSON diff lines (V2 of the basic diff) +func (b *BasicDiff) Basic(lines []*JSONLine) []*BasicBlock { + // blocks accumulates the top-level "blocks" of the basic diff + blocks := make([]*BasicBlock, 0) + + // iterate through each line + for _, line := range lines { + // TODO: explain this condition; it appears to flush the completed block once the indent drops back to the top level + if b.LastIndent == 2 && line.Indent == 1 && line.Change == ChangeNil { + if b.Block != nil { + blocks = append(blocks, b.Block) + } + } + + b.LastIndent = line.Indent + + // TODO: why special handling for indent == 1? 
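+	// (a plausible reading of the branches below: indent 1 lines are
+	// top-level dashboard keys, which open and close whole blocks, while
+	// deeper lines only append changes or summaries to the current block)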
+ if line.Indent == 1 { + switch line.Change { + case ChangeNil: + if line.Key != "" { + b.Block = &BasicBlock{ + Title: line.Key, + Change: line.Change, + } + } + + case ChangeAdded, ChangeDeleted: + blocks = append(blocks, &BasicBlock{ + Title: line.Key, + Change: line.Change, + New: line.Val, + LineStart: line.LineNum, + }) + + case ChangeOld: + b.Block = &BasicBlock{ + Title: line.Key, + Old: line.Val, + Change: line.Change, + LineStart: line.LineNum, + } + + case ChangeNew: + b.Block.New = line.Val + b.Block.LineEnd = line.LineNum + + // then write out the change + blocks = append(blocks, b.Block) + default: + // ok + } + } + + // TODO: why special handling for indent > 1? + // Other lines + if line.Indent > 1 { + // Ensure single line change + if line.Key != "" && line.Val != nil && !b.writing { + switch line.Change { + case ChangeAdded, ChangeDeleted: + + b.Block.Changes = append(b.Block.Changes, &BasicChange{ + Key: line.Key, + Change: line.Change, + New: line.Val, + LineStart: line.LineNum, + }) + + case ChangeOld: + b.Change = &BasicChange{ + Key: line.Key, + Change: line.Change, + Old: line.Val, + LineStart: line.LineNum, + } + + case ChangeNew: + b.Change.New = line.Val + b.Change.LineEnd = line.LineNum + b.Block.Changes = append(b.Block.Changes, b.Change) + + default: + // ok + } + + } else { + if line.Change != ChangeUnchanged { + if line.Key != "" { + b.narrow = line.Key + b.keysIdent = line.Indent + } + + if line.Change != ChangeNil { + if !b.writing { + b.writing = true + key := b.Block.Title + + if b.narrow != "" { + key = b.narrow + if b.keysIdent > line.Indent { + key = b.Block.Title + } + } + + b.Summary = &BasicSummary{ + Key: key, + Change: line.Change, + LineStart: line.LineNum, + } + } + } + } else { + if b.writing { + b.writing = false + b.Summary.LineEnd = line.LineNum + b.Block.Summaries = append(b.Block.Summaries, b.Summary) + } + } + } + } + } + + return blocks +} + +// encStateMap is used in the template helper +var ( + encStateMap = map[ChangeType]string{ + ChangeAdded: "added", + ChangeDeleted: "deleted", + ChangeOld: "changed", + ChangeNew: "changed", + } + + // tplFuncMap is the function map for each template + tplFuncMap = template.FuncMap{ + "getChange": func(c ChangeType) string { + state, ok := encStateMap[c] + if !ok { + return "changed" + } + return state + }, + } +) + +var ( + // tplBlock renders the complete list of basic diff blocks + tplBlock = `{{ define "block" -}} +{{ range . }} +
+
+

+ + {{ .Title }} {{ getChange .Change }} +

+ + + + {{ if .Old }} +
{{ .Old }}
+ + {{ end }} + {{ if .New }} +
{{ .New }}
+ {{ end }} + + {{ if .LineStart }} + + {{ end }} + +
+ + + {{ range .Changes }} +
    + {{ template "change" . }} +
+ {{ end }} + + + {{ range .Summaries }} + {{ template "summary" . }} + {{ end }} + +
+{{ end }} +{{ end }}` + + // tplChange is the template for changes + tplChange = `{{ define "change" -}} +
  • + +
    {{ getChange .Change }} {{ .Key }}
    + +
    + {{ if .Old }} +
    {{ .Old }}
    + + {{ end }} + {{ if .New }} +
    {{ .New }}
    + {{ end }} +
    + + {{ if .LineStart }} + + {{ end }} +
    +
  • +{{ end }}` + + // tplSummary is for basic summaries + tplSummary = `{{ define "summary" -}} +
    + + + {{ if .Count }} + {{ .Count }} + {{ end }} + + {{ if .Key }} + {{ .Key }} + {{ getChange .Change }} + {{ end }} + + {{ if .LineStart }} + + {{ end }} +
    +{{ end }}` +) diff --git a/pkg/components/dashdiffs/formatter_json.go b/pkg/components/dashdiffs/formatter_json.go new file mode 100644 index 00000000000..a2807b15992 --- /dev/null +++ b/pkg/components/dashdiffs/formatter_json.go @@ -0,0 +1,477 @@ +package dashdiffs + +import ( + "bytes" + "errors" + "fmt" + "html/template" + "sort" + + diff "github.com/yudai/gojsondiff" +) + +type ChangeType int + +const ( + ChangeNil ChangeType = iota + ChangeAdded + ChangeDeleted + ChangeOld + ChangeNew + ChangeUnchanged +) + +var ( + // changeTypeToSymbol is used for populating the terminating character in + // the diff + changeTypeToSymbol = map[ChangeType]string{ + ChangeNil: "", + ChangeAdded: "+", + ChangeDeleted: "-", + ChangeOld: "-", + ChangeNew: "+", + } + + // changeTypeToName is used for populating class names in the diff + changeTypeToName = map[ChangeType]string{ + ChangeNil: "same", + ChangeAdded: "added", + ChangeDeleted: "deleted", + ChangeOld: "old", + ChangeNew: "new", + } +) + +var ( + // tplJSONDiffWrapper is the template that wraps a diff + tplJSONDiffWrapper = `{{ define "JSONDiffWrapper" -}} + {{ range $index, $element := . }} + {{ template "JSONDiffLine" $element }} + {{ end }} +{{ end }}` + + // tplJSONDiffLine is the template that prints each line in a diff + tplJSONDiffLine = `{{ define "JSONDiffLine" -}} +

    + + {{if .LeftLine }}{{ .LeftLine }}{{ end }} + + + {{if .RightLine }}{{ .RightLine }}{{ end }} + + + {{ .Text }} + + {{ ctos .Change }} +

    +{{ end }}` +) + +var diffTplFuncs = template.FuncMap{ + "ctos": func(c ChangeType) string { + if symbol, ok := changeTypeToSymbol[c]; ok { + return symbol + } + return "" + }, + "cton": func(c ChangeType) string { + if name, ok := changeTypeToName[c]; ok { + return name + } + return "" + }, +} + +// JSONLine contains the data required to render each line of the JSON diff +// and contains the data required to produce the tokens output in the basic +// diff. +type JSONLine struct { + LineNum int `json:"line"` + LeftLine int `json:"leftLine"` + RightLine int `json:"rightLine"` + Indent int `json:"indent"` + Text string `json:"text"` + Change ChangeType `json:"changeType"` + Key string `json:"key"` + Val interface{} `json:"value"` +} + +func NewJSONFormatter(left interface{}) *JSONFormatter { + tpl := template.Must(template.New("JSONDiffWrapper").Funcs(diffTplFuncs).Parse(tplJSONDiffWrapper)) + tpl = template.Must(tpl.New("JSONDiffLine").Funcs(diffTplFuncs).Parse(tplJSONDiffLine)) + + return &JSONFormatter{ + left: left, + Lines: []*JSONLine{}, + tpl: tpl, + path: []string{}, + size: []int{}, + lineCount: 0, + inArray: []bool{}, + } +} + +type JSONFormatter struct { + left interface{} + path []string + size []int + inArray []bool + lineCount int + leftLine int + rightLine int + line *AsciiLine + Lines []*JSONLine + tpl *template.Template +} + +type AsciiLine struct { + // the type of change + change ChangeType + + // the actual changes - no formatting + key string + val interface{} + + // level of indentation for the current line + indent int + + // buffer containing the fully formatted line + buffer *bytes.Buffer +} + +func (f *JSONFormatter) Format(diff diff.Diff) (result string, err error) { + if v, ok := f.left.(map[string]interface{}); ok { + f.formatObject(v, diff) + } else if v, ok := f.left.([]interface{}); ok { + f.formatArray(v, diff) + } else { + return "", fmt.Errorf("expected map[string]interface{} or []interface{}, got %T", + f.left) + } + + b := &bytes.Buffer{} + err = f.tpl.ExecuteTemplate(b, "JSONDiffWrapper", f.Lines) + if err != nil { + fmt.Printf("%v\n", err) + return "", err + } + return b.String(), nil +} + +func (f *JSONFormatter) formatObject(left map[string]interface{}, df diff.Diff) { + f.addLineWith(ChangeNil, "{") + f.push("ROOT", len(left), false) + f.processObject(left, df.Deltas()) + f.pop() + f.addLineWith(ChangeNil, "}") +} + +func (f *JSONFormatter) formatArray(left []interface{}, df diff.Diff) { + f.addLineWith(ChangeNil, "[") + f.push("ROOT", len(left), true) + f.processArray(left, df.Deltas()) + f.pop() + f.addLineWith(ChangeNil, "]") +} + +func (f *JSONFormatter) processArray(array []interface{}, deltas []diff.Delta) error { + patchedIndex := 0 + for index, value := range array { + f.processItem(value, deltas, diff.Index(index)) + patchedIndex++ + } + + // additional Added + for _, delta := range deltas { + switch delta.(type) { + case *diff.Added: + d := delta.(*diff.Added) + // skip items already processed + if int(d.Position.(diff.Index)) < len(array) { + continue + } + f.printRecursive(d.Position.String(), d.Value, ChangeAdded) + } + } + + return nil +} + +func (f *JSONFormatter) processObject(object map[string]interface{}, deltas []diff.Delta) error { + names := sortKeys(object) + for _, name := range names { + value := object[name] + f.processItem(value, deltas, diff.Name(name)) + } + + // Added + for _, delta := range deltas { + switch delta.(type) { + case *diff.Added: + d := delta.(*diff.Added) + f.printRecursive(d.Position.String(), d.Value, 
ChangeAdded) + } + } + + return nil +} + +func (f *JSONFormatter) processItem(value interface{}, deltas []diff.Delta, position diff.Position) error { + matchedDeltas := f.searchDeltas(deltas, position) + positionStr := position.String() + if len(matchedDeltas) > 0 { + for _, matchedDelta := range matchedDeltas { + + switch matchedDelta.(type) { + case *diff.Object: + d := matchedDelta.(*diff.Object) + switch value.(type) { + case map[string]interface{}: + //ok + default: + return errors.New("Type mismatch") + } + o := value.(map[string]interface{}) + + f.newLine(ChangeNil) + f.printKey(positionStr) + f.print("{") + f.closeLine() + f.push(positionStr, len(o), false) + f.processObject(o, d.Deltas) + f.pop() + f.newLine(ChangeNil) + f.print("}") + f.printComma() + f.closeLine() + + case *diff.Array: + d := matchedDelta.(*diff.Array) + switch value.(type) { + case []interface{}: + //ok + default: + return errors.New("Type mismatch") + } + a := value.([]interface{}) + + f.newLine(ChangeNil) + f.printKey(positionStr) + f.print("[") + f.closeLine() + f.push(positionStr, len(a), true) + f.processArray(a, d.Deltas) + f.pop() + f.newLine(ChangeNil) + f.print("]") + f.printComma() + f.closeLine() + + case *diff.Added: + d := matchedDelta.(*diff.Added) + f.printRecursive(positionStr, d.Value, ChangeAdded) + f.size[len(f.size)-1]++ + + case *diff.Modified: + d := matchedDelta.(*diff.Modified) + savedSize := f.size[len(f.size)-1] + f.printRecursive(positionStr, d.OldValue, ChangeOld) + f.size[len(f.size)-1] = savedSize + f.printRecursive(positionStr, d.NewValue, ChangeNew) + + case *diff.TextDiff: + savedSize := f.size[len(f.size)-1] + d := matchedDelta.(*diff.TextDiff) + f.printRecursive(positionStr, d.OldValue, ChangeOld) + f.size[len(f.size)-1] = savedSize + f.printRecursive(positionStr, d.NewValue, ChangeNew) + + case *diff.Deleted: + d := matchedDelta.(*diff.Deleted) + f.printRecursive(positionStr, d.Value, ChangeDeleted) + + default: + return errors.New("Unknown Delta type detected") + } + + } + } else { + f.printRecursive(positionStr, value, ChangeUnchanged) + } + + return nil +} + +func (f *JSONFormatter) searchDeltas(deltas []diff.Delta, position diff.Position) (results []diff.Delta) { + results = make([]diff.Delta, 0) + for _, delta := range deltas { + switch delta.(type) { + case diff.PostDelta: + if delta.(diff.PostDelta).PostPosition() == position { + results = append(results, delta) + } + case diff.PreDelta: + if delta.(diff.PreDelta).PrePosition() == position { + results = append(results, delta) + } + default: + panic("delta is neither PostDelta nor PreDelta") + } + } + return +} + +func (f *JSONFormatter) push(name string, size int, array bool) { + f.path = append(f.path, name) + f.size = append(f.size, size) + f.inArray = append(f.inArray, array) +} + +func (f *JSONFormatter) pop() { + f.path = f.path[0 : len(f.path)-1] + f.size = f.size[0 : len(f.size)-1] + f.inArray = f.inArray[0 : len(f.inArray)-1] +} + +func (f *JSONFormatter) addLineWith(change ChangeType, value string) { + f.line = &AsciiLine{ + change: change, + indent: len(f.path), + buffer: bytes.NewBufferString(value), + } + f.closeLine() +} + +func (f *JSONFormatter) newLine(change ChangeType) { + f.line = &AsciiLine{ + change: change, + indent: len(f.path), + buffer: bytes.NewBuffer([]byte{}), + } +} + +func (f *JSONFormatter) closeLine() { + leftLine := 0 + rightLine := 0 + f.lineCount++ + + switch f.line.change { + case ChangeAdded, ChangeNew: + f.rightLine++ + rightLine = f.rightLine + + case ChangeDeleted, ChangeOld: + f.leftLine++ + leftLine = f.leftLine + + 
case ChangeNil, ChangeUnchanged: + f.rightLine++ + f.leftLine++ + rightLine = f.rightLine + leftLine = f.leftLine + } + + s := f.line.buffer.String() + f.Lines = append(f.Lines, &JSONLine{ + LineNum: f.lineCount, + RightLine: rightLine, + LeftLine: leftLine, + Indent: f.line.indent, + Text: s, + Change: f.line.change, + Key: f.line.key, + Val: f.line.val, + }) +} + +func (f *JSONFormatter) printKey(name string) { + if !f.inArray[len(f.inArray)-1] { + f.line.key = name + fmt.Fprintf(f.line.buffer, `"%s": `, name) + } +} + +func (f *JSONFormatter) printComma() { + f.size[len(f.size)-1]-- + if f.size[len(f.size)-1] > 0 { + f.line.buffer.WriteRune(',') + } +} + +func (f *JSONFormatter) printValue(value interface{}) { + switch value.(type) { + case string: + f.line.val = value + fmt.Fprintf(f.line.buffer, `"%s"`, value) + case nil: + f.line.val = "null" + f.line.buffer.WriteString("null") + default: + f.line.val = value + fmt.Fprintf(f.line.buffer, `%#v`, value) + } +} + +func (f *JSONFormatter) print(a string) { + f.line.buffer.WriteString(a) +} + +func (f *JSONFormatter) printRecursive(name string, value interface{}, change ChangeType) { + switch value.(type) { + case map[string]interface{}: + f.newLine(change) + f.printKey(name) + f.print("{") + f.closeLine() + + m := value.(map[string]interface{}) + size := len(m) + f.push(name, size, false) + + keys := sortKeys(m) + for _, key := range keys { + f.printRecursive(key, m[key], change) + } + f.pop() + + f.newLine(change) + f.print("}") + f.printComma() + f.closeLine() + + case []interface{}: + f.newLine(change) + f.printKey(name) + f.print("[") + f.closeLine() + + s := value.([]interface{}) + size := len(s) + f.push("", size, true) + for _, item := range s { + f.printRecursive("", item, change) + } + f.pop() + + f.newLine(change) + f.print("]") + f.printComma() + f.closeLine() + + default: + f.newLine(change) + f.printKey(name) + f.printValue(value) + f.printComma() + f.closeLine() + } +} + +func sortKeys(m map[string]interface{}) (keys []string) { + keys = make([]string, 0, len(m)) + for key := range m { + keys = append(keys, key) + } + sort.Strings(keys) + return +} diff --git a/pkg/components/imguploader/imguploader.go b/pkg/components/imguploader/imguploader.go index ae5c89a0049..883ef8eefda 100644 --- a/pkg/components/imguploader/imguploader.go +++ b/pkg/components/imguploader/imguploader.go @@ -47,10 +47,11 @@ func NewImageUploader() (ImageUploader, error) { return nil, fmt.Errorf("Could not find url key for image.uploader.webdav") } + public_url := webdavSec.Key("public_url").String() username := webdavSec.Key("username").String() password := webdavSec.Key("password").String() - return NewWebdavImageUploader(url, username, password) + return NewWebdavImageUploader(url, username, password, public_url) } return NopImageUploader{}, nil diff --git a/pkg/components/imguploader/s3uploader.go b/pkg/components/imguploader/s3uploader.go index 4f8632f965c..302420a27f4 100644 --- a/pkg/components/imguploader/s3uploader.go +++ b/pkg/components/imguploader/s3uploader.go @@ -78,5 +78,9 @@ func (u *S3Uploader) Upload(imageDiskPath string) (string, error) { return "", err } - return "https://" + u.bucket + ".s3.amazonaws.com/" + key, nil + if u.region == "us-east-1" { + return "https://" + u.bucket + ".s3.amazonaws.com/" + key, nil + } else { + return "https://" + u.bucket + ".s3-" + u.region + ".amazonaws.com/" + key, nil + } } diff --git a/pkg/components/imguploader/s3uploader_test.go b/pkg/components/imguploader/s3uploader_test.go index 
1204a2db4e2..f75ad05af64 100644 --- a/pkg/components/imguploader/s3uploader_test.go +++ b/pkg/components/imguploader/s3uploader_test.go @@ -8,7 +8,7 @@ import ( ) func TestUploadToS3(t *testing.T) { - SkipConvey("[Integration test] for external_image_store.webdav", t, func() { + SkipConvey("[Integration test] for external_image_store.s3", t, func() { setting.NewConfigContext(&setting.CommandLineArgs{ HomePath: "../../../", }) diff --git a/pkg/components/imguploader/webdavuploader.go b/pkg/components/imguploader/webdavuploader.go index 93b3f9d2efa..4a056e3a48a 100644 --- a/pkg/components/imguploader/webdavuploader.go +++ b/pkg/components/imguploader/webdavuploader.go @@ -14,12 +14,14 @@ import ( ) type WebdavUploader struct { - url string - username string - password string + url string + username string + password string + public_url string } var netTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 60 * time.Second, }).Dial, @@ -33,7 +35,8 @@ var netClient = &http.Client{ func (u *WebdavUploader) Upload(pa string) (string, error) { url, _ := url.Parse(u.url) - url.Path = path.Join(url.Path, util.GetRandomString(20)+".png") + filename := util.GetRandomString(20) + ".png" + url.Path = path.Join(url.Path, filename) imgData, err := ioutil.ReadFile(pa) req, err := http.NewRequest("PUT", url.String(), bytes.NewReader(imgData)) @@ -53,13 +56,20 @@ func (u *WebdavUploader) Upload(pa string) (string, error) { return "", fmt.Errorf("Failed to upload image. Returned statuscode %v body %s", res.StatusCode, body) } + if u.public_url != "" { + publicURL, _ := url.Parse(u.public_url) + publicURL.Path = path.Join(publicURL.Path, filename) + return publicURL.String(), nil + } + return url.String(), nil } -func NewWebdavImageUploader(url, username, passwrod string) (*WebdavUploader, error) { +func NewWebdavImageUploader(url, username, password, public_url string) (*WebdavUploader, error) { return &WebdavUploader{ - url: url, - username: username, - password: passwrod, + url: url, + username: username, + password: password, + public_url: public_url, }, nil } diff --git a/pkg/components/imguploader/webdavuploader_test.go b/pkg/components/imguploader/webdavuploader_test.go index 273cd1c2a86..e88e28bd712 100644 --- a/pkg/components/imguploader/webdavuploader_test.go +++ b/pkg/components/imguploader/webdavuploader_test.go @@ -7,12 +7,21 @@ import ( ) func TestUploadToWebdav(t *testing.T) { - webdavUploader, _ := NewWebdavImageUploader("http://localhost:9998/dav/", "username", "password") + // Can be tested with this docker container: https://hub.docker.com/r/morrisjobke/webdav/ SkipConvey("[Integration test] for external_image_store.webdav", t, func() { + webdavUploader, _ := NewWebdavImageUploader("http://localhost:8888/webdav/", "test", "test", "") path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png") So(err, ShouldBeNil) - So(path, ShouldNotEqual, "") + So(path, ShouldStartWith, "http://localhost:8888/webdav/") + }) + + SkipConvey("[Integration test] for external_image_store.webdav with public url", t, func() { + webdavUploader, _ := NewWebdavImageUploader("http://localhost:8888/webdav/", "test", "test", "http://publicurl:8888/webdav") + path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png") + + So(err, ShouldBeNil) + So(path, ShouldStartWith, "http://publicurl:8888/webdav/") }) } diff --git a/pkg/components/securejsondata/securejsondata.go b/pkg/components/securejsondata/securejsondata.go index 
d7bec6894b0..caf4b369483 100644 --- a/pkg/components/securejsondata/securejsondata.go +++ b/pkg/components/securejsondata/securejsondata.go @@ -1,6 +1,7 @@ package securejsondata import ( + "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/util" ) @@ -10,7 +11,12 @@ type SecureJsonData map[string][]byte func (s SecureJsonData) Decrypt() map[string]string { decrypted := make(map[string]string) for key, data := range s { - decrypted[key] = string(util.Decrypt(data, setting.SecretKey)) + decryptedData, err := util.Decrypt(data, setting.SecretKey) + if err != nil { + log.Fatal(4, err.Error()) + } + + decrypted[key] = string(decryptedData) } return decrypted } @@ -18,7 +24,12 @@ func (s SecureJsonData) Decrypt() map[string]string { func GetEncryptedJsonData(sjd map[string]string) SecureJsonData { encrypted := make(SecureJsonData) for key, data := range sjd { - encrypted[key] = util.Encrypt([]byte(data), setting.SecretKey) + encryptedData, err := util.Encrypt([]byte(data), setting.SecretKey) + if err != nil { + log.Fatal(4, err.Error()) + } + + encrypted[key] = encryptedData } return encrypted } diff --git a/pkg/log/log.go b/pkg/log/log.go index fe0b312db23..56b2eb50c9f 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -15,6 +15,8 @@ import ( "github.com/go-stack/stack" "github.com/inconshreveable/log15" "github.com/inconshreveable/log15/term" + + "github.com/grafana/grafana/pkg/util" ) var Root log15.Logger @@ -34,7 +36,7 @@ func New(logger string, ctx ...interface{}) Logger { func Trace(format string, v ...interface{}) { var message string if len(v) > 0 { - message = fmt.Sprintf(format, v) + message = fmt.Sprintf(format, v...) } else { message = format } @@ -45,7 +47,7 @@ func Trace(format string, v ...interface{}) { func Debug(format string, v ...interface{}) { var message string if len(v) > 0 { - message = fmt.Sprintf(format, v) + message = fmt.Sprintf(format, v...) } else { message = format } @@ -60,7 +62,7 @@ func Debug2(message string, v ...interface{}) { func Info(format string, v ...interface{}) { var message string if len(v) > 0 { - message = fmt.Sprintf(format, v) + message = fmt.Sprintf(format, v...) } else { message = format } @@ -75,7 +77,7 @@ func Info2(message string, v ...interface{}) { func Warn(format string, v ...interface{}) { var message string if len(v) > 0 { - message = fmt.Sprintf(format, v) + message = fmt.Sprintf(format, v...) } else { message = format } @@ -88,7 +90,7 @@ func Warn2(message string, v ...interface{}) { } func Error(skip int, format string, v ...interface{}) { - Root.Error(fmt.Sprintf(format, v)) + Root.Error(fmt.Sprintf(format, v...)) } func Error2(message string, v ...interface{}) { @@ -96,7 +98,7 @@ func Error2(message string, v ...interface{}) { } func Critical(skip int, format string, v ...interface{}) { - Root.Crit(fmt.Sprintf(format, v)) + Root.Crit(fmt.Sprintf(format, v...)) } func Fatal(skip int, format string, v ...interface{}) { @@ -172,7 +174,7 @@ func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) { Close() defaultLevelName, _ := getLogLevelFromConfig("log", "info", cfg) - defaultFilters := getFilters(cfg.Section("log").Key("filters").Strings(" ")) + defaultFilters := getFilters(util.SplitString(cfg.Section("log").Key("filters").String())) handlers := make([]log15.Handler, 0) @@ -185,7 +187,7 @@ func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) { // Log level. 
_, level := getLogLevelFromConfig("log."+mode, defaultLevelName, cfg) - modeFilters := getFilters(sec.Key("filters").Strings(" ")) + modeFilters := getFilters(util.SplitString(sec.Key("filters").String())) format := getLogFormat(sec.Key("format").MustString("")) var handler log15.Handler diff --git a/pkg/metrics/meter.go b/pkg/metrics/meter.go index 8744a5cd040..265bff99cb6 100644 --- a/pkg/metrics/meter.go +++ b/pkg/metrics/meter.go @@ -124,7 +124,7 @@ func (m *StandardMeter) Count() int64 { return count } -// Mark records the occurance of n events. +// Mark records the occurrence of n events. func (m *StandardMeter) Mark(n int64) { m.lock.Lock() defer m.lock.Unlock() diff --git a/pkg/middleware/auth_proxy.go b/pkg/middleware/auth_proxy.go index e02e31f9152..8e94e1582b0 100644 --- a/pkg/middleware/auth_proxy.go +++ b/pkg/middleware/auth_proxy.go @@ -13,7 +13,7 @@ import ( "github.com/grafana/grafana/pkg/setting" ) -func initContextWithAuthProxy(ctx *Context) bool { +func initContextWithAuthProxy(ctx *Context, orgId int64) bool { if !setting.AuthProxyEnabled { return false } @@ -30,6 +30,7 @@ func initContextWithAuthProxy(ctx *Context) bool { } query := getSignedInUserQueryForProxyAuth(proxyHeaderValue) + query.OrgId = orgId if err := bus.Dispatch(query); err != nil { if err != m.ErrUserNotFound { ctx.Handle(500, "Failed to find user specified in auth proxy header", err) @@ -46,7 +47,7 @@ func initContextWithAuthProxy(ctx *Context) bool { ctx.Handle(500, "Failed to create user specified in auth proxy header", err) return true } - query = &m.GetSignedInUserQuery{UserId: cmd.Result.Id} + query = &m.GetSignedInUserQuery{UserId: cmd.Result.Id, OrgId: orgId} if err := bus.Dispatch(query); err != nil { ctx.Handle(500, "Failed find user after creation", err) return true diff --git a/pkg/middleware/logger.go b/pkg/middleware/logger.go index 9bed7cbe16b..4db0aac069f 100644 --- a/pkg/middleware/logger.go +++ b/pkg/middleware/logger.go @@ -49,9 +49,9 @@ func Logger() macaron.Handler { if ctx, ok := c.Data["ctx"]; ok { ctxTyped := ctx.(*Context) if status == 500 { - ctxTyped.Logger.Error("Request Completed", "method", req.Method, "path", req.URL.Path, "status", status, "remote_addr", c.RemoteAddr(), "time_ms", timeTakenMs, "size", rw.Size()) + ctxTyped.Logger.Error("Request Completed", "method", req.Method, "path", req.URL.Path, "status", status, "remote_addr", c.RemoteAddr(), "time_ms", int64(timeTakenMs), "size", rw.Size(), "referer", req.Referer()) } else { - ctxTyped.Logger.Info("Request Completed", "method", req.Method, "path", req.URL.Path, "status", status, "remote_addr", c.RemoteAddr(), "time_ms", timeTakenMs, "size", rw.Size()) + ctxTyped.Logger.Info("Request Completed", "method", req.Method, "path", req.URL.Path, "status", status, "remote_addr", c.RemoteAddr(), "time_ms", int64(timeTakenMs), "size", rw.Size(), "referer", req.Referer()) } } } diff --git a/pkg/middleware/middleware.go b/pkg/middleware/middleware.go index 4b59fada62e..5aafe12d374 100644 --- a/pkg/middleware/middleware.go +++ b/pkg/middleware/middleware.go @@ -39,6 +39,12 @@ func GetContextHandler() macaron.Handler { Logger: log.New("context"), } + orgId := int64(0) + orgIdHeader := ctx.Req.Header.Get("X-Grafana-Org-Id") + if orgIdHeader != "" { + orgId, _ = strconv.ParseInt(orgIdHeader, 10, 64) + } + // the order in which these are tested are important // look for api key in Authorization header first // then init session and look for userId in session @@ -46,9 +52,9 @@ func GetContextHandler() macaron.Handler { // then test if 
anonymous access is enabled if initContextWithRenderAuth(ctx) || initContextWithApiKey(ctx) || - initContextWithBasicAuth(ctx) || - initContextWithAuthProxy(ctx) || - initContextWithUserSessionCookie(ctx) || + initContextWithBasicAuth(ctx, orgId) || + initContextWithAuthProxy(ctx, orgId) || + initContextWithUserSessionCookie(ctx, orgId) || initContextWithAnonymousUser(ctx) { } @@ -68,18 +74,18 @@ func initContextWithAnonymousUser(ctx *Context) bool { if err := bus.Dispatch(&orgQuery); err != nil { log.Error(3, "Anonymous access organization error: '%s': %s", setting.AnonymousOrgName, err) return false - } else { - ctx.IsSignedIn = false - ctx.AllowAnonymous = true - ctx.SignedInUser = &m.SignedInUser{} - ctx.OrgRole = m.RoleType(setting.AnonymousOrgRole) - ctx.OrgId = orgQuery.Result.Id - ctx.OrgName = orgQuery.Result.Name - return true } + + ctx.IsSignedIn = false + ctx.AllowAnonymous = true + ctx.SignedInUser = &m.SignedInUser{} + ctx.OrgRole = m.RoleType(setting.AnonymousOrgRole) + ctx.OrgId = orgQuery.Result.Id + ctx.OrgName = orgQuery.Result.Name + return true } -func initContextWithUserSessionCookie(ctx *Context) bool { +func initContextWithUserSessionCookie(ctx *Context, orgId int64) bool { // initialize session if err := ctx.Session.Start(ctx); err != nil { ctx.Logger.Error("Failed to start session", "error", err) @@ -91,15 +97,15 @@ func initContextWithUserSessionCookie(ctx *Context) bool { return false } - query := m.GetSignedInUserQuery{UserId: userId} + query := m.GetSignedInUserQuery{UserId: userId, OrgId: orgId} if err := bus.Dispatch(&query); err != nil { ctx.Logger.Error("Failed to get user with id", "userId", userId) return false - } else { - ctx.SignedInUser = query.Result - ctx.IsSignedIn = true - return true } + + ctx.SignedInUser = query.Result + ctx.IsSignedIn = true + return true } func initContextWithApiKey(ctx *Context) bool { @@ -114,30 +120,31 @@ func initContextWithApiKey(ctx *Context) bool { ctx.JsonApiErr(401, "Invalid API key", err) return true } + // fetch key keyQuery := m.GetApiKeyByNameQuery{KeyName: decoded.Name, OrgId: decoded.OrgId} if err := bus.Dispatch(&keyQuery); err != nil { ctx.JsonApiErr(401, "Invalid API key", err) return true - } else { - apikey := keyQuery.Result + } - // validate api key - if !apikeygen.IsValid(decoded, apikey.Key) { - ctx.JsonApiErr(401, "Invalid API key", err) - return true - } + apikey := keyQuery.Result - ctx.IsSignedIn = true - ctx.SignedInUser = &m.SignedInUser{} - ctx.OrgRole = apikey.Role - ctx.ApiKeyId = apikey.Id - ctx.OrgId = apikey.OrgId + // validate api key + if !apikeygen.IsValid(decoded, apikey.Key) { + ctx.JsonApiErr(401, "Invalid API key", err) return true } + + ctx.IsSignedIn = true + ctx.SignedInUser = &m.SignedInUser{} + ctx.OrgRole = apikey.Role + ctx.ApiKeyId = apikey.Id + ctx.OrgId = apikey.OrgId + return true } -func initContextWithBasicAuth(ctx *Context) bool { +func initContextWithBasicAuth(ctx *Context, orgId int64) bool { if !setting.BasicAuthEnabled { return false @@ -168,15 +175,15 @@ func initContextWithBasicAuth(ctx *Context) bool { return true } - query := m.GetSignedInUserQuery{UserId: user.Id} + query := m.GetSignedInUserQuery{UserId: user.Id, OrgId: orgId} if err := bus.Dispatch(&query); err != nil { ctx.JsonApiErr(401, "Authentication error", err) return true - } else { - ctx.SignedInUser = query.Result - ctx.IsSignedIn = true - return true } + + ctx.SignedInUser = query.Result + ctx.IsSignedIn = true + return true } // Handle handles and logs error by given status. 
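
The middleware change above threads an optional X-Grafana-Org-Id request header through the basic auth, auth proxy, and session cookie paths, so an API client can act against a specific organization without first switching the user's active org. A minimal client-side sketch of the idea; the URL, path, and credentials below are placeholders for illustration, not values from this patch:

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	// hypothetical Grafana endpoint, for illustration only
    	req, err := http.NewRequest("GET", "http://localhost:3000/api/dashboards/db/my-dash", nil)
    	if err != nil {
    		panic(err)
    	}
    	// basic auth is one of the paths that now receives the parsed org id
    	req.SetBasicAuth("admin", "admin")
    	// parsed by GetContextHandler via strconv.ParseInt; a missing or
    	// malformed header leaves orgId at 0 (presumably the default org)
    	req.Header.Set("X-Grafana-Org-Id", "2")

    	res, err := http.DefaultClient.Do(req)
    	if err != nil {
    		panic(err)
    	}
    	defer res.Body.Close()
    	fmt.Println(res.Status)
    }
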
diff --git a/pkg/middleware/org_redirect.go b/pkg/middleware/org_redirect.go index 5945663d4d1..a5f90d60e47 100644 --- a/pkg/middleware/org_redirect.go +++ b/pkg/middleware/org_redirect.go @@ -1,11 +1,13 @@ package middleware import ( + "fmt" "net/http" "strconv" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" "gopkg.in/macaron.v1" ) @@ -39,6 +41,7 @@ func OrgRedirect() macaron.Handler { return } - c.Redirect(c.Req.URL.String(), 302) + newUrl := setting.ToAbsUrl(fmt.Sprintf("%s?%s", c.Req.URL.Path, c.Req.URL.Query().Encode())) + c.Redirect(newUrl, 302) } } diff --git a/pkg/models/dashboard_version.go b/pkg/models/dashboard_version.go new file mode 100644 index 00000000000..06b5797e57c --- /dev/null +++ b/pkg/models/dashboard_version.go @@ -0,0 +1,71 @@ +package models + +import ( + "errors" + "time" + + "github.com/grafana/grafana/pkg/components/simplejson" +) + +var ( + ErrDashboardVersionNotFound = errors.New("Dashboard version not found") + ErrNoVersionsForDashboardId = errors.New("No dashboard versions found for the given DashboardId") +) + +// A DashboardVersion represents the comparable data in a dashboard, allowing +// diffs of the dashboard to be performed. +type DashboardVersion struct { + Id int64 `json:"id"` + DashboardId int64 `json:"dashboardId"` + ParentVersion int `json:"parentVersion"` + RestoredFrom int `json:"restoredFrom"` + Version int `json:"version"` + + Created time.Time `json:"created"` + CreatedBy int64 `json:"createdBy"` + + Message string `json:"message"` + Data *simplejson.Json `json:"data"` +} + +// DashboardVersionMeta extends the dashboard version model with the names +// associated with the UserIds, overriding the field with the same name from +// the DashboardVersion model. +type DashboardVersionMeta struct { + DashboardVersion + CreatedBy string `json:"createdBy"` +} + +// DashboardVersionDTO represents a dashboard version, without the dashboard +// map. 
+type DashboardVersionDTO struct { + Id int64 `json:"id"` + DashboardId int64 `json:"dashboardId"` + ParentVersion int `json:"parentVersion"` + RestoredFrom int `json:"restoredFrom"` + Version int `json:"version"` + Created time.Time `json:"created"` + CreatedBy string `json:"createdBy"` + Message string `json:"message"` +} + +// +// Queries +// + +type GetDashboardVersionQuery struct { + DashboardId int64 + OrgId int64 + Version int + + Result *DashboardVersion +} + +type GetDashboardVersionsQuery struct { + DashboardId int64 + OrgId int64 + Limit int + Start int + + Result []*DashboardVersionDTO +} diff --git a/pkg/models/dashboards.go b/pkg/models/dashboards.go index 634b26c3f29..0463e9c209b 100644 --- a/pkg/models/dashboards.go +++ b/pkg/models/dashboards.go @@ -98,12 +98,17 @@ func NewDashboardFromJson(data *simplejson.Json) *Dashboard { // GetDashboardModel turns the command into the savable model func (cmd *SaveDashboardCommand) GetDashboardModel() *Dashboard { dash := NewDashboardFromJson(cmd.Dashboard) + userId := cmd.UserId - if dash.Data.Get("version").MustInt(0) == 0 { - dash.CreatedBy = cmd.UserId + if userId == 0 { + userId = -1 } - dash.UpdatedBy = cmd.UserId + if dash.Data.Get("version").MustInt(0) == 0 { + dash.CreatedBy = userId + } + + dash.UpdatedBy = userId dash.OrgId = cmd.OrgId dash.PluginId = cmd.PluginId dash.UpdateSlug() @@ -126,11 +131,13 @@ func (dash *Dashboard) UpdateSlug() { // type SaveDashboardCommand struct { - Dashboard *simplejson.Json `json:"dashboard" binding:"Required"` - UserId int64 `json:"userId"` - OrgId int64 `json:"-"` - Overwrite bool `json:"overwrite"` - PluginId string `json:"-"` + Dashboard *simplejson.Json `json:"dashboard" binding:"Required"` + UserId int64 `json:"userId"` + Overwrite bool `json:"overwrite"` + Message string `json:"message"` + OrgId int64 `json:"-"` + RestoredFrom int `json:"-"` + PluginId string `json:"-"` Result *Dashboard } @@ -145,7 +152,8 @@ type DeleteDashboardCommand struct { // type GetDashboardQuery struct { - Slug string + Slug string // required if no Id is specified + Id int64 // optional if slug is set OrgId int64 Result *Dashboard diff --git a/pkg/models/datasource.go b/pkg/models/datasource.go index 804880a5d10..3fdfd9c47da 100644 --- a/pkg/models/datasource.go +++ b/pkg/models/datasource.go @@ -116,8 +116,9 @@ type UpdateDataSourceCommand struct { JsonData *simplejson.Json `json:"jsonData"` SecureJsonData map[string]string `json:"secureJsonData"` - OrgId int64 `json:"-"` - Id int64 `json:"-"` + OrgId int64 `json:"-"` + Id int64 `json:"-"` + Version int `json:"-"` } type DeleteDataSourceByIdCommand struct { diff --git a/pkg/models/datasource_cache_test.go b/pkg/models/datasource_cache_test.go index 25fee55529d..5e821ea28c4 100644 --- a/pkg/models/datasource_cache_test.go +++ b/pkg/models/datasource_cache_test.go @@ -54,10 +54,15 @@ func TestDataSourceCache(t *testing.T) { }) ds.JsonData = json + + tlsCaCert, _ := util.Encrypt([]byte(caCert), "password") + tlsClientCert, _ := util.Encrypt([]byte(clientCert), "password") + tlsClientKey, _ := util.Encrypt([]byte(clientKey), "password") + ds.SecureJsonData = map[string][]byte{ - "tlsCACert": util.Encrypt([]byte(caCert), "password"), - "tlsClientCert": util.Encrypt([]byte(clientCert), "password"), - "tlsClientKey": util.Encrypt([]byte(clientKey), "password"), + "tlsCACert": tlsCaCert, + "tlsClientCert": tlsClientCert, + "tlsClientKey": tlsClientKey, } ds.Updated = t.Add(-1 * time.Minute) diff --git a/pkg/models/health.go b/pkg/models/health.go new file mode 
100644 index 00000000000..ee206ed2d6f --- /dev/null +++ b/pkg/models/health.go @@ -0,0 +1,3 @@ +package models + +type GetDBHealthQuery struct{} diff --git a/pkg/models/models.go b/pkg/models/models.go index 3f4b27ed6ab..c2560021ee1 100644 --- a/pkg/models/models.go +++ b/pkg/models/models.go @@ -7,5 +7,5 @@ const ( GOOGLE TWITTER GENERIC - GRAFANANET + GRAFANA_COM ) diff --git a/pkg/models/notifications.go b/pkg/models/notifications.go index ad7aed3bc50..089d7c4360d 100644 --- a/pkg/models/notifications.go +++ b/pkg/models/notifications.go @@ -3,6 +3,7 @@ package models import "errors" var ErrInvalidEmailCode = errors.New("Invalid or expired email code") +var ErrSmtpNotEnabled = errors.New("SMTP not configured, check your grafana.ini config file's [smtp] section.") type SendEmailCommand struct { To []string diff --git a/pkg/models/user.go b/pkg/models/user.go index e0a36be8c0a..bdf81056232 100644 --- a/pkg/models/user.go +++ b/pkg/models/user.go @@ -117,6 +117,7 @@ type GetSignedInUserQuery struct { UserId int64 Login string Email string + OrgId int64 Result *SignedInUser } diff --git a/pkg/services/alerting/extractor.go b/pkg/services/alerting/extractor.go index 28a6b8c61a0..7f14e195799 100644 --- a/pkg/services/alerting/extractor.go +++ b/pkg/services/alerting/extractor.go @@ -89,7 +89,7 @@ func (e *DashAlertExtractor) GetAlerts() ([]*m.Alert, error) { continue } - // backward compatability check, can be removed later + // backward compatibility check, can be removed later enabled, hasEnabled := jsonAlert.CheckGet("enabled") if hasEnabled && enabled.MustBool() == false { continue diff --git a/pkg/services/alerting/notifiers/email.go b/pkg/services/alerting/notifiers/email.go index dcf71f0f99e..cdde05ba62f 100644 --- a/pkg/services/alerting/notifiers/email.go +++ b/pkg/services/alerting/notifiers/email.go @@ -69,6 +69,11 @@ func (this *EmailNotifier) Notify(evalContext *alerting.EvalContext) error { return err } + error := "" + if evalContext.Error != nil { + error = evalContext.Error.Error() + } + cmd := &m.SendEmailCommandSync{ SendEmailCommand: m.SendEmailCommand{ Subject: evalContext.GetNotificationTitle(), @@ -78,6 +83,7 @@ func (this *EmailNotifier) Notify(evalContext *alerting.EvalContext) error { "Name": evalContext.Rule.Name, "StateModel": evalContext.GetStateModel(), "Message": evalContext.Rule.Message, + "Error": error, "RuleUrl": ruleUrl, "ImageLink": "", "EmbededImage": "", diff --git a/pkg/services/alerting/notifiers/sensu.go b/pkg/services/alerting/notifiers/sensu.go index dbe31f4cf84..00661f864b3 100644 --- a/pkg/services/alerting/notifiers/sensu.go +++ b/pkg/services/alerting/notifiers/sensu.go @@ -1,14 +1,15 @@ package notifiers import ( + "strconv" + "strings" + "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/metrics" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/services/alerting" - "strconv" - "strings" ) func init() { @@ -23,6 +24,14 @@ func init() { Url +
    + Source + +
    +
    + Handler + +
    Username @@ -46,7 +55,9 @@ func NewSensuNotifier(model *m.AlertNotification) (alerting.Notifier, error) { NotifierBase: NewNotifierBase(model.Id, model.IsDefault, model.Name, model.Type, model.Settings), Url: url, User: model.Settings.Get("username").MustString(), + Source: model.Settings.Get("source").MustString(), Password: model.Settings.Get("password").MustString(), + Handler: model.Settings.Get("handler").MustString(), log: log.New("alerting.notifier.sensu"), }, nil } @@ -54,8 +65,10 @@ func NewSensuNotifier(model *m.AlertNotification) (alerting.Notifier, error) { type SensuNotifier struct { NotifierBase Url string + Source string User string Password string + Handler string log log.Logger } @@ -67,9 +80,13 @@ func (this *SensuNotifier) Notify(evalContext *alerting.EvalContext) error { bodyJSON.Set("ruleId", evalContext.Rule.Id) // Sensu alerts cannot have spaces in them bodyJSON.Set("name", strings.Replace(evalContext.Rule.Name, " ", "_", -1)) - // Sensu alerts require a command - // We set it to the grafana ruleID - bodyJSON.Set("source", "grafana_rule_"+strconv.FormatInt(evalContext.Rule.Id, 10)) + // Sensu alerts require a source. We set it to the user-specified value (optional), + // else we fallback and use the grafana ruleID. + if this.Source != "" { + bodyJSON.Set("source", this.Source) + } else { + bodyJSON.Set("source", "grafana_rule_"+strconv.FormatInt(evalContext.Rule.Id, 10)) + } // Finally, sensu expects an output // We set it to a default output bodyJSON.Set("output", "Grafana Metric Condition Met") @@ -83,6 +100,10 @@ func (this *SensuNotifier) Notify(evalContext *alerting.EvalContext) error { bodyJSON.Set("status", 0) } + if this.Handler != "" { + bodyJSON.Set("handler", this.Handler) + } + ruleUrl, err := evalContext.GetRuleUrl() if err == nil { bodyJSON.Set("ruleUrl", ruleUrl) diff --git a/pkg/services/alerting/notifiers/sensu_test.go b/pkg/services/alerting/notifiers/sensu_test.go index ffbdcfaf15c..40e3b1e1cc3 100644 --- a/pkg/services/alerting/notifiers/sensu_test.go +++ b/pkg/services/alerting/notifiers/sensu_test.go @@ -29,7 +29,9 @@ func TestSensuNotifier(t *testing.T) { Convey("from settings", func() { json := ` { - "url": "http://sensu-api.example.com:4567/results" + "url": "http://sensu-api.example.com:4567/results", + "source": "grafana_instance_01", + "handler": "myhandler" }` settingsJSON, _ := simplejson.NewJson([]byte(json)) @@ -46,6 +48,8 @@ func TestSensuNotifier(t *testing.T) { So(sensuNotifier.Name, ShouldEqual, "sensu") So(sensuNotifier.Type, ShouldEqual, "sensu") So(sensuNotifier.Url, ShouldEqual, "http://sensu-api.example.com:4567/results") + So(sensuNotifier.Source, ShouldEqual, "grafana_instance_01") + So(sensuNotifier.Handler, ShouldEqual, "myhandler") }) }) }) diff --git a/pkg/services/alerting/notifiers/telegram.go b/pkg/services/alerting/notifiers/telegram.go index f70d299bc6c..71169c15599 100644 --- a/pkg/services/alerting/notifiers/telegram.go +++ b/pkg/services/alerting/notifiers/telegram.go @@ -87,7 +87,7 @@ func (this *TelegramNotifier) Notify(evalContext *alerting.EvalContext) error { bodyJSON.Set("chat_id", this.ChatID) bodyJSON.Set("parse_mode", "html") - message := fmt.Sprintf("%s\nState: %s\nMessage: %s\n", evalContext.GetNotificationTitle(), evalContext.Rule.Name, evalContext.Rule.Message) + message := fmt.Sprintf("%s\nState: %s\nMessage: %s\n", evalContext.GetNotificationTitle(), evalContext.Rule.Name, evalContext.Rule.Message) ruleUrl, err := evalContext.GetRuleUrl() if err == nil { @@ -96,6 +96,19 @@ func (this 
*TelegramNotifier) Notify(evalContext *alerting.EvalContext) error { if evalContext.ImagePublicUrl != "" { message = message + fmt.Sprintf("Image: %s\n", evalContext.ImagePublicUrl) } + + metrics := "" + fieldLimitCount := 4 + for index, evt := range evalContext.EvalMatches { + metrics += fmt.Sprintf("\n%s: %s", evt.Metric, evt.Value) + if index > fieldLimitCount { + break + } + } + if metrics != "" { + message = message + fmt.Sprintf("\nMetrics:%s", metrics) + } + bodyJSON.Set("text", message) url := fmt.Sprintf(telegeramApiUrl, this.BotToken, "sendMessage") diff --git a/pkg/services/alerting/notifiers/victorops.go b/pkg/services/alerting/notifiers/victorops.go index a4e34a40b8a..2fbb2f64d1b 100644 --- a/pkg/services/alerting/notifiers/victorops.go +++ b/pkg/services/alerting/notifiers/victorops.go @@ -1,10 +1,10 @@ package notifiers import ( - "encoding/json" "time" "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/models" @@ -15,6 +15,8 @@ import ( // AlertStateCritical - Victorops uses "CRITICAL" string to indicate "Alerting" state const AlertStateCritical = "CRITICAL" +const AlertStateRecovery = "RECOVERY" + func init() { alerting.RegisterNotifier(&alerting.NotifierPlugin{ Type: "victorops", @@ -27,6 +29,15 @@ func init() { Url
    +
    + + +
    `, }) } @@ -34,6 +45,7 @@ func init() { // NewVictoropsNotifier creates an instance of VictoropsNotifier that // handles posting notifications to Victorops REST API func NewVictoropsNotifier(model *models.AlertNotification) (alerting.Notifier, error) { + autoResolve := model.Settings.Get("autoResolve").MustBool(true) url := model.Settings.Get("url").MustString() if url == "" { return nil, alerting.ValidationError{Reason: "Could not find victorops url property in settings"} @@ -42,6 +54,7 @@ func NewVictoropsNotifier(model *models.AlertNotification) (alerting.Notifier, e return &VictoropsNotifier{ NotifierBase: NewNotifierBase(model.Id, model.IsDefault, model.Name, model.Type, model.Settings), URL: url, + AutoResolve: autoResolve, log: log.New("alerting.notifier.victorops"), }, nil } @@ -51,8 +64,9 @@ func NewVictoropsNotifier(model *models.AlertNotification) (alerting.Notifier, e // Victorops specifications (http://victorops.force.com/knowledgebase/articles/Integration/Alert-Ingestion-API-Documentation/) type VictoropsNotifier struct { NotifierBase - URL string - log log.Logger + URL string + AutoResolve bool + log log.Logger } // Notify sends notification to Victorops via POST to URL endpoint @@ -66,6 +80,11 @@ func (this *VictoropsNotifier) Notify(evalContext *alerting.EvalContext) error { return err } + if evalContext.Rule.State == models.AlertStateOK && !this.AutoResolve { + this.log.Info("Not alerting VictorOps", "state", evalContext.Rule.State, "auto resolve", this.AutoResolve) + return nil + } + fields := make([]map[string]interface{}, 0) fieldLimitCount := 4 for index, evt := range evalContext.EvalMatches { @@ -92,20 +111,28 @@ func (this *VictoropsNotifier) Notify(evalContext *alerting.EvalContext) error { messageType = AlertStateCritical } - body := map[string]interface{}{ - "message_type": messageType, - "entity_id": evalContext.Rule.Name, - "timestamp": time.Now().Unix(), - "state_start_time": evalContext.StartTime.Unix(), - "state_message": evalContext.Rule.Message + "\n" + ruleUrl, - "monitoring_tool": "Grafana v" + setting.BuildVersion, + if evalContext.Rule.State == models.AlertStateOK { + messageType = AlertStateRecovery } - data, _ := json.Marshal(&body) + bodyJSON := simplejson.New() + bodyJSON.Set("message_type", messageType) + bodyJSON.Set("entity_id", evalContext.Rule.Name) + bodyJSON.Set("timestamp", time.Now().Unix()) + bodyJSON.Set("state_start_time", evalContext.StartTime.Unix()) + bodyJSON.Set("state_message", evalContext.Rule.Message) + bodyJSON.Set("monitoring_tool", "Grafana v"+setting.BuildVersion) + bodyJSON.Set("alert_url", ruleUrl) + + if evalContext.ImagePublicUrl != "" { + bodyJSON.Set("image_url", evalContext.ImagePublicUrl) + } + + data, _ := bodyJSON.MarshalJSON() cmd := &models.SendWebhookSync{Url: this.URL, Body: string(data)} if err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil { - this.log.Error("Failed to send victorops notification", "error", err, "webhook", this.Name) + this.log.Error("Failed to send Victorops notification", "error", err, "webhook", this.Name) return err } diff --git a/pkg/services/alerting/result_handler.go b/pkg/services/alerting/result_handler.go index 1298a5dda36..972fbd3a461 100644 --- a/pkg/services/alerting/result_handler.go +++ b/pkg/services/alerting/result_handler.go @@ -31,17 +31,15 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error { executionError := "" annotationData := simplejson.New() - if evalContext.Firing { - annotationData = 
simplejson.NewFromAny(evalContext.EvalMatches) + if len(evalContext.EvalMatches) > 0 { + annotationData.Set("evalMatches", simplejson.NewFromAny(evalContext.EvalMatches)) } if evalContext.Error != nil { executionError = evalContext.Error.Error() - annotationData.Set("errorMessage", executionError) - } - - if evalContext.NoDataFound { - annotationData.Set("no_data", true) + annotationData.Set("error", executionError) + } else if evalContext.NoDataFound { + annotationData.Set("noData", true) } countStateResult(evalContext.Rule.State) diff --git a/pkg/services/alerting/test_notification.go b/pkg/services/alerting/test_notification.go index 91b3814ac7b..7dc9a150d92 100644 --- a/pkg/services/alerting/test_notification.go +++ b/pkg/services/alerting/test_notification.go @@ -2,6 +2,7 @@ package alerting import ( "context" + "fmt" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/components/null" @@ -56,7 +57,7 @@ func createTestEvalContext(cmd *NotificationTestCommand) *EvalContext { } ctx.IsTestRun = true ctx.Firing = true - ctx.Error = nil + ctx.Error = fmt.Errorf("This is only a test") ctx.EvalMatches = evalMatchesBasedOnState() return ctx diff --git a/pkg/services/annotations/annotations.go b/pkg/services/annotations/annotations.go index d9d15bca34b..be9d3f2d4d0 100644 --- a/pkg/services/annotations/annotations.go +++ b/pkg/services/annotations/annotations.go @@ -4,6 +4,7 @@ import "github.com/grafana/grafana/pkg/components/simplejson" type Repository interface { Save(item *Item) error + Update(item *Item) error Find(query *ItemQuery) ([]*Item, error) Delete(params *DeleteParams) error } @@ -21,6 +22,14 @@ type ItemQuery struct { Limit int64 `json:"limit"` } +type PostParams struct { + DashboardId int64 `json:"dashboardId"` + PanelId int64 `json:"panelId"` + Epoch int64 `json:"epoch"` + Title string `json:"title"` + Text string `json:"text"` +} + type DeleteParams struct { AlertId int64 `json:"alertId"` DashboardId int64 `json:"dashboardId"` @@ -41,6 +50,7 @@ type ItemType string const ( AlertType ItemType = "alert" + EventType ItemType = "event" ) type Item struct { @@ -49,6 +59,7 @@ type Item struct { DashboardId int64 `json:"dashboardId"` PanelId int64 `json:"panelId"` CategoryId int64 `json:"categoryId"` + RegionId int64 `json:"regionId"` Type ItemType `json:"type"` Title string `json:"title"` Text string `json:"text"` diff --git a/pkg/services/notifications/mailer.go b/pkg/services/notifications/mailer.go index ae1348cbf99..df9f1138b15 100644 --- a/pkg/services/notifications/mailer.go +++ b/pkg/services/notifications/mailer.go @@ -107,7 +107,7 @@ func createDialer() (*gomail.Dialer, error) { func buildEmailMessage(cmd *m.SendEmailCommand) (*Message, error) { if !setting.Smtp.Enabled { - return nil, errors.New("Grafana mailing/smtp options not configured, contact your Grafana admin") + return nil, m.ErrSmtpNotEnabled } var buffer bytes.Buffer diff --git a/pkg/services/sqlstore/alert.go b/pkg/services/sqlstore/alert.go index 3fccba6267e..bc589f89c14 100644 --- a/pkg/services/sqlstore/alert.go +++ b/pkg/services/sqlstore/alert.go @@ -6,7 +6,6 @@ import ( "strings" "time" - "github.com/go-xorm/xorm" "github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" ) @@ -48,7 +47,7 @@ func GetAllAlertQueryHandler(query *m.GetAllAlertsQuery) error { return nil } -func deleteAlertByIdInternal(alertId int64, reason string, sess *xorm.Session) error { +func deleteAlertByIdInternal(alertId int64, reason string, sess *DBSession) error { sqlog.Debug("Deleting alert", 
"id", alertId, "reason", reason) if _, err := sess.Exec("DELETE FROM alert WHERE id = ?", alertId); err != nil { @@ -63,7 +62,7 @@ func deleteAlertByIdInternal(alertId int64, reason string, sess *xorm.Session) e } func DeleteAlertById(cmd *m.DeleteAlertCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { return deleteAlertByIdInternal(cmd.AlertId, "DeleteAlertCommand", sess) }) } @@ -123,7 +122,7 @@ func HandleAlertsQuery(query *m.GetAlertsQuery) error { return nil } -func DeleteAlertDefinition(dashboardId int64, sess *xorm.Session) error { +func DeleteAlertDefinition(dashboardId int64, sess *DBSession) error { alerts := make([]*m.Alert, 0) sess.Where("dashboard_id = ?", dashboardId).Find(&alerts) @@ -135,7 +134,7 @@ func DeleteAlertDefinition(dashboardId int64, sess *xorm.Session) error { } func SaveAlerts(cmd *m.SaveAlertsCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { existingAlerts, err := GetAlertsByDashboardId2(cmd.DashboardId, sess) if err != nil { return err @@ -153,7 +152,7 @@ func SaveAlerts(cmd *m.SaveAlertsCommand) error { }) } -func upsertAlerts(existingAlerts []*m.Alert, cmd *m.SaveAlertsCommand, sess *xorm.Session) error { +func upsertAlerts(existingAlerts []*m.Alert, cmd *m.SaveAlertsCommand, sess *DBSession) error { for _, alert := range cmd.Alerts { update := false var alertToUpdate *m.Alert @@ -197,7 +196,7 @@ func upsertAlerts(existingAlerts []*m.Alert, cmd *m.SaveAlertsCommand, sess *xor return nil } -func deleteMissingAlerts(alerts []*m.Alert, cmd *m.SaveAlertsCommand, sess *xorm.Session) error { +func deleteMissingAlerts(alerts []*m.Alert, cmd *m.SaveAlertsCommand, sess *DBSession) error { for _, missingAlert := range alerts { missing := true @@ -216,7 +215,7 @@ func deleteMissingAlerts(alerts []*m.Alert, cmd *m.SaveAlertsCommand, sess *xorm return nil } -func GetAlertsByDashboardId2(dashboardId int64, sess *xorm.Session) ([]*m.Alert, error) { +func GetAlertsByDashboardId2(dashboardId int64, sess *DBSession) ([]*m.Alert, error) { alerts := make([]*m.Alert, 0) err := sess.Where("dashboard_id = ?", dashboardId).Find(&alerts) @@ -228,7 +227,7 @@ func GetAlertsByDashboardId2(dashboardId int64, sess *xorm.Session) ([]*m.Alert, } func SetAlertState(cmd *m.SetAlertStateCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { alert := m.Alert{} if has, err := sess.Id(cmd.AlertId).Get(&alert); err != nil { @@ -262,7 +261,7 @@ func SetAlertState(cmd *m.SetAlertStateCommand) error { } func PauseAlert(cmd *m.PauseAlertCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { if len(cmd.AlertIds) == 0 { return fmt.Errorf("command contains no alertids") } @@ -292,7 +291,7 @@ func PauseAlert(cmd *m.PauseAlertCommand) error { } func PauseAllAlerts(cmd *m.PauseAllAlertCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var newState string if cmd.Paused { newState = string(m.AlertStatePaused) diff --git a/pkg/services/sqlstore/alert_notification.go b/pkg/services/sqlstore/alert_notification.go index 5acb53c3c09..5e66627f194 100644 --- a/pkg/services/sqlstore/alert_notification.go +++ b/pkg/services/sqlstore/alert_notification.go @@ -6,7 +6,6 @@ import ( "strings" "time" - "github.com/go-xorm/xorm" 
"github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" ) @@ -21,7 +20,7 @@ func init() { } func DeleteAlertNotification(cmd *m.DeleteAlertNotificationCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { sql := "DELETE FROM alert_notification WHERE alert_notification.org_id = ? AND alert_notification.id = ?" _, err := sess.Exec(sql, cmd.OrgId, cmd.Id) @@ -34,7 +33,7 @@ func DeleteAlertNotification(cmd *m.DeleteAlertNotificationCommand) error { } func GetAlertNotifications(query *m.GetAlertNotificationsQuery) error { - return getAlertNotificationInternal(query, x.NewSession()) + return getAlertNotificationInternal(query, newSession()) } func GetAllAlertNotifications(query *m.GetAllAlertNotificationsQuery) error { @@ -85,7 +84,7 @@ func GetAlertNotificationsToSend(query *m.GetAlertNotificationsToSendQuery) erro return nil } -func getAlertNotificationInternal(query *m.GetAlertNotificationsQuery, sess *xorm.Session) error { +func getAlertNotificationInternal(query *m.GetAlertNotificationsQuery, sess *DBSession) error { var sql bytes.Buffer params := make([]interface{}, 0) @@ -131,7 +130,7 @@ func getAlertNotificationInternal(query *m.GetAlertNotificationsQuery, sess *xor } func CreateAlertNotificationCommand(cmd *m.CreateAlertNotificationCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { existingQuery := &m.GetAlertNotificationsQuery{OrgId: cmd.OrgId, Name: cmd.Name} err := getAlertNotificationInternal(existingQuery, sess) @@ -163,7 +162,7 @@ func CreateAlertNotificationCommand(cmd *m.CreateAlertNotificationCommand) error } func UpdateAlertNotification(cmd *m.UpdateAlertNotificationCommand) error { - return inTransaction(func(sess *xorm.Session) (err error) { + return inTransaction(func(sess *DBSession) (err error) { current := m.AlertNotification{} if _, err = sess.Id(cmd.Id).Get(¤t); err != nil { diff --git a/pkg/services/sqlstore/annotation.go b/pkg/services/sqlstore/annotation.go index e219f48d2fe..ffad5bf2cad 100644 --- a/pkg/services/sqlstore/annotation.go +++ b/pkg/services/sqlstore/annotation.go @@ -5,7 +5,6 @@ import ( "fmt" "strings" - "github.com/go-xorm/xorm" "github.com/grafana/grafana/pkg/services/annotations" ) @@ -13,7 +12,7 @@ type SqlAnnotationRepo struct { } func (r *SqlAnnotationRepo) Save(item *annotations.Item) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { if _, err := sess.Table("annotation").Insert(item); err != nil { return err @@ -23,6 +22,17 @@ func (r *SqlAnnotationRepo) Save(item *annotations.Item) error { }) } +func (r *SqlAnnotationRepo) Update(item *annotations.Item) error { + return inTransaction(func(sess *DBSession) error { + + if _, err := sess.Table("annotation").Id(item.Id).Update(item); err != nil { + return err + } + + return nil + }) +} + func (r *SqlAnnotationRepo) Find(query *annotations.ItemQuery) ([]*annotations.Item, error) { var sql bytes.Buffer params := make([]interface{}, 0) @@ -86,7 +96,7 @@ func (r *SqlAnnotationRepo) Find(query *annotations.ItemQuery) ([]*annotations.I } func (r *SqlAnnotationRepo) Delete(params *annotations.DeleteParams) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { sql := "DELETE FROM annotation WHERE dashboard_id = ? AND panel_id = ?" 
diff --git a/pkg/services/sqlstore/apikey.go b/pkg/services/sqlstore/apikey.go index 1cca7b5e40b..0532f636625 100644 --- a/pkg/services/sqlstore/apikey.go +++ b/pkg/services/sqlstore/apikey.go @@ -3,7 +3,6 @@ package sqlstore import ( "time" - "github.com/go-xorm/xorm" "github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" ) @@ -24,7 +23,7 @@ func GetApiKeys(query *m.GetApiKeysQuery) error { } func DeleteApiKey(cmd *m.DeleteApiKeyCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var rawSql = "DELETE FROM api_key WHERE id=? and org_id=?" _, err := sess.Exec(rawSql, cmd.Id, cmd.OrgId) return err @@ -32,7 +31,7 @@ func DeleteApiKey(cmd *m.DeleteApiKeyCommand) error { } func AddApiKey(cmd *m.AddApiKeyCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { t := m.ApiKey{ OrgId: cmd.OrgId, Name: cmd.Name, diff --git a/pkg/services/sqlstore/dashboard.go b/pkg/services/sqlstore/dashboard.go index 7bd65ac4da8..50b02bf0970 100644 --- a/pkg/services/sqlstore/dashboard.go +++ b/pkg/services/sqlstore/dashboard.go @@ -3,8 +3,8 @@ package sqlstore import ( "bytes" "fmt" + "time" - "github.com/go-xorm/xorm" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/metrics" m "github.com/grafana/grafana/pkg/models" @@ -23,7 +23,7 @@ func init() { } func SaveDashboard(cmd *m.SaveDashboardCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { dash := cmd.GetDashboardModel() // try get existing dashboard @@ -63,16 +63,20 @@ func SaveDashboard(cmd *m.SaveDashboardCommand) error { if dash.Id != sameTitle.Id { if cmd.Overwrite { dash.Id = sameTitle.Id + dash.Version = sameTitle.Version } else { return m.ErrDashboardWithSameNameExists } } } + parentVersion := dash.Version affectedRows := int64(0) if dash.Id == 0 { + dash.Version = 1 metrics.M_Models_Dashboard_Insert.Inc(1) + dash.Data.Set("version", dash.Version) affectedRows, err = sess.Insert(dash) } else { dash.Version += 1 @@ -80,10 +84,32 @@ func SaveDashboard(cmd *m.SaveDashboardCommand) error { affectedRows, err = sess.Id(dash.Id).Update(dash) } + if err != nil { + return err + } + if affectedRows == 0 { return m.ErrDashboardNotFound } + dashVersion := &m.DashboardVersion{ + DashboardId: dash.Id, + ParentVersion: parentVersion, + RestoredFrom: cmd.RestoredFrom, + Version: dash.Version, + Created: time.Now(), + CreatedBy: dash.UpdatedBy, + Message: cmd.Message, + Data: dash.Data, + } + + // insert version entry + if affectedRows, err = sess.Insert(dashVersion); err != nil { + return err + } else if affectedRows == 0 { + return m.ErrDashboardNotFound + } + // delete existing tabs _, err = sess.Exec("DELETE FROM dashboard_tag WHERE dashboard_id=?", dash.Id) if err != nil { @@ -107,8 +133,9 @@ func SaveDashboard(cmd *m.SaveDashboardCommand) error { } func GetDashboard(query *m.GetDashboardQuery) error { - dashboard := m.Dashboard{Slug: query.Slug, OrgId: query.OrgId} + dashboard := m.Dashboard{Slug: query.Slug, OrgId: query.OrgId, Id: query.Id} has, err := x.Get(&dashboard) + if err != nil { return err } else if has == false { @@ -117,7 +144,6 @@ func GetDashboard(query *m.GetDashboardQuery) error { dashboard.Data.Set("id", dashboard.Id) query.Result = &dashboard - return nil } @@ -220,7 +246,7 @@ func GetDashboardTags(query *m.GetDashboardTagsQuery) error { } func DeleteDashboard(cmd *m.DeleteDashboardCommand) 
error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { dashboard := m.Dashboard{Slug: cmd.Slug, OrgId: cmd.OrgId} has, err := sess.Get(&dashboard) if err != nil { @@ -234,6 +260,7 @@ func DeleteDashboard(cmd *m.DeleteDashboardCommand) error { "DELETE FROM star WHERE dashboard_id = ? ", "DELETE FROM dashboard WHERE id = ?", "DELETE FROM playlist_item WHERE type = 'dashboard_by_id' AND value = ?", + "DELETE FROM dashboard_version WHERE dashboard_id = ?", } for _, sql := range deletes { @@ -243,7 +270,7 @@ func DeleteDashboard(cmd *m.DeleteDashboardCommand) error { } } - if err := DeleteAlertDefinition(dashboard.Id, sess.Session); err != nil { + if err := DeleteAlertDefinition(dashboard.Id, sess); err != nil { return nil } diff --git a/pkg/services/sqlstore/dashboard_snapshot.go b/pkg/services/sqlstore/dashboard_snapshot.go index bb3a4f8f57e..810189b3246 100644 --- a/pkg/services/sqlstore/dashboard_snapshot.go +++ b/pkg/services/sqlstore/dashboard_snapshot.go @@ -3,7 +3,6 @@ package sqlstore import ( "time" - "github.com/go-xorm/xorm" "github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" @@ -18,7 +17,7 @@ func init() { } func DeleteExpiredSnapshots(cmd *m.DeleteExpiredSnapshotsCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var expiredCount int64 = 0 if setting.SnapShotRemoveExpired { @@ -36,7 +35,7 @@ func DeleteExpiredSnapshots(cmd *m.DeleteExpiredSnapshotsCommand) error { } func CreateDashboardSnapshot(cmd *m.CreateDashboardSnapshotCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { // never var expires = time.Now().Add(time.Hour * 24 * 365 * 50) @@ -65,7 +64,7 @@ func CreateDashboardSnapshot(cmd *m.CreateDashboardSnapshotCommand) error { } func DeleteDashboardSnapshot(cmd *m.DeleteDashboardSnapshotCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var rawSql = "DELETE FROM dashboard_snapshot WHERE delete_key=?" _, err := sess.Exec(rawSql, cmd.DeleteKey) return err diff --git a/pkg/services/sqlstore/dashboard_version.go b/pkg/services/sqlstore/dashboard_version.go new file mode 100644 index 00000000000..0b296650b4b --- /dev/null +++ b/pkg/services/sqlstore/dashboard_version.go @@ -0,0 +1,60 @@ +package sqlstore + +import ( + "github.com/grafana/grafana/pkg/bus" + m "github.com/grafana/grafana/pkg/models" +) + +func init() { + bus.AddHandler("sql", GetDashboardVersion) + bus.AddHandler("sql", GetDashboardVersions) +} + +// GetDashboardVersion gets the dashboard version for the given dashboard ID and version number. +func GetDashboardVersion(query *m.GetDashboardVersionQuery) error { + version := m.DashboardVersion{} + has, err := x.Where("dashboard_version.dashboard_id=? AND dashboard_version.version=? AND dashboard.org_id=?", query.DashboardId, query.Version, query.OrgId). + Join("LEFT", "dashboard", `dashboard.id = dashboard_version.dashboard_id`). + Get(&version) + + if err != nil { + return err + } + + if !has { + return m.ErrDashboardVersionNotFound + } + + version.Data.Set("id", version.DashboardId) + query.Result = &version + return nil +} + +// GetDashboardVersions gets all dashboard versions for the given dashboard ID. +func GetDashboardVersions(query *m.GetDashboardVersionsQuery) error { + err := x.Table("dashboard_version"). 
+ Select(`dashboard_version.id, + dashboard_version.dashboard_id, + dashboard_version.parent_version, + dashboard_version.restored_from, + dashboard_version.version, + dashboard_version.created, + dashboard_version.created_by as created_by_id, + dashboard_version.message, + dashboard_version.data,`+ + dialect.Quote("user")+`.login as created_by`). + Join("LEFT", "user", `dashboard_version.created_by = `+dialect.Quote("user")+`.id`). + Join("LEFT", "dashboard", `dashboard.id = dashboard_version.dashboard_id`). + Where("dashboard_version.dashboard_id=? AND dashboard.org_id=?", query.DashboardId, query.OrgId). + OrderBy("dashboard_version.version DESC"). + Limit(query.Limit, query.Start). + Find(&query.Result) + if err != nil { + return err + } + + if len(query.Result) < 1 { + return m.ErrNoVersionsForDashboardId + } + return nil +} diff --git a/pkg/services/sqlstore/dashboard_version_test.go b/pkg/services/sqlstore/dashboard_version_test.go new file mode 100644 index 00000000000..8ac636c1682 --- /dev/null +++ b/pkg/services/sqlstore/dashboard_version_test.go @@ -0,0 +1,103 @@ +package sqlstore + +import ( + "reflect" + "testing" + + . "github.com/smartystreets/goconvey/convey" + + "github.com/grafana/grafana/pkg/components/simplejson" + m "github.com/grafana/grafana/pkg/models" +) + +func updateTestDashboard(dashboard *m.Dashboard, data map[string]interface{}) { + data["title"] = dashboard.Title + + saveCmd := m.SaveDashboardCommand{ + OrgId: dashboard.OrgId, + Overwrite: true, + Dashboard: simplejson.NewFromAny(data), + } + + err := SaveDashboard(&saveCmd) + So(err, ShouldBeNil) +} + +func TestGetDashboardVersion(t *testing.T) { + Convey("Testing dashboard version retrieval", t, func() { + InitTestDB(t) + + Convey("Get a Dashboard ID and version ID", func() { + savedDash := insertTestDashboard("test dash 26", 1, "diff") + + query := m.GetDashboardVersionQuery{ + DashboardId: savedDash.Id, + Version: savedDash.Version, + OrgId: 1, + } + + err := GetDashboardVersion(&query) + So(err, ShouldBeNil) + So(savedDash.Id, ShouldEqual, query.DashboardId) + So(savedDash.Version, ShouldEqual, query.Version) + + dashCmd := m.GetDashboardQuery{ + OrgId: savedDash.OrgId, + Slug: savedDash.Slug, + } + + err = GetDashboard(&dashCmd) + So(err, ShouldBeNil) + eq := reflect.DeepEqual(dashCmd.Result.Data, query.Result.Data) + So(eq, ShouldEqual, true) + }) + + Convey("Attempt to get a version that doesn't exist", func() { + query := m.GetDashboardVersionQuery{ + DashboardId: int64(999), + Version: 123, + OrgId: 1, + } + + err := GetDashboardVersion(&query) + So(err, ShouldNotBeNil) + So(err, ShouldEqual, m.ErrDashboardVersionNotFound) + }) + }) +} + +func TestGetDashboardVersions(t *testing.T) { + Convey("Testing dashboard versions retrieval", t, func() { + InitTestDB(t) + savedDash := insertTestDashboard("test dash 43", 1, "diff-all") + + Convey("Get all versions for a given Dashboard ID", func() { + query := m.GetDashboardVersionsQuery{DashboardId: savedDash.Id, OrgId: 1} + + err := GetDashboardVersions(&query) + So(err, ShouldBeNil) + So(len(query.Result), ShouldEqual, 1) + }) + + Convey("Attempt to get the versions for a non-existent Dashboard ID", func() { + query := m.GetDashboardVersionsQuery{DashboardId: int64(999), OrgId: 1} + + err := GetDashboardVersions(&query) + So(err, ShouldNotBeNil) + So(err, ShouldEqual, m.ErrNoVersionsForDashboardId) + So(len(query.Result), ShouldEqual, 0) + }) + + Convey("Get all versions for an updated dashboard", func() { + updateTestDashboard(savedDash, 
map[string]interface{}{ + "tags": "different-tag", + }) + + query := m.GetDashboardVersionsQuery{DashboardId: savedDash.Id, OrgId: 1} + err := GetDashboardVersions(&query) + + So(err, ShouldBeNil) + So(len(query.Result), ShouldEqual, 2) + }) + }) +} diff --git a/pkg/services/sqlstore/datasource.go b/pkg/services/sqlstore/datasource.go index de838163681..831bead2360 100644 --- a/pkg/services/sqlstore/datasource.go +++ b/pkg/services/sqlstore/datasource.go @@ -6,8 +6,6 @@ import ( "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/components/securejsondata" m "github.com/grafana/grafana/pkg/models" - - "github.com/go-xorm/xorm" ) func init() { @@ -52,7 +50,7 @@ func GetDataSources(query *m.GetDataSourcesQuery) error { } func DeleteDataSourceById(cmd *m.DeleteDataSourceByIdCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var rawSql = "DELETE FROM data_source WHERE id=? and org_id=?" _, err := sess.Exec(rawSql, cmd.Id, cmd.OrgId) return err @@ -60,7 +58,7 @@ func DeleteDataSourceById(cmd *m.DeleteDataSourceByIdCommand) error { } func DeleteDataSourceByName(cmd *m.DeleteDataSourceByNameCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var rawSql = "DELETE FROM data_source WHERE name=? and org_id=?" _, err := sess.Exec(rawSql, cmd.Name, cmd.OrgId) return err @@ -69,7 +67,7 @@ func DeleteDataSourceByName(cmd *m.DeleteDataSourceByNameCommand) error { func AddDataSource(cmd *m.AddDataSourceCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { existing := m.DataSource{OrgId: cmd.OrgId, Name: cmd.Name} has, _ := sess.Get(&existing) @@ -109,7 +107,7 @@ func AddDataSource(cmd *m.AddDataSourceCommand) error { }) } -func updateIsDefaultFlag(ds *m.DataSource, sess *xorm.Session) error { +func updateIsDefaultFlag(ds *m.DataSource, sess *DBSession) error { // Handle is default flag if ds.IsDefault { rawSql := "UPDATE data_source SET is_default=? WHERE org_id=? AND id <> ?" 
@@ -122,7 +120,7 @@ func updateIsDefaultFlag(ds *m.DataSource, sess *xorm.Session) error { func UpdateDataSource(cmd *m.UpdateDataSourceCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { ds := &m.DataSource{ Id: cmd.Id, OrgId: cmd.OrgId, @@ -141,6 +139,7 @@ func UpdateDataSource(cmd *m.UpdateDataSourceCommand) error { JsonData: cmd.JsonData, SecureJsonData: securejsondata.GetEncryptedJsonData(cmd.SecureJsonData), Updated: time.Now(), + Version: cmd.Version + 1, } sess.UseBool("is_default") diff --git a/pkg/services/sqlstore/health.go b/pkg/services/sqlstore/health.go new file mode 100644 index 00000000000..ca5a8d2d085 --- /dev/null +++ b/pkg/services/sqlstore/health.go @@ -0,0 +1,14 @@ +package sqlstore + +import ( + "github.com/grafana/grafana/pkg/bus" + m "github.com/grafana/grafana/pkg/models" +) + +func init() { + bus.AddHandler("sql", GetDBHealthQuery) +} + +func GetDBHealthQuery(query *m.GetDBHealthQuery) error { + return x.Ping() +} diff --git a/pkg/services/sqlstore/logger.go b/pkg/services/sqlstore/logger.go index ae1145c21b0..9b0b068c918 100644 --- a/pkg/services/sqlstore/logger.go +++ b/pkg/services/sqlstore/logger.go @@ -23,67 +23,59 @@ func NewXormLogger(level glog.Lvl, grafanaLog glog.Logger) *XormLogger { } // Error implement core.ILogger -func (s *XormLogger) Err(v ...interface{}) error { +func (s *XormLogger) Error(v ...interface{}) { if s.level <= glog.LvlError { s.grafanaLog.Error(fmt.Sprint(v...)) } - return nil } // Errorf implement core.ILogger -func (s *XormLogger) Errf(format string, v ...interface{}) error { +func (s *XormLogger) Errorf(format string, v ...interface{}) { if s.level <= glog.LvlError { s.grafanaLog.Error(fmt.Sprintf(format, v...)) } - return nil } // Debug implement core.ILogger -func (s *XormLogger) Debug(v ...interface{}) error { +func (s *XormLogger) Debug(v ...interface{}) { if s.level <= glog.LvlDebug { s.grafanaLog.Debug(fmt.Sprint(v...)) } - return nil } // Debugf implement core.ILogger -func (s *XormLogger) Debugf(format string, v ...interface{}) error { +func (s *XormLogger) Debugf(format string, v ...interface{}) { if s.level <= glog.LvlDebug { s.grafanaLog.Debug(fmt.Sprintf(format, v...)) } - return nil } // Info implement core.ILogger -func (s *XormLogger) Info(v ...interface{}) error { +func (s *XormLogger) Info(v ...interface{}) { if s.level <= glog.LvlInfo { s.grafanaLog.Info(fmt.Sprint(v...)) } - return nil } // Infof implement core.ILogger -func (s *XormLogger) Infof(format string, v ...interface{}) error { +func (s *XormLogger) Infof(format string, v ...interface{}) { if s.level <= glog.LvlInfo { s.grafanaLog.Info(fmt.Sprintf(format, v...)) } - return nil } // Warn implement core.ILogger -func (s *XormLogger) Warning(v ...interface{}) error { +func (s *XormLogger) Warn(v ...interface{}) { if s.level <= glog.LvlWarn { s.grafanaLog.Warn(fmt.Sprint(v...)) } - return nil } // Warnf implement core.ILogger -func (s *XormLogger) Warningf(format string, v ...interface{}) error { +func (s *XormLogger) Warnf(format string, v ...interface{}) { if s.level <= glog.LvlWarn { s.grafanaLog.Warn(fmt.Sprintf(format, v...)) } - return nil } // Level implement core.ILogger @@ -103,8 +95,7 @@ func (s *XormLogger) Level() core.LogLevel { } // SetLevel implement core.ILogger -func (s *XormLogger) SetLevel(l core.LogLevel) error { - return nil +func (s *XormLogger) SetLevel(l core.LogLevel) { } // ShowSQL implement core.ILogger diff --git 
a/pkg/services/sqlstore/migrations/alert_mig.go b/pkg/services/sqlstore/migrations/alert_mig.go index 7b6558656f1..2a364d5f464 100644 --- a/pkg/services/sqlstore/migrations/alert_mig.go +++ b/pkg/services/sqlstore/migrations/alert_mig.go @@ -16,7 +16,7 @@ func addAlertMigrations(mg *Migrator) { {Name: "org_id", Type: DB_BigInt, Nullable: false}, {Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "message", Type: DB_Text, Nullable: false}, - {Name: "state", Type: DB_NVarchar, Length: 255, Nullable: false}, + {Name: "state", Type: DB_NVarchar, Length: 190, Nullable: false}, {Name: "settings", Type: DB_Text, Nullable: false}, {Name: "frequency", Type: DB_BigInt, Nullable: false}, {Name: "handler", Type: DB_BigInt, Nullable: false}, @@ -70,7 +70,7 @@ func addAlertMigrations(mg *Migrator) { mg.AddMigration("Update alert table charset", NewTableCharsetMigration("alert", []*Column{ {Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "message", Type: DB_Text, Nullable: false}, - {Name: "state", Type: DB_NVarchar, Length: 255, Nullable: false}, + {Name: "state", Type: DB_NVarchar, Length: 190, Nullable: false}, {Name: "settings", Type: DB_Text, Nullable: false}, {Name: "severity", Type: DB_Text, Nullable: false}, {Name: "execution_error", Type: DB_Text, Nullable: false}, diff --git a/pkg/services/sqlstore/migrations/annotation_mig.go b/pkg/services/sqlstore/migrations/annotation_mig.go index 4a7206f9d64..a9343266863 100644 --- a/pkg/services/sqlstore/migrations/annotation_mig.go +++ b/pkg/services/sqlstore/migrations/annotation_mig.go @@ -35,7 +35,6 @@ func addAnnotationMig(mg *Migrator) { } mg.AddMigration("Drop old annotation table v4", NewDropTableMigration("annotation")) - mg.AddMigration("create annotation table v5", NewAddTableMigration(table)) // create indices @@ -54,4 +53,8 @@ func addAnnotationMig(mg *Migrator) { {Name: "new_state", Type: DB_NVarchar, Length: 25, Nullable: false}, {Name: "data", Type: DB_Text, Nullable: false}, })) + + mg.AddMigration("Add column region_id to annotation table", NewAddColumnMigration(table, &Column{ + Name: "region_id", Type: DB_BigInt, Nullable: true, Default: "0", + })) } diff --git a/pkg/services/sqlstore/migrations/dashboard_mig.go b/pkg/services/sqlstore/migrations/dashboard_mig.go index 0ef2f3be54f..ee0cc1d893f 100644 --- a/pkg/services/sqlstore/migrations/dashboard_mig.go +++ b/pkg/services/sqlstore/migrations/dashboard_mig.go @@ -8,7 +8,7 @@ func addDashboardMigration(mg *Migrator) { Columns: []*Column{ {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, {Name: "version", Type: DB_Int, Nullable: false}, - {Name: "slug", Type: DB_NVarchar, Length: 190, Nullable: false}, + {Name: "slug", Type: DB_NVarchar, Length: 189, Nullable: false}, {Name: "title", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "data", Type: DB_Text, Nullable: false}, {Name: "account_id", Type: DB_BigInt, Nullable: false}, @@ -56,7 +56,7 @@ func addDashboardMigration(mg *Migrator) { Columns: []*Column{ {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, {Name: "version", Type: DB_Int, Nullable: false}, - {Name: "slug", Type: DB_NVarchar, Length: 190, Nullable: false}, + {Name: "slug", Type: DB_NVarchar, Length: 189, Nullable: false}, {Name: "title", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "data", Type: DB_Text, Nullable: false}, {Name: "org_id", Type: DB_BigInt, Nullable: false}, @@ -114,7 +114,7 @@ func addDashboardMigration(mg *Migrator) { // add column to store 
plugin_id mg.AddMigration("Add column plugin_id in dashboard", NewAddColumnMigration(dashboardV2, &Column{ - Name: "plugin_id", Type: DB_NVarchar, Nullable: true, Length: 255, + Name: "plugin_id", Type: DB_NVarchar, Nullable: true, Length: 189, })) mg.AddMigration("Add index for plugin_id in dashboard", NewAddIndexMigration(dashboardV2, &Index{ @@ -127,9 +127,9 @@ func addDashboardMigration(mg *Migrator) { })) mg.AddMigration("Update dashboard table charset", NewTableCharsetMigration("dashboard", []*Column{ - {Name: "slug", Type: DB_NVarchar, Length: 190, Nullable: false}, + {Name: "slug", Type: DB_NVarchar, Length: 189, Nullable: false}, {Name: "title", Type: DB_NVarchar, Length: 255, Nullable: false}, - {Name: "plugin_id", Type: DB_NVarchar, Nullable: true, Length: 255}, + {Name: "plugin_id", Type: DB_NVarchar, Nullable: true, Length: 189}, {Name: "data", Type: DB_MediumText, Nullable: false}, })) diff --git a/pkg/services/sqlstore/migrations/dashboard_version_mig.go b/pkg/services/sqlstore/migrations/dashboard_version_mig.go new file mode 100644 index 00000000000..fee69b9ef4c --- /dev/null +++ b/pkg/services/sqlstore/migrations/dashboard_version_mig.go @@ -0,0 +1,61 @@ +package migrations + +import . "github.com/grafana/grafana/pkg/services/sqlstore/migrator" + +func addDashboardVersionMigration(mg *Migrator) { + dashboardVersionV1 := Table{ + Name: "dashboard_version", + Columns: []*Column{ + {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, + {Name: "dashboard_id", Type: DB_BigInt}, + {Name: "parent_version", Type: DB_Int, Nullable: false}, + {Name: "restored_from", Type: DB_Int, Nullable: false}, + {Name: "version", Type: DB_Int, Nullable: false}, + {Name: "created", Type: DB_DateTime, Nullable: false}, + {Name: "created_by", Type: DB_BigInt, Nullable: false}, + {Name: "message", Type: DB_Text, Nullable: false}, + {Name: "data", Type: DB_Text, Nullable: false}, + }, + Indices: []*Index{ + {Cols: []string{"dashboard_id"}}, + {Cols: []string{"dashboard_id", "version"}, Type: UniqueIndex}, + }, + } + + mg.AddMigration("create dashboard_version table v1", NewAddTableMigration(dashboardVersionV1)) + mg.AddMigration("add index dashboard_version.dashboard_id", NewAddIndexMigration(dashboardVersionV1, dashboardVersionV1.Indices[0])) + mg.AddMigration("add unique index dashboard_version.dashboard_id and dashboard_version.version", NewAddIndexMigration(dashboardVersionV1, dashboardVersionV1.Indices[1])) + + // before, new dashboards were created with version 0; now they are always inserted with version 1 + const setVersionTo1WhereZeroSQL = `UPDATE dashboard SET version = 1 WHERE version = 0` + mg.AddMigration("Set dashboard version to 1 where 0", new(RawSqlMigration). + Sqlite(setVersionTo1WhereZeroSQL). + Postgres(setVersionTo1WhereZeroSQL). + Mysql(setVersionTo1WhereZeroSQL)) + + const rawSQL = `INSERT INTO dashboard_version +( + dashboard_id, + version, + parent_version, + restored_from, + created, + created_by, + message, + data +) +SELECT + dashboard.id, + dashboard.version, + dashboard.version, + dashboard.version, + dashboard.updated, + dashboard.updated_by, + '', + dashboard.data +FROM dashboard;` + mg.AddMigration("save existing dashboard data in dashboard_version table v1", new(RawSqlMigration). + Sqlite(rawSQL). + Postgres(rawSQL).
+ Mysql(rawSQL)) +} diff --git a/pkg/services/sqlstore/migrations/migrations.go b/pkg/services/sqlstore/migrations/migrations.go index 163c6d762a8..38072fe88e4 100644 --- a/pkg/services/sqlstore/migrations/migrations.go +++ b/pkg/services/sqlstore/migrations/migrations.go @@ -24,8 +24,8 @@ func AddMigrations(mg *Migrator) { addPreferencesMigrations(mg) addAlertMigrations(mg) addAnnotationMig(mg) - addStatsMigrations(mg) addTestDataMigrations(mg) + addDashboardVersionMigration(mg) } func addMigrationLogMigrations(mg *Migrator) { diff --git a/pkg/services/sqlstore/migrations/temp_user.go b/pkg/services/sqlstore/migrations/temp_user.go index 5592ab7e4ad..3913b18b3d8 100644 --- a/pkg/services/sqlstore/migrations/temp_user.go +++ b/pkg/services/sqlstore/migrations/temp_user.go @@ -9,10 +9,10 @@ func addTempUserMigrations(mg *Migrator) { {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, {Name: "org_id", Type: DB_BigInt, Nullable: false}, {Name: "version", Type: DB_Int, Nullable: false}, - {Name: "email", Type: DB_NVarchar, Length: 255}, + {Name: "email", Type: DB_NVarchar, Length: 190}, {Name: "name", Type: DB_NVarchar, Length: 255, Nullable: true}, {Name: "role", Type: DB_NVarchar, Length: 20, Nullable: true}, - {Name: "code", Type: DB_NVarchar, Length: 255}, + {Name: "code", Type: DB_NVarchar, Length: 190}, {Name: "status", Type: DB_Varchar, Length: 20}, {Name: "invited_by_user_id", Type: DB_BigInt, Nullable: true}, {Name: "email_sent", Type: DB_Bool}, @@ -37,10 +37,10 @@ func addTempUserMigrations(mg *Migrator) { addTableIndicesMigrations(mg, "v1-7", tempUserV1) mg.AddMigration("Update temp_user table charset", NewTableCharsetMigration("temp_user", []*Column{ - {Name: "email", Type: DB_NVarchar, Length: 255}, + {Name: "email", Type: DB_NVarchar, Length: 190}, {Name: "name", Type: DB_NVarchar, Length: 255, Nullable: true}, {Name: "role", Type: DB_NVarchar, Length: 20, Nullable: true}, - {Name: "code", Type: DB_NVarchar, Length: 255}, + {Name: "code", Type: DB_NVarchar, Length: 190}, {Name: "status", Type: DB_Varchar, Length: 20}, {Name: "remote_addr", Type: DB_Varchar, Length: 255, Nullable: true}, })) diff --git a/pkg/services/sqlstore/org.go b/pkg/services/sqlstore/org.go index 919bb6fd026..8931f1cf0f5 100644 --- a/pkg/services/sqlstore/org.go +++ b/pkg/services/sqlstore/org.go @@ -63,7 +63,7 @@ func GetOrgByName(query *m.GetOrgByNameQuery) error { return nil } -func isOrgNameTaken(name string, existingId int64, sess *session) (bool, error) { +func isOrgNameTaken(name string, existingId int64, sess *DBSession) (bool, error) { // check if org name is taken var org m.Org exists, err := sess.Where("name=?", name).Get(&org) @@ -80,7 +80,7 @@ func isOrgNameTaken(name string, existingId int64, sess *session) (bool, error) } func CreateOrg(cmd *m.CreateOrgCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { if isNameTaken, err := isOrgNameTaken(cmd.Name, 0, sess); err != nil { return err @@ -120,7 +120,7 @@ func CreateOrg(cmd *m.CreateOrgCommand) error { } func UpdateOrg(cmd *m.UpdateOrgCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { if isNameTaken, err := isOrgNameTaken(cmd.Name, cmd.OrgId, sess); err != nil { return err @@ -154,7 +154,7 @@ func UpdateOrg(cmd *m.UpdateOrgCommand) error { } func UpdateOrgAddress(cmd *m.UpdateOrgAddressCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess 
*DBSession) error { org := m.Org{ Address1: cmd.Address1, Address2: cmd.Address2, @@ -181,7 +181,7 @@ func UpdateOrgAddress(cmd *m.UpdateOrgAddressCommand) error { } func DeleteOrg(cmd *m.DeleteOrgCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { if res, err := sess.Query("SELECT 1 from org WHERE id=?", cmd.Id); err != nil { return err } else if len(res) != 1 { diff --git a/pkg/services/sqlstore/org_users.go b/pkg/services/sqlstore/org_users.go index 11ea558b0ce..e1b9dcc1da7 100644 --- a/pkg/services/sqlstore/org_users.go +++ b/pkg/services/sqlstore/org_users.go @@ -4,8 +4,6 @@ import ( "fmt" "time" - "github.com/go-xorm/xorm" - "github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" ) @@ -18,7 +16,7 @@ func init() { } func AddOrgUser(cmd *m.AddOrgUserCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { // check if user exists if res, err := sess.Query("SELECT 1 from org_user WHERE org_id=? and user_id=?", cmd.OrgId, cmd.UserId); err != nil { return err @@ -46,7 +44,7 @@ func AddOrgUser(cmd *m.AddOrgUserCommand) error { } func UpdateOrgUser(cmd *m.UpdateOrgUserCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var orgUser m.OrgUser exists, err := sess.Where("org_id=? AND user_id=?", cmd.OrgId, cmd.UserId).Get(&orgUser) if err != nil { @@ -81,7 +79,7 @@ func GetOrgUsers(query *m.GetOrgUsersQuery) error { } func RemoveOrgUser(cmd *m.RemoveOrgUserCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var rawSql = "DELETE FROM org_user WHERE org_id=? and user_id=?" _, err := sess.Exec(rawSql, cmd.OrgId, cmd.UserId) if err != nil { @@ -92,7 +90,7 @@ func RemoveOrgUser(cmd *m.RemoveOrgUserCommand) error { }) } -func validateOneAdminLeftInOrg(orgId int64, sess *xorm.Session) error { +func validateOneAdminLeftInOrg(orgId int64, sess *DBSession) error { // validate that there is an admin user left res, err := sess.Query("SELECT 1 from org_user WHERE org_id=? and role='Admin'", orgId) if err != nil { diff --git a/pkg/services/sqlstore/playlist.go b/pkg/services/sqlstore/playlist.go index 56fae9d3feb..72f3079db8d 100644 --- a/pkg/services/sqlstore/playlist.go +++ b/pkg/services/sqlstore/playlist.go @@ -3,8 +3,6 @@ package sqlstore import ( "fmt" - "github.com/go-xorm/xorm" - "github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" ) @@ -85,12 +83,12 @@ func UpdatePlaylist(cmd *m.UpdatePlaylistCommand) error { playlistItems := make([]m.PlaylistItem, 0) - for _, item := range cmd.Items { + for index, item := range cmd.Items { playlistItems = append(playlistItems, m.PlaylistItem{ PlaylistId: playlist.Id, Type: item.Type, Value: item.Value, - Order: item.Order, + Order: index + 1, Title: item.Title, }) } @@ -118,7 +116,7 @@ func DeletePlaylist(cmd *m.DeletePlaylistCommand) error { return m.ErrCommandValidationFailed } - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var rawPlaylistSql = "DELETE FROM playlist WHERE id = ? and org_id = ?" 
_, err := sess.Exec(rawPlaylistSql, cmd.Id, cmd.OrgId) diff --git a/pkg/services/sqlstore/plugin_setting.go b/pkg/services/sqlstore/plugin_setting.go index 8121b8c7b4b..172995872eb 100644 --- a/pkg/services/sqlstore/plugin_setting.go +++ b/pkg/services/sqlstore/plugin_setting.go @@ -44,7 +44,7 @@ func GetPluginSettingById(query *m.GetPluginSettingByIdQuery) error { } func UpdatePluginSetting(cmd *m.UpdatePluginSettingCmd) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { var pluginSetting m.PluginSetting exists, err := sess.Where("org_id=? and plugin_id=?", cmd.OrgId, cmd.PluginId).Get(&pluginSetting) @@ -74,7 +74,12 @@ func UpdatePluginSetting(cmd *m.UpdatePluginSettingCmd) error { return err } else { for key, data := range cmd.SecureJsonData { - pluginSetting.SecureJsonData[key] = util.Encrypt([]byte(data), setting.SecretKey) + encryptedData, err := util.Encrypt([]byte(data), setting.SecretKey) + if err != nil { + return err + } + + pluginSetting.SecureJsonData[key] = encryptedData } // add state change event on commit success @@ -99,7 +104,7 @@ func UpdatePluginSetting(cmd *m.UpdatePluginSettingCmd) error { } func UpdatePluginSettingVersion(cmd *m.UpdatePluginSettingVersionCmd) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { _, err := sess.Exec("UPDATE plugin_setting SET plugin_version=? WHERE org_id=? AND plugin_id=?", cmd.PluginVersion, cmd.OrgId, cmd.PluginId) return err diff --git a/pkg/services/sqlstore/preferences.go b/pkg/services/sqlstore/preferences.go index 65609a9c57c..399b23f3ffa 100644 --- a/pkg/services/sqlstore/preferences.go +++ b/pkg/services/sqlstore/preferences.go @@ -68,7 +68,7 @@ func GetPreferences(query *m.GetPreferencesQuery) error { } func SavePreferences(cmd *m.SavePreferencesCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { var prefs m.Preferences exists, err := sess.Where("org_id=? 
AND user_id=?", cmd.OrgId, cmd.UserId).Get(&prefs) diff --git a/pkg/services/sqlstore/quota.go b/pkg/services/sqlstore/quota.go index 53ea8889c56..0a857efce40 100644 --- a/pkg/services/sqlstore/quota.go +++ b/pkg/services/sqlstore/quota.go @@ -2,6 +2,7 @@ package sqlstore import ( "fmt" + "github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" @@ -94,7 +95,7 @@ func GetOrgQuotas(query *m.GetOrgQuotasQuery) error { } func UpdateOrgQuota(cmd *m.UpdateOrgQuotaCmd) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { //Check if quota is already defined in the DB quota := m.Quota{ Target: cmd.Target, @@ -194,7 +195,7 @@ func GetUserQuotas(query *m.GetUserQuotasQuery) error { } func UpdateUserQuota(cmd *m.UpdateUserQuotaCmd) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { //Check if quota is already defined in the DB quota := m.Quota{ Target: cmd.Target, diff --git a/pkg/services/sqlstore/shared.go b/pkg/services/sqlstore/shared.go index be4266477c7..0f4aeb969c6 100644 --- a/pkg/services/sqlstore/shared.go +++ b/pkg/services/sqlstore/shared.go @@ -1,27 +1,37 @@ package sqlstore import ( + "time" + "github.com/go-xorm/xorm" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/log" + sqlite3 "github.com/mattn/go-sqlite3" ) -type dbTransactionFunc func(sess *xorm.Session) error -type dbTransactionFunc2 func(sess *session) error - -type session struct { +type DBSession struct { *xorm.Session events []interface{} } -func (sess *session) publishAfterCommit(msg interface{}) { +type dbTransactionFunc func(sess *DBSession) error + +func (sess *DBSession) publishAfterCommit(msg interface{}) { sess.events = append(sess.events, msg) } +func newSession() *DBSession { + return &DBSession{Session: x.NewSession()} +} + func inTransaction(callback dbTransactionFunc) error { + return inTransactionWithRetry(callback, 0) +} + +func inTransactionWithRetry(callback dbTransactionFunc, retry int) error { var err error - sess := x.NewSession() + sess := newSession() defer sess.Close() if err = sess.Begin(); err != nil { @@ -30,28 +40,16 @@ func inTransaction(callback dbTransactionFunc) error { err = callback(sess) - if err != nil { - sess.Rollback() - return err - } else if err = sess.Commit(); err != nil { - return err + // special handling of database locked errors for sqlite, then we can retry 3 times + if sqlError, ok := err.(sqlite3.Error); ok && retry < 5 { + if sqlError.Code == sqlite3.ErrLocked { + sess.Rollback() + time.Sleep(time.Millisecond * time.Duration(10)) + sqlog.Info("Database table locked, sleeping then retrying", "retry", retry) + return inTransactionWithRetry(callback, retry+1) + } } - return nil -} - -func inTransaction2(callback dbTransactionFunc2) error { - var err error - - sess := session{Session: x.NewSession()} - - defer sess.Close() - if err = sess.Begin(); err != nil { - return err - } - - err = callback(&sess) - if err != nil { sess.Rollback() return err diff --git a/pkg/services/sqlstore/sql_test_data.go b/pkg/services/sqlstore/sql_test_data.go index a83ab76ecc0..37740dc8373 100644 --- a/pkg/services/sqlstore/sql_test_data.go +++ b/pkg/services/sqlstore/sql_test_data.go @@ -12,9 +12,9 @@ func init() { bus.AddHandler("sql", InsertSqlTestData) } -func sqlRandomWalk(m1 string, m2 string, intWalker int64, floatWalker float64, sess *session) error { +func sqlRandomWalk(m1 string, m2 string, 
intWalker int64, floatWalker float64, sess *DBSession) error { - timeWalker := time.Now().UTC().Add(time.Hour * -1) + timeWalker := time.Now().UTC().Add(time.Hour * -200) now := time.Now().UTC() step := time.Minute @@ -29,7 +29,7 @@ func sqlRandomWalk(m1 string, m2 string, intWalker int64, floatWalker float64, s timeWalker = timeWalker.Add(step) row.Id = 0 - row.ValueBigInt += rand.Int63n(100) - 100 + row.ValueBigInt += rand.Int63n(200) - 100 row.ValueDouble += rand.Float64() - 0.5 row.ValueFloat += rand.Float32() - 0.5 row.TimeEpoch = timeWalker.Unix() @@ -45,7 +45,7 @@ func sqlRandomWalk(m1 string, m2 string, intWalker int64, floatWalker float64, s } func InsertSqlTestData(cmd *m.InsertSqlTestDataCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { var err error sqlog.Info("SQL TestData: Clearing previous test data") @@ -61,11 +61,6 @@ func InsertSqlTestData(cmd *m.InsertSqlTestDataCommand) error { sqlRandomWalk("server2", "frontend", 100, 1.123, sess) sqlRandomWalk("server3", "frontend", 100, 1.123, sess) - sqlRandomWalk("server1", "backend", 100, 1.123, sess) - sqlRandomWalk("server2", "backend", 100, 1.123, sess) - sqlRandomWalk("server3", "backend", 100, 1.123, sess) - sqlRandomWalk("db-server1", "backend", 100, 1.123, sess) - return err }) } diff --git a/pkg/services/sqlstore/sqlstore.go b/pkg/services/sqlstore/sqlstore.go index 27fba0068d1..7c267517b90 100644 --- a/pkg/services/sqlstore/sqlstore.go +++ b/pkg/services/sqlstore/sqlstore.go @@ -98,8 +98,8 @@ func SetEngine(engine *xorm.Engine) (err error) { return fmt.Errorf("Sqlstore::Migration failed err: %v\n", err) } + // Init repo instances annotations.SetRepository(&SqlAnnotationRepo{}) - return nil } @@ -158,6 +158,7 @@ func getEngine() (*xorm.Engine, error) { } else { engine.SetMaxOpenConns(DbCfg.MaxOpenConn) engine.SetMaxIdleConns(DbCfg.MaxIdleConn) + engine.SetLogger(&xorm.DiscardLogger{}) // engine.SetLogger(NewXormLogger(log.LvlInfo, log.New("sqlstore.xorm"))) // engine.ShowSQL = true // engine.ShowInfo = true @@ -198,7 +199,7 @@ func LoadConfig() { if DbCfg.Type == "sqlite3" { UseSQLite3 = true - // only allow one connection as sqlite3 has multi threading issues that casue table locks + // only allow one connection as sqlite3 has multi threading issues that cause table locks // DbCfg.MaxIdleConn = 1 // DbCfg.MaxOpenConn = 1 } diff --git a/pkg/services/sqlstore/star.go b/pkg/services/sqlstore/star.go index 09dcb8ed939..7c136c04fbc 100644 --- a/pkg/services/sqlstore/star.go +++ b/pkg/services/sqlstore/star.go @@ -1,8 +1,6 @@ package sqlstore import ( - "github.com/go-xorm/xorm" - "github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" ) @@ -36,7 +34,7 @@ func StarDashboard(cmd *m.StarDashboardCommand) error { return m.ErrCommandValidationFailed } - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { entity := m.Star{ UserId: cmd.UserId, @@ -53,7 +51,7 @@ func UnstarDashboard(cmd *m.UnstarDashboardCommand) error { return m.ErrCommandValidationFailed } - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var rawSql = "DELETE FROM star WHERE user_id=? and dashboard_id=?" 
_, err := sess.Exec(rawSql, cmd.UserId, cmd.DashboardId) return err diff --git a/pkg/services/sqlstore/temp_user.go b/pkg/services/sqlstore/temp_user.go index 0fe5c9612f5..8864d5fb02a 100644 --- a/pkg/services/sqlstore/temp_user.go +++ b/pkg/services/sqlstore/temp_user.go @@ -3,7 +3,6 @@ package sqlstore import ( "time" - "github.com/go-xorm/xorm" "github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" ) @@ -16,7 +15,7 @@ func init() { } func UpdateTempUserStatus(cmd *m.UpdateTempUserStatusCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { var rawSql = "UPDATE temp_user SET status=? WHERE code=?" _, err := sess.Exec(rawSql, string(cmd.Status), cmd.Code) return err @@ -24,7 +23,7 @@ func UpdateTempUserStatus(cmd *m.UpdateTempUserStatusCommand) error { } func CreateTempUser(cmd *m.CreateTempUserCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { // create user user := &m.TempUser{ diff --git a/pkg/services/sqlstore/user.go b/pkg/services/sqlstore/user.go index 9a44d6f194e..177f465dc96 100644 --- a/pkg/services/sqlstore/user.go +++ b/pkg/services/sqlstore/user.go @@ -1,11 +1,10 @@ package sqlstore import ( + "strconv" "strings" "time" - "github.com/go-xorm/xorm" - "fmt" "github.com/grafana/grafana/pkg/bus" @@ -33,7 +32,7 @@ func init() { bus.AddHandler("sql", SetUserHelpFlag) } -func getOrgIdForNewUser(cmd *m.CreateUserCommand, sess *session) (int64, error) { +func getOrgIdForNewUser(cmd *m.CreateUserCommand, sess *DBSession) (int64, error) { if cmd.SkipOrgSetup { return -1, nil } @@ -76,7 +75,7 @@ func getOrgIdForNewUser(cmd *m.CreateUserCommand, sess *session) (int64, error) } func CreateUser(cmd *m.CreateUserCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { orgId, err := getOrgIdForNewUser(cmd, sess) if err != nil { return err @@ -219,7 +218,7 @@ func GetUserByEmail(query *m.GetUserByEmailQuery) error { } func UpdateUser(cmd *m.UpdateUserCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { user := m.User{ Name: cmd.Name, @@ -246,7 +245,7 @@ func UpdateUser(cmd *m.UpdateUserCommand) error { } func ChangeUserPassword(cmd *m.ChangeUserPasswordCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { user := m.User{ Password: cmd.NewPassword, @@ -273,10 +272,10 @@ func SetUsingOrg(cmd *m.SetUsingOrgCommand) error { } if !valid { - return fmt.Errorf("user does not belong ot org") + return fmt.Errorf("user does not belong to org") } - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { user := m.User{} sess.Id(cmd.UserId).Get(&user) @@ -319,19 +318,24 @@ func GetUserOrgList(query *m.GetUserOrgListQuery) error { } func GetSignedInUser(query *m.GetSignedInUserQuery) error { + orgId := "u.org_id" + if query.OrgId > 0 { + orgId = strconv.FormatInt(query.OrgId, 10) + } + var rawSql = `SELECT - u.id as user_id, - u.is_admin as is_grafana_admin, - u.email as email, - u.login as login, - u.name as name, - u.help_flags1 as help_flags1, - org.name as org_name, - org_user.role as org_role, - org.id as org_id - FROM ` + dialect.Quote("user") + ` as u - LEFT OUTER JOIN org_user on org_user.org_id = u.org_id and org_user.user_id = u.id - LEFT OUTER JOIN org on org.id = u.org_id ` + u.id as 
user_id, + u.is_admin as is_grafana_admin, + u.email as email, + u.login as login, + u.name as name, + u.help_flags1 as help_flags1, + org.name as org_name, + org_user.role as org_role, + org.id as org_id + FROM ` + dialect.Quote("user") + ` as u + LEFT OUTER JOIN org_user on org_user.org_id = ` + orgId + ` and org_user.user_id = u.id + LEFT OUTER JOIN org on org.id = org_user.org_id ` sess := x.Table("user") if query.UserId > 0 { @@ -388,7 +392,7 @@ func SearchUsers(query *m.SearchUsersQuery) error { } func DeleteUser(cmd *m.DeleteUserCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { deletes := []string{ "DELETE FROM star WHERE user_id = ?", "DELETE FROM " + dialect.Quote("user") + " WHERE id = ?", @@ -406,7 +410,7 @@ func DeleteUser(cmd *m.DeleteUserCommand) error { } func UpdateUserPermissions(cmd *m.UpdateUserPermissionsCommand) error { - return inTransaction(func(sess *xorm.Session) error { + return inTransaction(func(sess *DBSession) error { user := m.User{} sess.Id(cmd.UserId).Get(&user) @@ -418,7 +422,7 @@ func UpdateUserPermissions(cmd *m.UpdateUserPermissionsCommand) error { } func SetUserHelpFlag(cmd *m.SetUserHelpFlagCommand) error { - return inTransaction2(func(sess *session) error { + return inTransaction(func(sess *DBSession) error { user := m.User{ Id: cmd.UserId, diff --git a/pkg/setting/setting.go b/pkg/setting/setting.go index cacb4547162..9ec995dd96e 100644 --- a/pkg/setting/setting.go +++ b/pkg/setting/setting.go @@ -27,6 +27,7 @@ type Scheme string const ( HTTP Scheme = "http" HTTPS Scheme = "https" + SOCKET Scheme = "socket" DEFAULT_HTTP_ADDR string = "0.0.0.0" ) @@ -65,6 +66,7 @@ var ( HttpAddr, HttpPort string SshPort int CertFile, KeyFile string + SocketPath string RouterLogging bool DataProxyLogging bool StaticRootPath string @@ -158,7 +160,7 @@ var ( logger log.Logger // Grafana.NET URL - GrafanaNetUrl string + GrafanaComUrl string // S3 temp image store S3TempImageStoreBucketUrl string @@ -304,7 +306,7 @@ func evalEnvVarExpression(value string) string { envVar = strings.TrimSuffix(envVar, "}") envValue := os.Getenv(envVar) - // if env variable is hostname and it is emtpy use os.Hostname as default + // if env variable is hostname and it is empty use os.Hostname as default if envVar == "HOSTNAME" && envValue == "" { envValue, _ = os.Hostname() } @@ -473,6 +475,10 @@ func NewConfigContext(args *CommandLineArgs) error { CertFile = server.Key("cert_file").String() KeyFile = server.Key("cert_key").String() } + if server.Key("protocol").MustString("http") == "socket" { + Protocol = SOCKET + SocketPath = server.Key("socket").String() + } Domain = server.Key("domain").MustString("localhost") HttpAddr = server.Key("http_addr").MustString(DEFAULT_HTTP_ADDR) @@ -509,7 +515,7 @@ func NewConfigContext(args *CommandLineArgs) error { // read data source proxy white list DataProxyWhiteList = make(map[string]bool) - for _, hostAndIp := range security.Key("data_source_proxy_whitelist").Strings(" ") { + for _, hostAndIp := range util.SplitString(security.Key("data_source_proxy_whitelist").String()) { DataProxyWhiteList[hostAndIp] = true } @@ -576,7 +582,11 @@ func NewConfigContext(args *CommandLineArgs) error { log.Warn("require_email_validation is enabled but smpt is disabled") } - GrafanaNetUrl = Cfg.Section("grafana_net").Key("url").MustString("https://grafana.com") + // check old key name + GrafanaComUrl = Cfg.Section("grafana_net").Key("url").MustString("") + if GrafanaComUrl == "" { + GrafanaComUrl 
= Cfg.Section("grafana_com").Key("url").MustString("https://grafana.com") + } imageUploadingSection := Cfg.Section("external_image_storage") ImageUploadProvider = imageUploadingSection.Key("provider").MustString("internal") @@ -625,14 +635,14 @@ func LogConfigurationInfo() { if len(appliedCommandLineProperties) > 0 { for _, prop := range appliedCommandLineProperties { - logger.Info("Config overriden from command line", "arg", prop) + logger.Info("Config overridden from command line", "arg", prop) } } if len(appliedEnvOverrides) > 0 { text.WriteString("\tEnvironment variables used:\n") for _, prop := range appliedEnvOverrides { - logger.Info("Config overriden from Environment variable", "var", prop) + logger.Info("Config overridden from Environment variable", "var", prop) } } diff --git a/pkg/setting/setting_test.go b/pkg/setting/setting_test.go index 586cd8ff61d..b213c2795af 100644 --- a/pkg/setting/setting_test.go +++ b/pkg/setting/setting_test.go @@ -73,7 +73,7 @@ func TestLoadingSettings(t *testing.T) { So(Domain, ShouldEqual, "test2") }) - Convey("Defaults can be overriden in specified config file", func() { + Convey("Defaults can be overridden in specified config file", func() { NewConfigContext(&CommandLineArgs{ HomePath: "../../", Config: filepath.Join(HomePath, "tests/config-files/override.ini"), @@ -103,7 +103,7 @@ func TestLoadingSettings(t *testing.T) { So(DataPath, ShouldEqual, "/tmp/env_override") }) - Convey("instance_name default to hostname even if hostname env is emtpy", func() { + Convey("instance_name default to hostname even if hostname env is empty", func() { NewConfigContext(&CommandLineArgs{ HomePath: "../../", }) diff --git a/pkg/social/common.go b/pkg/social/common.go index 7bce5d2ae8f..a5b8973306f 100644 --- a/pkg/social/common.go +++ b/pkg/social/common.go @@ -2,7 +2,11 @@ package social import ( "fmt" + "io/ioutil" + "net/http" "strings" + + "github.com/grafana/grafana/pkg/log" ) func isEmailAllowed(email string, allowedDomains []string) bool { @@ -18,3 +22,25 @@ func isEmailAllowed(email string, allowedDomains []string) bool { return valid } + +func HttpGet(client *http.Client, url string) ([]byte, error) { + r, err := client.Get(url) + if err != nil { + return nil, err + } + + defer r.Body.Close() + + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, err + } + + if r.StatusCode >= 300 { + return nil, fmt.Errorf(string(body)) + } + + log.Trace("HTTP GET %s: %s %s", url, r.Status, string(body)) + + return body, nil +} diff --git a/pkg/social/generic_oauth.go b/pkg/social/generic_oauth.go index f0e36ac064c..76b2b734cd6 100644 --- a/pkg/social/generic_oauth.go +++ b/pkg/social/generic_oauth.go @@ -76,29 +76,39 @@ func (s *GenericOAuth) IsOrganizationMember(client *http.Client) bool { func (s *GenericOAuth) FetchPrivateEmail(client *http.Client) (string, error) { type Record struct { - Email string `json:"email"` - Primary bool `json:"primary"` - Verified bool `json:"verified"` + Email string `json:"email"` + Primary bool `json:"primary"` + IsPrimary bool `json:"is_primary"` + Verified bool `json:"verified"` + IsConfirmed bool `json:"is_confirmed"` } - emailsUrl := fmt.Sprintf(s.apiUrl + "/emails") - r, err := client.Get(emailsUrl) + body, err := HttpGet(client, fmt.Sprintf(s.apiUrl+"/emails")) if err != nil { - return "", err + return "", fmt.Errorf("Error getting email address: %s", err) } - defer r.Body.Close() - var records []Record - if err = json.NewDecoder(r.Body).Decode(&records); err != nil { - return "", err + err = json.Unmarshal(body, 
&records) + if err != nil { + var data struct { + Values []Record `json:"values"` + } + + err = json.Unmarshal(body, &data) + if err != nil { + return "", fmt.Errorf("Error getting email address: %s", err) + } + + records = data.Values } var email = "" for _, record := range records { - if record.Primary { + if record.Primary || record.IsPrimary { email = record.Email + break } } @@ -110,18 +120,16 @@ func (s *GenericOAuth) FetchTeamMemberships(client *http.Client) ([]int, error) Id int `json:"id"` } - membershipUrl := fmt.Sprintf(s.apiUrl + "/teams") - r, err := client.Get(membershipUrl) + body, err := HttpGet(client, fmt.Sprintf(s.apiUrl+"/teams")) if err != nil { - return nil, err + return nil, fmt.Errorf("Error getting team memberships: %s", err) } - defer r.Body.Close() - var records []Record - if err = json.NewDecoder(r.Body).Decode(&records); err != nil { - return nil, err + err = json.Unmarshal(body, &records) + if err != nil { + return nil, fmt.Errorf("Error getting team memberships: %s", err) } var ids = make([]int, len(records)) @@ -137,18 +145,16 @@ func (s *GenericOAuth) FetchOrganizations(client *http.Client) ([]string, error) Login string `json:"login"` } - url := fmt.Sprintf(s.apiUrl + "/orgs") - r, err := client.Get(url) + body, err := HttpGet(client, fmt.Sprintf(s.apiUrl+"/orgs")) if err != nil { - return nil, err + return nil, fmt.Errorf("Error getting organizations: %s", err) } - defer r.Body.Close() - var records []Record - if err = json.NewDecoder(r.Body).Decode(&records); err != nil { - return nil, err + err = json.Unmarshal(body, &records) + if err != nil { + return nil, fmt.Errorf("Error getting organizations: %s", err) } var logins = make([]string, len(records)) @@ -161,23 +167,22 @@ func (s *GenericOAuth) FetchOrganizations(client *http.Client) ([]string, error) func (s *GenericOAuth) UserInfo(client *http.Client) (*BasicUserInfo, error) { var data struct { - Name string `json:"name"` - Login string `json:"login"` - Username string `json:"username"` - Email string `json:"email"` - Attributes map[string][]string `json:"attributes"` + Name string `json:"name"` + DisplayName string `json:"display_name"` + Login string `json:"login"` + Username string `json:"username"` + Email string `json:"email"` + Attributes map[string][]string `json:"attributes"` } - var err error - r, err := client.Get(s.apiUrl) + body, err := HttpGet(client, s.apiUrl) if err != nil { - return nil, err + return nil, fmt.Errorf("Error getting user info: %s", err) } - defer r.Body.Close() - - if err = json.NewDecoder(r.Body).Decode(&data); err != nil { - return nil, err + err = json.Unmarshal(body, &data) + if err != nil { + return nil, fmt.Errorf("Error getting user info: %s", err) } userInfo := &BasicUserInfo{ @@ -197,6 +202,10 @@ func (s *GenericOAuth) UserInfo(client *http.Client) (*BasicUserInfo, error) { } } + if userInfo.Name == "" && data.DisplayName != "" { + userInfo.Name = data.DisplayName + } + if userInfo.Login == "" && data.Username != "" { userInfo.Login = data.Username } diff --git a/pkg/social/github_oauth.go b/pkg/social/github_oauth.go index 271a472be84..b7b8d7c8156 100644 --- a/pkg/social/github_oauth.go +++ b/pkg/social/github_oauth.go @@ -85,18 +85,16 @@ func (s *SocialGithub) FetchPrivateEmail(client *http.Client) (string, error) { Verified bool `json:"verified"` } - emailsUrl := fmt.Sprintf(s.apiUrl + "/emails") - r, err := client.Get(emailsUrl) + body, err := HttpGet(client, fmt.Sprintf(s.apiUrl+"/emails")) if err != nil { - return "", err + return "", fmt.Errorf("Error 
getting email address: %s", err) } - defer r.Body.Close() - var records []Record - if err = json.NewDecoder(r.Body).Decode(&records); err != nil { - return "", err + err = json.Unmarshal(body, &records) + if err != nil { + return "", fmt.Errorf("Error getting email address: %s", err) } var email = "" @@ -114,18 +112,16 @@ func (s *SocialGithub) FetchTeamMemberships(client *http.Client) ([]int, error) Id int `json:"id"` } - membershipUrl := fmt.Sprintf(s.apiUrl + "/teams") - r, err := client.Get(membershipUrl) + body, err := HttpGet(client, fmt.Sprintf(s.apiUrl+"/teams")) if err != nil { - return nil, err + return nil, fmt.Errorf("Error getting team memberships: %s", err) } - defer r.Body.Close() - var records []Record - if err = json.NewDecoder(r.Body).Decode(&records); err != nil { - return nil, err + err = json.Unmarshal(body, &records) + if err != nil { + return nil, fmt.Errorf("Error getting team memberships: %s", err) } var ids = make([]int, len(records)) @@ -141,18 +137,16 @@ func (s *SocialGithub) FetchOrganizations(client *http.Client) ([]string, error) Login string `json:"login"` } - url := fmt.Sprintf(s.apiUrl + "/orgs") - r, err := client.Get(url) + body, err := HttpGet(client, fmt.Sprintf(s.apiUrl+"/orgs")) if err != nil { - return nil, err + return nil, fmt.Errorf("Error getting organizations: %s", err) } - defer r.Body.Close() - var records []Record - if err = json.NewDecoder(r.Body).Decode(&records); err != nil { - return nil, err + err = json.Unmarshal(body, &records) + if err != nil { + return nil, fmt.Errorf("Error getting organizations: %s", err) } var logins = make([]string, len(records)) @@ -170,16 +164,14 @@ func (s *SocialGithub) UserInfo(client *http.Client) (*BasicUserInfo, error) { Email string `json:"email"` } - var err error - r, err := client.Get(s.apiUrl) + body, err := HttpGet(client, s.apiUrl) if err != nil { - return nil, err + return nil, fmt.Errorf("Error getting user info: %s", err) } - defer r.Body.Close() - - if err = json.NewDecoder(r.Body).Decode(&data); err != nil { - return nil, err + err = json.Unmarshal(body, &data) + if err != nil { + return nil, fmt.Errorf("Error getting user info: %s", err) } userInfo := &BasicUserInfo{ diff --git a/pkg/social/google_oauth.go b/pkg/social/google_oauth.go index d140f385b66..94a05b25140 100644 --- a/pkg/social/google_oauth.go +++ b/pkg/social/google_oauth.go @@ -2,6 +2,7 @@ package social import ( "encoding/json" + "fmt" "net/http" "github.com/grafana/grafana/pkg/models" @@ -34,16 +35,17 @@ func (s *SocialGoogle) UserInfo(client *http.Client) (*BasicUserInfo, error) { Name string `json:"name"` Email string `json:"email"` } - var err error - r, err := client.Get(s.apiUrl) + body, err := HttpGet(client, s.apiUrl) if err != nil { - return nil, err + return nil, fmt.Errorf("Error getting user info: %s", err) } - defer r.Body.Close() - if err = json.NewDecoder(r.Body).Decode(&data); err != nil { - return nil, err + + err = json.Unmarshal(body, &data) + if err != nil { + return nil, fmt.Errorf("Error getting user info: %s", err) } + return &BasicUserInfo{ Name: data.Name, Email: data.Email, diff --git a/pkg/social/grafananet_oauth.go b/pkg/social/grafana_com_oauth.go similarity index 64% rename from pkg/social/grafananet_oauth.go rename to pkg/social/grafana_com_oauth.go index 83c6d147b70..dd693f18d5d 100644 --- a/pkg/social/grafananet_oauth.go +++ b/pkg/social/grafana_com_oauth.go @@ -2,6 +2,7 @@ package social import ( "encoding/json" + "fmt" "net/http" "github.com/grafana/grafana/pkg/models" @@ -9,7 +10,7 @@ import 
( "golang.org/x/oauth2" ) -type SocialGrafanaNet struct { +type SocialGrafanaCom struct { *oauth2.Config url string allowedOrganizations []string @@ -20,19 +21,19 @@ type OrgRecord struct { Login string `json:"login"` } -func (s *SocialGrafanaNet) Type() int { - return int(models.GRAFANANET) +func (s *SocialGrafanaCom) Type() int { + return int(models.GRAFANA_COM) } -func (s *SocialGrafanaNet) IsEmailAllowed(email string) bool { +func (s *SocialGrafanaCom) IsEmailAllowed(email string) bool { return true } -func (s *SocialGrafanaNet) IsSignupAllowed() bool { +func (s *SocialGrafanaCom) IsSignupAllowed() bool { return s.allowSignup } -func (s *SocialGrafanaNet) IsOrganizationMember(organizations []OrgRecord) bool { +func (s *SocialGrafanaCom) IsOrganizationMember(organizations []OrgRecord) bool { if len(s.allowedOrganizations) == 0 { return true } @@ -48,7 +49,7 @@ func (s *SocialGrafanaNet) IsOrganizationMember(organizations []OrgRecord) bool return false } -func (s *SocialGrafanaNet) UserInfo(client *http.Client) (*BasicUserInfo, error) { +func (s *SocialGrafanaCom) UserInfo(client *http.Client) (*BasicUserInfo, error) { var data struct { Name string `json:"name"` Login string `json:"username"` @@ -57,16 +58,14 @@ func (s *SocialGrafanaNet) UserInfo(client *http.Client) (*BasicUserInfo, error) Orgs []OrgRecord `json:"orgs"` } - var err error - r, err := client.Get(s.url + "/api/oauth2/user") + body, err := HttpGet(client, s.url+"/api/oauth2/user") if err != nil { - return nil, err + return nil, fmt.Errorf("Error getting user info: %s", err) } - defer r.Body.Close() - - if err = json.NewDecoder(r.Body).Decode(&data); err != nil { - return nil, err + err = json.Unmarshal(body, &data) + if err != nil { + return nil, fmt.Errorf("Error getting user info: %s", err) } userInfo := &BasicUserInfo{ diff --git a/pkg/social/social.go b/pkg/social/social.go index 29c4b7ecbb4..9d2a53946c7 100644 --- a/pkg/social/social.go +++ b/pkg/social/social.go @@ -8,6 +8,7 @@ import ( "golang.org/x/oauth2" "github.com/grafana/grafana/pkg/setting" + "github.com/grafana/grafana/pkg/util" ) type BasicUserInfo struct { @@ -46,19 +47,19 @@ func NewOAuthService() { setting.OAuthService = &setting.OAuther{} setting.OAuthService.OAuthInfos = make(map[string]*setting.OAuthInfo) - allOauthes := []string{"github", "google", "generic_oauth", "grafananet"} + allOauthes := []string{"github", "google", "generic_oauth", "grafananet", "grafana_com"} for _, name := range allOauthes { sec := setting.Cfg.Section("auth." 
+ name) info := &setting.OAuthInfo{ ClientId: sec.Key("client_id").String(), ClientSecret: sec.Key("client_secret").String(), - Scopes: sec.Key("scopes").Strings(" "), + Scopes: util.SplitString(sec.Key("scopes").String()), AuthUrl: sec.Key("auth_url").String(), TokenUrl: sec.Key("token_url").String(), ApiUrl: sec.Key("api_url").String(), Enabled: sec.Key("enabled").MustBool(), - AllowedDomains: sec.Key("allowed_domains").Strings(" "), + AllowedDomains: util.SplitString(sec.Key("allowed_domains").String()), HostedDomain: sec.Key("hosted_domain").String(), AllowSignup: sec.Key("allow_sign_up").MustBool(), Name: sec.Key("name").MustString(name), @@ -71,6 +72,10 @@ func NewOAuthService() { continue } + if name == "grafananet" { + name = "grafana_com" + } + setting.OAuthService.OAuthInfos[name] = info config := oauth2.Config{ @@ -92,7 +97,7 @@ func NewOAuthService() { apiUrl: info.ApiUrl, allowSignup: info.AllowSignup, teamIds: sec.Key("team_ids").Ints(","), - allowedOrganizations: sec.Key("allowed_organizations").Strings(" "), + allowedOrganizations: util.SplitString(sec.Key("allowed_organizations").String()), } } @@ -115,27 +120,27 @@ func NewOAuthService() { apiUrl: info.ApiUrl, allowSignup: info.AllowSignup, teamIds: sec.Key("team_ids").Ints(","), - allowedOrganizations: sec.Key("allowed_organizations").Strings(" "), + allowedOrganizations: util.SplitString(sec.Key("allowed_organizations").String()), } } - if name == "grafananet" { + if name == "grafana_com" { config = oauth2.Config{ ClientID: info.ClientId, ClientSecret: info.ClientSecret, Endpoint: oauth2.Endpoint{ - AuthURL: setting.GrafanaNetUrl + "/oauth2/authorize", - TokenURL: setting.GrafanaNetUrl + "/api/oauth2/token", + AuthURL: setting.GrafanaComUrl + "/oauth2/authorize", + TokenURL: setting.GrafanaComUrl + "/api/oauth2/token", }, RedirectURL: strings.TrimSuffix(setting.AppUrl, "/") + SocialBaseUrl + name, Scopes: info.Scopes, } - SocialMap["grafananet"] = &SocialGrafanaNet{ + SocialMap["grafana_com"] = &SocialGrafanaCom{ Config: &config, - url: setting.GrafanaNetUrl, + url: setting.GrafanaComUrl, allowSignup: info.AllowSignup, - allowedOrganizations: sec.Key("allowed_organizations").Strings(" "), + allowedOrganizations: util.SplitString(sec.Key("allowed_organizations").String()), } } } diff --git a/pkg/tsdb/influxdb/query.go b/pkg/tsdb/influxdb/query.go index 19f374b938b..f271c5d245f 100644 --- a/pkg/tsdb/influxdb/query.go +++ b/pkg/tsdb/influxdb/query.go @@ -34,18 +34,13 @@ func (query *Query) Build(queryContext *tsdb.QueryContext) (string, error) { return "", err } - res = replaceVariable(res, "$timeFilter", query.renderTimeFilter(queryContext)) - res = replaceVariable(res, "$interval", interval.Text) - res = replaceVariable(res, "$__interval_ms", strconv.FormatInt(interval.Value.Nanoseconds()/int64(time.Millisecond), 10)) - res = replaceVariable(res, "$__interval", interval.Text) + res = strings.Replace(res, "$timeFilter", query.renderTimeFilter(queryContext), -1) + res = strings.Replace(res, "$interval", interval.Text, -1) + res = strings.Replace(res, "$__interval_ms", strconv.FormatInt(interval.Value.Nanoseconds()/int64(time.Millisecond), 10), -1) + res = strings.Replace(res, "$__interval", interval.Text, -1) return res, nil } -func replaceVariable(str string, variable string, value string) string { - count := strings.Count(str, variable) - return strings.Replace(str, variable, value, count) -} - func getDefinedInterval(query *Query, queryContext *tsdb.QueryContext) (*tsdb.Interval, error) { defaultInterval := 
tsdb.CalculateInterval(queryContext.TimeRange) diff --git a/pkg/tsdb/influxdb/query_part.go b/pkg/tsdb/influxdb/query_part.go index 3145ff1b333..9c41fbbedba 100644 --- a/pkg/tsdb/influxdb/query_part.go +++ b/pkg/tsdb/influxdb/query_part.go @@ -32,6 +32,15 @@ func init() { renders["median"] = QueryDefinition{Renderer: functionRenderer} renders["sum"] = QueryDefinition{Renderer: functionRenderer} + renders["holt_winters"] = QueryDefinition{ + Renderer: functionRenderer, + Params: []DefinitionParameters{{Name: "number", Type: "number"}, {Name: "season", Type: "number"}}, + } + renders["holt_winters_with_fit"] = QueryDefinition{ + Renderer: functionRenderer, + Params: []DefinitionParameters{{Name: "number", Type: "number"}, {Name: "season", Type: "number"}}, + } + renders["derivative"] = QueryDefinition{ Renderer: functionRenderer, Params: []DefinitionParameters{{Name: "duration", Type: "interval"}}, @@ -49,7 +58,7 @@ func init() { renders["stddev"] = QueryDefinition{Renderer: functionRenderer} renders["time"] = QueryDefinition{ Renderer: functionRenderer, - Params: []DefinitionParameters{{Name: "interval", Type: "time"}}, + Params: []DefinitionParameters{{Name: "interval", Type: "time"}, {Name: "offset", Type: "time"}}, } renders["fill"] = QueryDefinition{ Renderer: functionRenderer, diff --git a/pkg/tsdb/models.go b/pkg/tsdb/models.go index 838767dd5d9..5ae27867c44 100644 --- a/pkg/tsdb/models.go +++ b/pkg/tsdb/models.go @@ -27,6 +27,7 @@ type Request struct { type Response struct { BatchTimings []*BatchTiming `json:"timings"` Results map[string]*QueryResult `json:"results"` + Message string `json:"message,omitempty"` } type BatchTiming struct { @@ -45,18 +46,30 @@ func (br *BatchResult) WithError(err error) *BatchResult { } type QueryResult struct { - Error error `json:"-"` - ErrorString string `json:"error"` - RefId string `json:"refId"` - Series TimeSeriesSlice `json:"series"` + Error error `json:"-"` + ErrorString string `json:"error,omitempty"` + RefId string `json:"refId"` + Meta *simplejson.Json `json:"meta,omitempty"` + Series TimeSeriesSlice `json:"series"` + Tables []*Table `json:"tables"` } type TimeSeries struct { Name string `json:"name"` Points TimeSeriesPoints `json:"points"` - Tags map[string]string `json:"tags"` + Tags map[string]string `json:"tags,omitempty"` } +type Table struct { + Columns []TableColumn `json:"columns"` + Rows []RowValues `json:"rows"` +} + +type TableColumn struct { + Text string `json:"text"` +} + +type RowValues []interface{} type TimePoint [2]null.Float type TimeSeriesPoints []TimePoint type TimeSeriesSlice []*TimeSeries diff --git a/pkg/tsdb/mysql/macros.go b/pkg/tsdb/mysql/macros.go new file mode 100644 index 00000000000..def2fde9fcc --- /dev/null +++ b/pkg/tsdb/mysql/macros.go @@ -0,0 +1,80 @@ +package mysql + +import ( + "fmt" + "regexp" + + "github.com/grafana/grafana/pkg/tsdb" +) + +//const rsString = `(?:"([^"]*)")`; +const rsIdentifier = `([_a-zA-Z0-9]+)` +const sExpr = `\$` + rsIdentifier + `\(([^\)]*)\)` + +type SqlMacroEngine interface { + Interpolate(sql string) (string, error) +} + +type MySqlMacroEngine struct { + TimeRange *tsdb.TimeRange +} + +func NewMysqlMacroEngine(timeRange *tsdb.TimeRange) SqlMacroEngine { + return &MySqlMacroEngine{ + TimeRange: timeRange, + } +} + +func (m *MySqlMacroEngine) Interpolate(sql string) (string, error) { + rExp, _ := regexp.Compile(sExpr) + var macroError error + + sql = ReplaceAllStringSubmatchFunc(rExp, sql, func(groups []string) string { + res, err := m.EvaluateMacro(groups[1], groups[2:]) + if 
err != nil && macroError == nil { + macroError = err + return "macro_error()" + } + return res + }) + + if macroError != nil { + return "", macroError + } + + return sql, nil +} + +func ReplaceAllStringSubmatchFunc(re *regexp.Regexp, str string, repl func([]string) string) string { + result := "" + lastIndex := 0 + + for _, v := range re.FindAllSubmatchIndex([]byte(str), -1) { + groups := []string{} + for i := 0; i < len(v); i += 2 { + groups = append(groups, str[v[i]:v[i+1]]) + } + + result += str[lastIndex:v[0]] + repl(groups) + lastIndex = v[1] + } + + return result + str[lastIndex:] +} + +func (m *MySqlMacroEngine) EvaluateMacro(name string, args []string) (string, error) { + switch name { + case "__time": + if len(args) == 0 { + return "", fmt.Errorf("missing time column argument for macro %v", name) + } + return fmt.Sprintf("UNIX_TIMESTAMP(%s) as time_sec", args[0]), nil + case "__timeFilter": + if len(args) == 0 { + return "", fmt.Errorf("missing time column argument for macro %v", name) + } + return fmt.Sprintf("%s > FROM_UNIXTIME(%d) AND %s < FROM_UNIXTIME(%d)", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil + default: + return "", fmt.Errorf("Unknown macro %v", name) + } +} diff --git a/pkg/tsdb/mysql/macros_test.go b/pkg/tsdb/mysql/macros_test.go new file mode 100644 index 00000000000..5b6b885ff0e --- /dev/null +++ b/pkg/tsdb/mysql/macros_test.go @@ -0,0 +1,43 @@ +package mysql + +import ( + "testing" + + "github.com/grafana/grafana/pkg/tsdb" + . "github.com/smartystreets/goconvey/convey" +) + +func TestMacroEngine(t *testing.T) { + Convey("MacroEngine", t, func() { + + Convey("interpolate __time function", func() { + engine := &MySqlMacroEngine{} + + sql, err := engine.Interpolate("select $__time(time_column)") + So(err, ShouldBeNil) + + So(sql, ShouldEqual, "select UNIX_TIMESTAMP(time_column) as time_sec") + }) + + Convey("interpolate __time function wrapped in aggregation", func() { + engine := &MySqlMacroEngine{} + + sql, err := engine.Interpolate("select min($__time(time_column))") + So(err, ShouldBeNil) + + So(sql, ShouldEqual, "select min(UNIX_TIMESTAMP(time_column) as time_sec)") + }) + + Convey("interpolate __timeFilter function", func() { + engine := &MySqlMacroEngine{ + TimeRange: &tsdb.TimeRange{From: "5m", To: "now"}, + } + + sql, err := engine.Interpolate("WHERE $__timeFilter(time_column)") + So(err, ShouldBeNil) + + So(sql, ShouldEqual, "WHERE time_column > FROM_UNIXTIME(18446744066914186738) AND time_column < FROM_UNIXTIME(18446744066914187038)") + }) + + }) +} diff --git a/pkg/tsdb/mysql/mysql.go b/pkg/tsdb/mysql/mysql.go index be59079c65c..d4bd9dbacc1 100644 --- a/pkg/tsdb/mysql/mysql.go +++ b/pkg/tsdb/mysql/mysql.go @@ -7,9 +7,13 @@ import ( "strconv" "sync" + "time" + + "github.com/go-sql-driver/mysql" "github.com/go-xorm/core" "github.com/go-xorm/xorm" "github.com/grafana/grafana/pkg/components/null" + "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/tsdb" @@ -81,6 +85,7 @@ func (e *MysqlExecutor) Execute(ctx context.Context, queries tsdb.QuerySlice, co QueryResults: make(map[string]*tsdb.QueryResult), } + macroEngine := NewMysqlMacroEngine(context.TimeRange) session := e.engine.NewSession() defer session.Close() db := session.DB() @@ -91,48 +96,167 @@ func (e *MysqlExecutor) Execute(ctx context.Context, queries tsdb.QuerySlice, co continue } + queryResult := &tsdb.QueryResult{Meta: 
simplejson.New(), RefId: query.RefId} + result.QueryResults[query.RefId] = queryResult + + rawSql, err := macroEngine.Interpolate(rawSql) + if err != nil { + queryResult.Error = err + continue + } + + queryResult.Meta.Set("sql", rawSql) + rows, err := db.Query(rawSql) if err != nil { - result.QueryResults[query.RefId] = &tsdb.QueryResult{Error: err} + queryResult.Error = err continue } defer rows.Close() - result.QueryResults[query.RefId] = e.TransformToTimeSeries(query, rows) + format := query.Model.Get("format").MustString("time_series") + + switch format { + case "time_series": + err := e.TransformToTimeSeries(query, rows, queryResult) + if err != nil { + queryResult.Error = err + continue + } + case "table": + err := e.TransformToTable(query, rows, queryResult) + if err != nil { + queryResult.Error = err + continue + } + } } return result } -func (e MysqlExecutor) TransformToTimeSeries(query *tsdb.Query, rows *core.Rows) *tsdb.QueryResult { - result := &tsdb.QueryResult{RefId: query.RefId} +func (e MysqlExecutor) TransformToTable(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult) error { + columnNames, err := rows.Columns() + columnCount := len(columnNames) + + if err != nil { + return err + } + + table := &tsdb.Table{ + Columns: make([]tsdb.TableColumn, columnCount), + Rows: make([]tsdb.RowValues, 0), + } + + for i, name := range columnNames { + table.Columns[i].Text = name + } + + columnTypes, err := rows.ColumnTypes() + if err != nil { + return err + } + + rowLimit := 1000000 + rowCount := 0 + + for ; rows.Next(); rowCount += 1 { + if rowCount > rowLimit { + return fmt.Errorf("MySQL query row limit exceeded, limit %d", rowLimit) + } + + values, err := e.getTypedRowData(columnTypes, rows) + if err != nil { + return err + } + + table.Rows = append(table.Rows, values) + } + + result.Tables = append(result.Tables, table) + result.Meta.Set("rowCount", rowCount) + return nil +} + +func (e MysqlExecutor) getTypedRowData(types []*sql.ColumnType, rows *core.Rows) (tsdb.RowValues, error) { + values := make([]interface{}, len(types)) + + for i, stype := range types { + switch stype.DatabaseTypeName() { + case mysql.FieldTypeNameTiny: + values[i] = new(int8) + case mysql.FieldTypeNameInt24: + values[i] = new(int32) + case mysql.FieldTypeNameShort: + values[i] = new(int16) + case mysql.FieldTypeNameVarString: + values[i] = new(string) + case mysql.FieldTypeNameVarChar: + values[i] = new(string) + case mysql.FieldTypeNameLong: + values[i] = new(int) + case mysql.FieldTypeNameLongLong: + values[i] = new(int64) + case mysql.FieldTypeNameDouble: + values[i] = new(float64) + case mysql.FieldTypeNameDecimal: + values[i] = new(float32) + case mysql.FieldTypeNameNewDecimal: + values[i] = new(float64) + case mysql.FieldTypeNameTimestamp: + values[i] = new(time.Time) + case mysql.FieldTypeNameDateTime: + values[i] = new(time.Time) + case mysql.FieldTypeNameTime: + values[i] = new(time.Duration) + case mysql.FieldTypeNameYear: + values[i] = new(int16) + case mysql.FieldTypeNameNULL: + values[i] = nil + default: + return nil, fmt.Errorf("Database type %s not supported", stype.DatabaseTypeName()) + } + } + + if err := rows.Scan(values...); err != nil { + return nil, err + } + + return values, nil +} + +func (e MysqlExecutor) TransformToTimeSeries(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult) error { pointsBySeries := make(map[string]*tsdb.TimeSeries) columnNames, err := rows.Columns() if err != nil { - result.Error = err - return result + return err } rowData := 
NewStringStringScan(columnNames) - for rows.Next() { + rowLimit := 1000000 + rowCount := 0 + + for ; rows.Next(); rowCount += 1 { + if rowCount > rowLimit { + return fmt.Errorf("MySQL query row limit exceeded, limit %d", rowLimit) + } + err := rowData.Update(rows.Rows) if err != nil { - e.log.Error("Mysql response parsing", "error", err) - result.Error = err - return result + e.log.Error("MySQL response parsing", "error", err) + return fmt.Errorf("MySQL response parsing error %v", err) } if rowData.metric == "" { rowData.metric = "Unknown" } - e.log.Info("Rows", "metric", rowData.metric, "time", rowData.time, "value", rowData.value) + //e.log.Debug("Rows", "metric", rowData.metric, "time", rowData.time, "value", rowData.value) if !rowData.time.Valid { - result.Error = fmt.Errorf("Found row with no time value") - return result + return fmt.Errorf("Found row with no time value") } if series, exist := pointsBySeries[rowData.metric]; exist { @@ -148,7 +272,8 @@ func (e MysqlExecutor) TransformToTimeSeries(query *tsdb.Query, rows *core.Rows) result.Series = append(result.Series, value) } - return result + result.Meta.Set("rowCount", rowCount) + return nil } type stringStringScan struct { diff --git a/pkg/tsdb/opentsdb/opentsdb.go b/pkg/tsdb/opentsdb/opentsdb.go index c0ba6603b20..987f341c7dc 100644 --- a/pkg/tsdb/opentsdb/opentsdb.go +++ b/pkg/tsdb/opentsdb/opentsdb.go @@ -191,7 +191,7 @@ func (e *OpenTsdbExecutor) buildMetric(query *tsdb.Query) map[string]interface{} } if !counterMaxCheck && (!resetValueCheck || resetValue.MustFloat64() == 0) { - rateOptions["dropcounter"] = true + rateOptions["dropResets"] = true } metric["rateOptions"] = rateOptions diff --git a/pkg/tsdb/time_range_test.go b/pkg/tsdb/time_range_test.go index 5412d0d05f3..5d89e3977b8 100644 --- a/pkg/tsdb/time_range_test.go +++ b/pkg/tsdb/time_range_test.go @@ -52,8 +52,8 @@ func TestTimeRange(t *testing.T) { }) Convey("now-10m ", func() { - fiveMinAgo, _ := time.ParseDuration("-10m") - expected := now.Add(fiveMinAgo) + tenMinAgo, _ := time.ParseDuration("-10m") + expected := now.Add(tenMinAgo) res, err := tr.ParseTo() So(err, ShouldBeNil) So(res.Unix(), ShouldEqual, expected.Unix()) diff --git a/pkg/util/encryption.go b/pkg/util/encryption.go index 42586ddac8b..2f8b5d7af56 100644 --- a/pkg/util/encryption.go +++ b/pkg/util/encryption.go @@ -5,26 +5,25 @@ import ( "crypto/cipher" "crypto/rand" "crypto/sha256" + "errors" "io" - - "github.com/grafana/grafana/pkg/log" ) const saltLength = 8 -func Decrypt(payload []byte, secret string) []byte { +func Decrypt(payload []byte, secret string) ([]byte, error) { salt := payload[:saltLength] key := encryptionKeyToBytes(secret, string(salt)) block, err := aes.NewCipher(key) if err != nil { - log.Fatal(4, err.Error()) + return nil, err } // The IV needs to be unique, but not secure. Therefore it's common to // include it at the beginning of the ciphertext. if len(payload) < aes.BlockSize { - log.Fatal(4, "payload too short") + return nil, errors.New("payload too short") } iv := payload[saltLength : saltLength+aes.BlockSize] payload = payload[saltLength+aes.BlockSize:] @@ -33,16 +32,16 @@ func Decrypt(payload []byte, secret string) []byte { // XORKeyStream can work in-place if the two arguments are the same. 
stream.XORKeyStream(payload, payload) - return payload + return payload, nil } -func Encrypt(payload []byte, secret string) []byte { +func Encrypt(payload []byte, secret string) ([]byte, error) { salt := GetRandomString(saltLength) key := encryptionKeyToBytes(secret, salt) block, err := aes.NewCipher(key) if err != nil { - log.Fatal(4, err.Error()) + return nil, err } // The IV needs to be unique, but not secure. Therefore it's common to @@ -51,13 +50,13 @@ func Encrypt(payload []byte, secret string) []byte { copy(ciphertext[:saltLength], []byte(salt)) iv := ciphertext[saltLength : saltLength+aes.BlockSize] if _, err := io.ReadFull(rand.Reader, iv); err != nil { - log.Fatal(4, err.Error()) + return nil, err } stream := cipher.NewCFBEncrypter(block, iv) stream.XORKeyStream(ciphertext[saltLength+aes.BlockSize:], payload) - return ciphertext + return ciphertext, nil } // Key needs to be 32bytes diff --git a/pkg/util/encryption_test.go b/pkg/util/encryption_test.go index 5f1dc18fea3..908931c9516 100644 --- a/pkg/util/encryption_test.go +++ b/pkg/util/encryption_test.go @@ -18,9 +18,11 @@ func TestEncryption(t *testing.T) { }) Convey("When decrypting basic payload", t, func() { - encrypted := Encrypt([]byte("grafana"), "1234") - decrypted := Decrypt(encrypted, "1234") + encrypted, encryptErr := Encrypt([]byte("grafana"), "1234") + decrypted, decryptErr := Decrypt(encrypted, "1234") + So(encryptErr, ShouldBeNil) + So(decryptErr, ShouldBeNil) So(string(decrypted), ShouldEqual, "grafana") }) diff --git a/pkg/util/strings.go b/pkg/util/strings.go index 8598949c2cb..3ccf8b8ce7e 100644 --- a/pkg/util/strings.go +++ b/pkg/util/strings.go @@ -1,5 +1,9 @@ package util +import ( + "regexp" +) + func StringsFallback2(val1 string, val2 string) string { return stringsFallback(val1, val2) } @@ -16,3 +20,11 @@ func stringsFallback(vals ...string) string { } return "" } + +func SplitString(str string) []string { + if len(str) == 0 { + return []string{} + } + + return regexp.MustCompile("[, ]+").Split(str, -1) +} diff --git a/pkg/util/strings_test.go b/pkg/util/strings_test.go index c959dfd1d54..ac8653158cc 100644 --- a/pkg/util/strings_test.go +++ b/pkg/util/strings_test.go @@ -13,3 +13,14 @@ func TestStringsUtil(t *testing.T) { So(StringsFallback3("", "", "3"), ShouldEqual, "3") }) } + +func TestSplitString(t *testing.T) { + Convey("Splits strings correctly", t, func() { + So(SplitString(""), ShouldResemble, []string{}) + So(SplitString("test"), ShouldResemble, []string{"test"}) + So(SplitString("test1 test2 test3"), ShouldResemble, []string{"test1", "test2", "test3"}) + So(SplitString("test1,test2,test3"), ShouldResemble, []string{"test1", "test2", "test3"}) + So(SplitString("test1, test2, test3"), ShouldResemble, []string{"test1", "test2", "test3"}) + So(SplitString("test1 , test2 test3"), ShouldResemble, []string{"test1", "test2", "test3"}) + }) +} diff --git a/public/app/core/components/grafana_app.ts b/public/app/core/components/grafana_app.ts index f48d941a8ba..5a677094754 100644 --- a/public/app/core/components/grafana_app.ts +++ b/public/app/core/components/grafana_app.ts @@ -105,10 +105,14 @@ export function grafanaAppDirective(playlistSrv, contextSrv) { if (pageClass) { body.removeClass(pageClass); } - pageClass = data.$$route.pageClass; - if (pageClass) { - body.addClass(pageClass); + + if (data.$$route) { + pageClass = data.$$route.pageClass; + if (pageClass) { + body.addClass(pageClass); + } } + $("#tooltip, .tooltip").remove(); // check for kiosk url param @@ -194,6 +198,15 @@ export function 
grafanaAppDirective(playlistSrv, contextSrv) { }); } } + + // hide menus + var openMenus = body.find('.navbar-page-btn--open'); + if (openMenus.length > 0) { + if (target.parents('.navbar-page-btn--open').length === 0) { + openMenus.removeClass('navbar-page-btn--open'); + } + } + // hide sidemenu if (!ignoreSideMenuHide && !contextSrv.pinned && body.find('.sidemenu').length > 0) { if (target.parents('.sidemenu').length === 0) { diff --git a/public/app/core/components/help/help.html b/public/app/core/components/help/help.html index 3356f21d452..c07d57a0ffc 100644 --- a/public/app/core/components/help/help.html +++ b/public/app/core/components/help/help.html @@ -1,7 +1,7 @@
[help.html hunk and a second hunk from what appears to be an HTML email template (@@ -107,11 +106,11 @@ td[class="stack-column-center"]): markup stripped during extraction; no template text survives]
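The MySQL macro engine added above in pkg/tsdb/mysql/macros.go hand-rolls `ReplaceAllStringSubmatchFunc` because Go's regexp package only ships a whole-match `ReplaceAllStringFunc`. In TypeScript the capture groups arrive in the replacer for free, so the same interpolation technique can be sketched compactly; this is a simplified illustration, with the time range reduced to plain epoch seconds and error collection reduced to a throw:

```typescript
// Simplified sketch of the $__time / $__timeFilter interpolation from
// pkg/tsdb/mysql/macros.go; epoch seconds stand in for tsdb.TimeRange,
// and an unknown macro throws (the Go version collects the error instead).
const MACRO_RE = /\$([_a-zA-Z0-9]+)\(([^)]*)\)/g;

function interpolate(sql: string, from: number, to: number): string {
  return sql.replace(MACRO_RE, (_match, name, arg) => {
    switch (name) {
      case '__time':
        return `UNIX_TIMESTAMP(${arg}) as time_sec`;
      case '__timeFilter':
        return `${arg} > FROM_UNIXTIME(${from}) AND ${arg} < FROM_UNIXTIME(${to})`;
      default:
        throw new Error(`Unknown macro ${name}`);
    }
  });
}

// usage
interpolate('SELECT $__time(created) FROM t WHERE $__timeFilter(created)',
            1496000000, 1496003600);
// => SELECT UNIX_TIMESTAMP(created) as time_sec FROM t
//    WHERE created > FROM_UNIXTIME(1496000000) AND created < FROM_UNIXTIME(1496003600)
```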
    diff --git a/public/app/features/alerting/alert_def.ts b/public/app/features/alerting/alert_def.ts index c65a35878e7..d80c299ec6d 100644 --- a/public/app/features/alerting/alert_def.ts +++ b/public/app/features/alerting/alert_def.ts @@ -91,17 +91,9 @@ function getStateDisplayModel(state) { stateClass: 'alert-state-warning' }; } - case 'execution_error': { - return { - text: 'EXECUTION ERROR', - iconClass: 'icon-gf icon-gf-critical', - stateClass: 'alert-state-critical' - }; - } - case 'paused': { return { - text: 'paused', + text: 'PAUSED', iconClass: "fa fa-pause", stateClass: 'alert-state-paused' }; @@ -131,6 +123,29 @@ function joinEvalMatches(matches, separator: string) { }, []).join(separator); } +function getAlertAnnotationInfo(ah) { + + // backward compatability, can be removed in grafana 5.x + // old way stored evalMatches in data property directly, + // new way stores it in evalMatches property on new data object + + if (_.isArray(ah.data)) { + return joinEvalMatches(ah.data, ', '); + } else if (_.isArray(ah.data.evalMatches)) { + return joinEvalMatches(ah.data.evalMatches, ', '); + } + + if (ah.data.error) { + return "Error: " + ah.data.error; + } + + if (ah.data.noData || ah.data.no_data) { + return "No Data"; + } + + return ""; +} + export default { alertQueryDef: alertQueryDef, getStateDisplayModel: getStateDisplayModel, @@ -141,6 +156,6 @@ export default { executionErrorModes: executionErrorModes, reducerTypes: reducerTypes, createReducerPart: createReducerPart, - joinEvalMatches: joinEvalMatches, + getAlertAnnotationInfo: getAlertAnnotationInfo, alertStateSortScore: alertStateSortScore, }; diff --git a/public/app/features/alerting/alert_list_ctrl.ts b/public/app/features/alerting/alert_list_ctrl.ts index c74f61c8a1c..169d38612c5 100644 --- a/public/app/features/alerting/alert_list_ctrl.ts +++ b/public/app/features/alerting/alert_list_ctrl.ts @@ -2,13 +2,12 @@ import angular from 'angular'; import _ from 'lodash'; -import coreModule from '../../core/core_module'; -import appEvents from '../../core/app_events'; import moment from 'moment'; + +import {coreModule, appEvents} from 'app/core/core'; import alertDef from './alert_def'; export class AlertListCtrl { - alerts: any; stateFilters = [ {text: 'All', value: null}, @@ -17,13 +16,15 @@ export class AlertListCtrl { {text: 'No Data', value: 'no_data'}, {text: 'Paused', value: 'paused'}, ]; - filters = { state: 'ALL' }; + navModel: any; /** @ngInject */ - constructor(private backendSrv, private $location, private $scope) { + constructor(private backendSrv, private $location, private $scope, navModelSrv) { + this.navModel = navModelSrv.getAlertingNav(0); + var params = $location.search(); this.filters.state = params.state || null; this.loadAlerts(); diff --git a/public/app/features/alerting/alert_tab_ctrl.ts b/public/app/features/alerting/alert_tab_ctrl.ts index b21e5363ad8..938b24ebc21 100644 --- a/public/app/features/alerting/alert_tab_ctrl.ts +++ b/public/app/features/alerting/alert_tab_ctrl.ts @@ -81,16 +81,7 @@ export class AlertTabCtrl { this.alertHistory = _.map(res, ah => { ah.time = moment(ah.time).format('MMM D, YYYY HH:mm:ss'); ah.stateModel = alertDef.getStateDisplayModel(ah.newState); - ah.metrics = alertDef.joinEvalMatches(ah.data, ', '); - - if (ah.data.errorMessage) { - ah.metrics = "Error: " + ah.data.errorMessage; - } - - if (ah.data.no_data) { - ah.metrics = "(due to no data)"; - } - + ah.info = alertDef.getAlertAnnotationInfo(ah); return ah; }); }); diff --git 
a/public/app/features/alerting/notification_edit_ctrl.ts b/public/app/features/alerting/notification_edit_ctrl.ts index 6ad11cb9023..768cc16266e 100644 --- a/public/app/features/alerting/notification_edit_ctrl.ts +++ b/public/app/features/alerting/notification_edit_ctrl.ts @@ -7,6 +7,7 @@ import {appEvents, coreModule} from 'app/core/core'; export class AlertNotificationEditCtrl { theForm: any; + navModel: any; testSeverity = "critical"; notifiers: any; notifierTemplateId: string; @@ -23,7 +24,9 @@ export class AlertNotificationEditCtrl { }; /** @ngInject */ - constructor(private $routeParams, private backendSrv, private $location, private $templateCache) { + constructor(private $routeParams, private backendSrv, private $location, private $templateCache, navModelSrv) { + this.navModel = navModelSrv.getAlertingNav(); + this.backendSrv.get(`/api/alert-notifiers`).then(notifiers => { this.notifiers = notifiers; diff --git a/public/app/features/alerting/notifications_list_ctrl.ts b/public/app/features/alerting/notifications_list_ctrl.ts index 705f3a27c4f..c55668c1ed2 100644 --- a/public/app/features/alerting/notifications_list_ctrl.ts +++ b/public/app/features/alerting/notifications_list_ctrl.ts @@ -2,16 +2,19 @@ import angular from 'angular'; import _ from 'lodash'; -import coreModule from '../../core/core_module'; import config from 'app/core/config'; -export class AlertNotificationsListCtrl { +import {coreModule} from 'app/core/core'; + +export class AlertNotificationsListCtrl { notifications: any; + navModel: any; /** @ngInject */ - constructor(private backendSrv, private $scope) { + constructor(private backendSrv, private $scope, navModelSrv) { this.loadNotifications(); + this.navModel = navModelSrv.getAlertingNav(1); } loadNotifications() { diff --git a/public/app/features/alerting/partials/alert_list.html b/public/app/features/alerting/partials/alert_list.html index 5b723386904..cb76e381bec 100644 --- a/public/app/features/alerting/partials/alert_list.html +++ b/public/app/features/alerting/partials/alert_list.html @@ -1,5 +1,4 @@ - - +
@@ -62,37 +83,53 @@
[alert_list.html and a following form template whose diff header was lost in extraction: markup stripped. The surviving labels show Name, Options and Query sections, with the "Datasource" label renamed to "Data source"]
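For context on the `getAlertAnnotationInfo` helper added to alert_def.ts above: it folds the old and new alert-history payload shapes into one display string. The shapes it reconciles look like this; the concrete values are invented for illustration:

```typescript
// Invented sample payloads for the shapes getAlertAnnotationInfo handles.

// pre-4.4 history entries stored evalMatches directly as the data array
const legacyEntry = { data: [{ metric: 'cpu', value: 92 }] };

// newer entries wrap a data object carrying evalMatches / error / noData
const matchEntry  = { data: { evalMatches: [{ metric: 'cpu', value: 92 }] } };
const errorEntry  = { data: { error: 'request timeout' } }; // -> "Error: request timeout"
const noDataEntry = { data: { noData: true } };             // -> "No Data"
```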
    diff --git a/public/app/features/annotations/partials/event_editor.html b/public/app/features/annotations/partials/event_editor.html new file mode 100644 index 00000000000..6e44b6f768d --- /dev/null +++ b/public/app/features/annotations/partials/event_editor.html @@ -0,0 +1,38 @@ + +
[new annotation event editor template, 38 lines: markup stripped during extraction. Surviving labels: an "Add annotation" heading, Title, Time, Start, End and Description fields, and a Cancel action]
    + diff --git a/public/app/features/dashboard/all.js b/public/app/features/dashboard/all.js index c362f9cd032..cdea7aab3c6 100644 --- a/public/app/features/dashboard/all.js +++ b/public/app/features/dashboard/all.js @@ -1,23 +1,26 @@ define([ './dashboard_ctrl', './alerting_srv', + './history/history', './dashboardLoaderSrv', './dashnav/dashnav', './submenu/submenu', - './saveDashboardAsCtrl', + './save_as_modal', + './save_modal', './shareModalCtrl', './shareSnapshotCtrl', './dashboard_srv', './viewStateSrv', './time_srv', './unsavedChangesSrv', + './unsaved_changes_modal', './timepicker/timepicker', './graphiteImportCtrl', './impression_store', './upload', './import/dash_import', './export/export_modal', - './dash_list_ctrl', + './export_data/export_data_modal', './ad_hoc_filters', './row/row_ctrl', './repeat_option/repeat_option', diff --git a/public/app/features/dashboard/dash_list_ctrl.ts b/public/app/features/dashboard/dash_list_ctrl.ts deleted file mode 100644 index f08d7507e65..00000000000 --- a/public/app/features/dashboard/dash_list_ctrl.ts +++ /dev/null @@ -1,11 +0,0 @@ -/// - -import coreModule from 'app/core/core_module'; - -export class DashListCtrl { - /** @ngInject */ - constructor() { - } -} - -coreModule.controller('DashListCtrl', DashListCtrl); diff --git a/public/app/features/dashboard/dashboard_srv.ts b/public/app/features/dashboard/dashboard_srv.ts index 5939ff83a15..9d622f4bbc7 100644 --- a/public/app/features/dashboard/dashboard_srv.ts +++ b/public/app/features/dashboard/dashboard_srv.ts @@ -23,32 +23,7 @@ export class DashboardSrv { return this.dash; } - saveDashboard(options) { - if (!this.dash.meta.canSave && options.makeEditable !== true) { - return Promise.resolve(); - } - - if (this.dash.title === 'New dashboard') { - return this.saveDashboardAs(); - } - - var clone = this.dash.getSaveModelClone(); - - return this.backendSrv.saveDashboard(clone, options).then(data => { - this.dash.version = data.version; - - this.$rootScope.appEvent('dashboard-saved', this.dash); - - var dashboardUrl = '/dashboard/db/' + data.slug; - if (dashboardUrl !== this.$location.path()) { - this.$location.url(dashboardUrl); - } - - this.$rootScope.appEvent('alert-success', ['Dashboard saved', 'Saved as ' + clone.title]); - }).catch(this.handleSaveDashboardError.bind(this)); - } - - handleSaveDashboardError(err) { + handleSaveDashboardError(clone, err) { if (err.data && err.data.status === "version-mismatch") { err.isHandled = true; @@ -59,7 +34,7 @@ export class DashboardSrv { yesText: "Save & Overwrite", icon: "fa-warning", onConfirm: () => { - this.saveDashboard({overwrite: true}); + this.save(clone, {overwrite: true}); } }); } @@ -74,7 +49,7 @@ export class DashboardSrv { yesText: "Save & Overwrite", icon: "fa-warning", onConfirm: () => { - this.saveDashboard({overwrite: true}); + this.save(clone, {overwrite: true}); } }); } @@ -90,28 +65,73 @@ export class DashboardSrv { icon: "fa-warning", altActionText: "Save As", onAltAction: () => { - this.saveDashboardAs(); + this.showSaveAsModal(); }, onConfirm: () => { - this.saveDashboard({overwrite: true}); + this.save(clone, {overwrite: true}); } }); } } - saveDashboardAs() { + postSave(clone, data) { + this.dash.version = data.version; + + var dashboardUrl = '/dashboard/db/' + data.slug; + if (dashboardUrl !== this.$location.path()) { + this.$location.url(dashboardUrl); + } + + this.$rootScope.appEvent('dashboard-saved', this.dash); + this.$rootScope.appEvent('alert-success', ['Dashboard saved', 'Saved as ' + clone.title]); + } + + 
save(clone, options) { + return this.backendSrv.saveDashboard(clone, options) + .then(this.postSave.bind(this, clone)) + .catch(this.handleSaveDashboardError.bind(this, clone)); + } + + saveDashboard(options, clone) { + if (clone) { + this.setCurrent(this.create(clone, this.dash.meta)); + } + + if (!this.dash.meta.canSave && options.makeEditable !== true) { + return Promise.resolve(); + } + + if (this.dash.title === 'New dashboard') { + return this.showSaveAsModal(); + } + + if (this.dash.version > 0) { + return this.showSaveModal(); + } + + return this.save(this.dash.getSaveModelClone(), options); + } + + showSaveAsModal() { var newScope = this.$rootScope.$new(); newScope.clone = this.dash.getSaveModelClone(); newScope.clone.editable = true; newScope.clone.hideControls = false; this.$rootScope.appEvent('show-modal', { - src: 'public/app/features/dashboard/partials/saveDashboardAs.html', + templateHtml: '', scope: newScope, modalClass: 'modal--narrow' }); } + showSaveModal() { + this.$rootScope.appEvent('show-modal', { + templateHtml: '', + scope: this.$rootScope.$new(), + modalClass: 'modal--narrow' + }); + } } coreModule.service('dashboardSrv', DashboardSrv); diff --git a/public/app/features/dashboard/dashnav/dashnav.html b/public/app/features/dashboard/dashnav/dashnav.html index 7deb46931c2..f1615a0bd1c 100644 --- a/public/app/features/dashboard/dashnav/dashnav.html +++ b/public/app/features/dashboard/dashnav/dashnav.html @@ -1,93 +1,62 @@ - + - - - {{dashboard.title}} - - - - - - - {{dashboard.title}} -   (snapshot) - - - -
[dashnav.html rework and the new history/history.html template: markup stripped during extraction. Surviving text shows the dashboard history UI: a revision table (Version, Date, Updated By, Notes) with per-row "Restore" links and a "Latest" badge, "Fetching more entries…" and "Fetching changes…" loaders, a compare header ("Comparing Version {{ctrl.baseInfo.version}} and Version {{ctrl.newInfo.version}} (Latest)") with per-version "updated by" summaries, and a "Restore to version {{ctrl.baseInfo.version}}" action]
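Stepping back to the dashboard_srv.ts rework above: `save` now threads the clone through both the success and failure paths, so the version-mismatch confirm dialog can retry the identical payload with `overwrite: true`. A condensed sketch of that chain, with `backendSrv` and the confirm dialog stubbed as stand-ins rather than the real services:

```typescript
// Condensed sketch of the reworked save flow; backendSrv and confirm
// are stand-ins, not the real Grafana services.
type SaveOptions = { overwrite?: boolean; message?: string };

class DashboardSrvSketch {
  constructor(
    private backendSrv: { saveDashboard(d: any, o: SaveOptions): Promise<any> },
    private confirm: (onConfirm: () => void) => void) {}

  save(clone: any, options: SaveOptions = {}): Promise<any> {
    return this.backendSrv.saveDashboard(clone, options)
      .then(data => this.postSave(clone, data))
      // the clone is bound into the handler so a retry reuses the same payload
      .catch(err => this.handleSaveError(clone, err));
  }

  private postSave(clone: any, data: any) {
    console.log(`saved "${clone.title}" as version ${data.version}`);
  }

  private handleSaveError(clone: any, err: any) {
    if (err.data && err.data.status === 'version-mismatch') {
      // ask the user, then retry the *same* clone with overwrite set
      this.confirm(() => this.save(clone, { overwrite: true }));
      return;
    }
    throw err;
  }
}
```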
    + diff --git a/public/app/features/dashboard/history/history.ts b/public/app/features/dashboard/history/history.ts new file mode 100644 index 00000000000..10ab3f3fa28 --- /dev/null +++ b/public/app/features/dashboard/history/history.ts @@ -0,0 +1,203 @@ +/// + +import './history_srv'; + +import _ from 'lodash'; +import angular from 'angular'; +import moment from 'moment'; + +import {DashboardModel} from '../model'; +import {HistoryListOpts, RevisionsModel, CalculateDiffOptions, HistorySrv} from './history_srv'; + +export class HistoryListCtrl { + appending: boolean; + dashboard: DashboardModel; + delta: { basic: string; json: string; }; + diff: string; + limit: number; + loading: boolean; + max: number; + mode: string; + revisions: RevisionsModel[]; + start: number; + newInfo: RevisionsModel; + baseInfo: RevisionsModel; + canCompare: boolean; + isNewLatest: boolean; + + /** @ngInject */ + constructor(private $scope, + private $rootScope, + private $location, + private $window, + private $timeout, + private $q, + private historySrv: HistorySrv) { + + this.appending = false; + this.diff = 'basic'; + this.limit = 10; + this.loading = false; + this.max = 2; + this.mode = 'list'; + this.start = 0; + this.canCompare = false; + + this.$rootScope.onAppEvent('dashboard-saved', this.onDashboardSaved.bind(this), $scope); + this.resetFromSource(); + } + + onDashboardSaved() { + this.resetFromSource(); + } + + switchMode(mode: string) { + this.mode = mode; + if (this.mode === 'list') { + this.reset(); + } + } + + dismiss() { + this.$rootScope.appEvent('hide-dash-editor'); + } + + addToLog() { + this.start = this.start + this.limit; + this.getLog(true); + } + + revisionSelectionChanged() { + let selected = _.filter(this.revisions, {checked: true}).length; + this.canCompare = selected === 2; + } + + formatDate(date) { + return this.dashboard.formatDate(date); + } + + formatBasicDate(date) { + const now = this.dashboard.timezone === 'browser' ? moment() : moment.utc(); + const then = this.dashboard.timezone === 'browser' ? moment(date) : moment.utc(date); + return then.from(now); + } + + getDiff(diff: string) { + this.diff = diff; + this.mode = 'compare'; + + // have it already been fetched? + if (this.delta[this.diff]) { + return this.$q.when(this.delta[this.diff]); + } + + const selected = _.filter(this.revisions, {checked: true}); + + this.newInfo = selected[0]; + this.baseInfo = selected[1]; + this.isNewLatest = this.newInfo.version === this.dashboard.version; + + this.loading = true; + const options: CalculateDiffOptions = { + new: { + dashboardId: this.dashboard.id, + version: this.newInfo.version, + }, + base: { + dashboardId: this.dashboard.id, + version: this.baseInfo.version, + }, + diffType: diff, + }; + + return this.historySrv.calculateDiff(options).then(response => { + this.delta[this.diff] = response; + }).catch(() => { + this.mode = 'list'; + }).finally(() => { + this.loading = false; + }); + } + + getLog(append = false) { + this.loading = !append; + this.appending = append; + const options: HistoryListOpts = { + limit: this.limit, + start: this.start, + }; + + return this.historySrv.getHistoryList(this.dashboard, options).then(revisions => { + // set formated dates & default values + for (let rev of revisions) { + rev.createdDateString = this.formatDate(rev.created); + rev.ageString = this.formatBasicDate(rev.created); + rev.checked = false; + } + + this.revisions = append ? 
this.revisions.concat(revisions) : revisions; + + }).catch(err => { + this.loading = false; + }).finally(() => { + this.loading = false; + this.appending = false; + }); + } + + isLastPage() { + return _.find(this.revisions, rev => rev.version === 1); + } + + reset() { + this.delta = { basic: '', json: '' }; + this.diff = 'basic'; + this.mode = 'list'; + this.revisions = _.map(this.revisions, rev => _.extend({}, rev, { checked: false })); + this.canCompare = false; + this.start = 0; + this.isNewLatest = false; + } + + resetFromSource() { + this.revisions = []; + return this.getLog().then(this.reset.bind(this)); + } + + restore(version: number) { + this.$rootScope.appEvent('confirm-modal', { + title: 'Restore version', + text: '', + text2: `Are you sure you want to restore the dashboard to version ${version}? All unsaved changes will be lost.`, + icon: 'fa-history', + yesText: `Yes, restore to version ${version}`, + onConfirm: this.restoreConfirm.bind(this, version), + }); + } + + restoreConfirm(version: number) { + this.loading = true; + return this.historySrv.restoreDashboard(this.dashboard, version).then(response => { + this.$location.path('dashboard/db/' + response.slug); + this.$rootScope.appEvent('alert-success', ['Dashboard restored', 'Restored from version ' + version]); + }).catch(() => { + this.mode = 'list'; + this.loading = false; + }); + } +} + +export function dashboardHistoryDirective() { + return { + restrict: 'E', + templateUrl: 'public/app/features/dashboard/history/history.html', + controller: HistoryListCtrl, + bindToController: true, + controllerAs: 'ctrl', + scope: { + dashboard: "=" + } + }; +} + +angular.module('grafana.directives').directive('gfDashboardHistory', dashboardHistoryDirective); diff --git a/public/app/features/dashboard/history/history_srv.ts b/public/app/features/dashboard/history/history_srv.ts new file mode 100644 index 00000000000..6517c36e1a7 --- /dev/null +++ b/public/app/features/dashboard/history/history_srv.ts @@ -0,0 +1,55 @@ +/// + +import _ from 'lodash'; +import coreModule from 'app/core/core_module'; +import {DashboardModel} from '../model'; + +export interface HistoryListOpts { + limit: number; + start: number; +} + +export interface RevisionsModel { + id: number; + checked: boolean; + dashboardId: number; + parentVersion: number; + version: number; + created: Date; + createdBy: string; + message: string; +} + +export interface CalculateDiffOptions { + new: DiffTarget; + base: DiffTarget; + diffType: string; +} + +export interface DiffTarget { + dashboardId: number; + version: number; + unsavedDashboard?: DashboardModel; // when doing diffs against unsaved dashboard version +} + +export class HistorySrv { + /** @ngInject */ + constructor(private backendSrv, private $q) {} + + getHistoryList(dashboard: DashboardModel, options: HistoryListOpts) { + const id = dashboard && dashboard.id ? dashboard.id : void 0; + return id ? this.backendSrv.get(`api/dashboards/id/${id}/versions`, options) : this.$q.when([]); + } + + calculateDiff(options: CalculateDiffOptions) { + return this.backendSrv.post('api/dashboards/calculate-diff', options); + } + + restoreDashboard(dashboard: DashboardModel, version: number) { + const id = dashboard && dashboard.id ? dashboard.id : void 0; + const url = `api/dashboards/id/${id}/restore`; + return id && _.isNumber(version) ? 
this.backendSrv.post(url, { version }) : this.$q.when({}); + } +} + +coreModule.service('historySrv', HistorySrv); diff --git a/public/app/features/dashboard/model.ts b/public/app/features/dashboard/model.ts index e31a6c1afd0..cdb61ac4ad7 100644 --- a/public/app/features/dashboard/model.ts +++ b/public/app/features/dashboard/model.ts @@ -193,32 +193,22 @@ export class DashboardModel { }); } - toggleEditMode() { - if (!this.meta.canEdit) { - console.log('Not allowed to edit dashboard'); - return; - } - - this.editMode = !this.editMode; - this.updateSubmenuVisibility(); - this.events.emit('edit-mode-changed', this.editMode); - } - setPanelFocus(id) { this.meta.focusPanelId = id; } updateSubmenuVisibility() { - if (this.editMode) { - this.meta.submenuEnabled = true; - return; - } + this.meta.submenuEnabled = (() => { + if (this.links.length > 0) { return true; } - var visibleVars = _.filter(this.templating.list, function(template) { - return template.hide !== 2; - }); + var visibleVars = _.filter(this.templating.list, variable => variable.hide !== 2); + if (visibleVars.length > 0) { return true; } - this.meta.submenuEnabled = visibleVars.length > 0 || this.annotations.list.length > 0 || this.links.length > 0; + var visibleAnnotations = _.filter(this.annotations.list, annotation => annotation.hide !== true); + if (visibleAnnotations.length > 0) { return true; } + + return false; + })(); } getPanelInfoById(panelId) { @@ -254,7 +244,7 @@ export class DashboardModel { return newPanel; } - formatDate(date, format) { + formatDate(date, format?) { date = moment.isMoment(date) ? date : moment(date); format = format || 'YYYY-MM-DD HH:mm:ss'; this.timezone = this.getTimezone(); diff --git a/public/app/features/dashboard/partials/addAnnotationModal.html b/public/app/features/dashboard/partials/addAnnotationModal.html new file mode 100644 index 00000000000..f55f888375f --- /dev/null +++ b/public/app/features/dashboard/partials/addAnnotationModal.html @@ -0,0 +1,65 @@ + + diff --git a/public/app/features/dashboard/partials/dash_list.html b/public/app/features/dashboard/partials/dash_list.html deleted file mode 100644 index 3058715e0e6..00000000000 --- a/public/app/features/dashboard/partials/dash_list.html +++ /dev/null @@ -1,10 +0,0 @@ - - - -
[remainder of the deleted dash_list.html markup stripped during extraction]
diff --git a/public/app/features/dashboard/partials/graphiteImport.html b/public/app/features/dashboard/partials/graphiteImport.html
deleted file mode 100644
index d23d506d17f..00000000000
--- a/public/app/features/dashboard/partials/graphiteImport.html
+++ /dev/null
@@ -1,35 +0,0 @@
[deleted Graphite-Web import template: markup stripped during extraction. Surviving text: a "Load dashboard from Graphite-Web" heading, a Data source picker, and a dashboard listing rendering {{dash.name}}]
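The model.ts change above reduces `updateSubmenuVisibility` to a pure predicate: show the submenu when there is at least one dashboard link, one template variable that is not hidden (`hide !== 2`), or one annotation that is not hidden. Restated as a standalone function with illustrative types:

```typescript
// Standalone restatement of the submenu predicate (types are illustrative).
interface DashboardLike {
  links: any[];
  templating: { list: Array<{ hide?: number }> };
  annotations: { list: Array<{ hide?: boolean }> };
}

function submenuEnabled(dash: DashboardLike): boolean {
  if (dash.links.length > 0) { return true; }

  const visibleVars = dash.templating.list.filter(v => v.hide !== 2);
  if (visibleVars.length > 0) { return true; }

  const visibleAnnotations = dash.annotations.list.filter(a => a.hide !== true);
  return visibleAnnotations.length > 0;
}

// hidden-only lists keep the submenu off, matching dashboard_model_specs below
submenuEnabled({
  links: [],
  templating: { list: [{ hide: 2 }] },
  annotations: { list: [{ hide: true }] },
}); // => false
```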
    diff --git a/public/app/features/dashboard/partials/saveDashboardAs.html b/public/app/features/dashboard/partials/saveDashboardAs.html deleted file mode 100644 index 106f54fb85f..00000000000 --- a/public/app/features/dashboard/partials/saveDashboardAs.html +++ /dev/null @@ -1,27 +0,0 @@ - - diff --git a/public/app/features/dashboard/row/row_ctrl.ts b/public/app/features/dashboard/row/row_ctrl.ts index ce92821dfef..34b03b3c3be 100644 --- a/public/app/features/dashboard/row/row_ctrl.ts +++ b/public/app/features/dashboard/row/row_ctrl.ts @@ -216,7 +216,6 @@ coreModule.directive('panelDropZone', function($timeout) { } if (indrag === true) { - var dropZoneSpan = 12 - row.span; if (dropZoneSpan > 1) { return showPanel(dropZoneSpan, 'Drop Here'); } diff --git a/public/app/features/dashboard/saveDashboardAsCtrl.js b/public/app/features/dashboard/saveDashboardAsCtrl.js deleted file mode 100644 index 09ac7f55d2e..00000000000 --- a/public/app/features/dashboard/saveDashboardAsCtrl.js +++ /dev/null @@ -1,64 +0,0 @@ -define([ - 'angular', -], -function (angular) { - 'use strict'; - - var module = angular.module('grafana.controllers'); - - module.controller('SaveDashboardAsCtrl', function($scope, backendSrv, $location) { - - $scope.init = function() { - $scope.clone.id = null; - $scope.clone.editable = true; - $scope.clone.title = $scope.clone.title + " Copy"; - - // remove alerts - $scope.clone.rows.forEach(function(row) { - row.panels.forEach(function(panel) { - delete panel.alert; - }); - }); - - // remove auto update - delete $scope.clone.autoUpdate; - }; - - function saveDashboard(options) { - return backendSrv.saveDashboard($scope.clone, options).then(function(result) { - $scope.appEvent('alert-success', ['Dashboard saved', 'Saved as ' + $scope.clone.title]); - - $location.url('/dashboard/db/' + result.slug); - - $scope.appEvent('dashboard-saved', $scope.clone); - $scope.dismiss(); - }); - } - - $scope.keyDown = function (evt) { - if (evt.keyCode === 13) { - $scope.saveClone(); - } - }; - - $scope.saveClone = function() { - saveDashboard({overwrite: false}).then(null, function(err) { - if (err.data && err.data.status === "name-exists") { - err.isHandled = true; - - $scope.appEvent('confirm-modal', { - title: 'Conflict', - text: 'Dashboard with the same name exists.', - text2: 'Would you still like to save this dashboard?', - yesText: "Save & Overwrite", - icon: "fa-warning", - onConfirm: function() { - saveDashboard({overwrite: true}); - } - }); - } - }); - }; - }); - -}); diff --git a/public/app/features/dashboard/save_as_modal.ts b/public/app/features/dashboard/save_as_modal.ts new file mode 100644 index 00000000000..0be82755e41 --- /dev/null +++ b/public/app/features/dashboard/save_as_modal.ts @@ -0,0 +1,79 @@ +/// + +import coreModule from 'app/core/core_module'; + +const template = ` + +`; + +export class SaveDashboardAsModalCtrl { + clone: any; + dismiss: () => void; + + /** @ngInject */ + constructor(private $scope, private dashboardSrv) { + var dashboard = this.dashboardSrv.getCurrent(); + this.clone = dashboard.getSaveModelClone(); + this.clone.id = null; + this.clone.title += ' Copy'; + this.clone.editable = true; + this.clone.hideControls = false; + + // remove alerts + this.clone.rows.forEach(row => { + row.panels.forEach(panel => { + delete panel.alert; + }); + }); + + delete this.clone.autoUpdate; + } + + save() { + return this.dashboardSrv.save(this.clone).then(this.dismiss); + } + + keyDown(evt) { + if (evt.keyCode === 13) { + this.save(); + } + } +} + +export function 
saveDashboardAsDirective() { + return { + restrict: 'E', + template: template, + controller: SaveDashboardAsModalCtrl, + bindToController: true, + controllerAs: 'ctrl', + scope: {dismiss: "&"} + }; +} + +coreModule.directive('saveDashboardAsModal', saveDashboardAsDirective); diff --git a/public/app/features/dashboard/save_modal.ts b/public/app/features/dashboard/save_modal.ts new file mode 100644 index 00000000000..01a42655a5f --- /dev/null +++ b/public/app/features/dashboard/save_modal.ts @@ -0,0 +1,88 @@ +/// + +import coreModule from 'app/core/core_module'; + +const template = ` + +`; + +export class SaveDashboardModalCtrl { + message: string; + max: number; + saveForm: any; + dismiss: () => void; + + /** @ngInject */ + constructor(private $scope, private dashboardSrv) { + this.message = ''; + this.max = 64; + } + + save() { + if (!this.saveForm.$valid) { + return; + } + + var dashboard = this.dashboardSrv.getCurrent(); + var saveModel = dashboard.getSaveModelClone(); + var options = {message: this.message}; + + return this.dashboardSrv.save(saveModel, options).then(this.dismiss); + } +} + +export function saveDashboardModalDirective() { + return { + restrict: 'E', + template: template, + controller: SaveDashboardModalCtrl, + bindToController: true, + controllerAs: 'ctrl', + scope: {dismiss: "&"} + }; +} + +coreModule.directive('saveDashboardModal', saveDashboardModalDirective); diff --git a/public/app/features/dashboard/shareSnapshotCtrl.js b/public/app/features/dashboard/shareSnapshotCtrl.js index 361db2d55b4..846dc0f7ed7 100644 --- a/public/app/features/dashboard/shareSnapshotCtrl.js +++ b/public/app/features/dashboard/shareSnapshotCtrl.js @@ -116,6 +116,7 @@ function (angular, _) { return { name: annotation.name, enable: annotation.enable, + iconColor: annotation.iconColor, snapshotData: annotation.snapshotData }; }).value(); diff --git a/public/app/features/dashboard/specs/dashboard_model_specs.ts b/public/app/features/dashboard/specs/dashboard_model_specs.ts index c7d85b8a190..1c7415d342e 100644 --- a/public/app/features/dashboard/specs/dashboard_model_specs.ts +++ b/public/app/features/dashboard/specs/dashboard_model_specs.ts @@ -364,4 +364,85 @@ describe('DashboardModel', function() { }); }); + describe('updateSubmenuVisibility with empty lists', function() { + var model; + + beforeEach(function() { + model = new DashboardModel({}); + model.updateSubmenuVisibility(); + }); + + it('should not enable submmenu', function() { + expect(model.meta.submenuEnabled).to.be(false); + }); + }); + + describe('updateSubmenuVisibility with annotation', function() { + var model; + + beforeEach(function() { + model = new DashboardModel({ + annotations: { + list: [{}] + } + }); + model.updateSubmenuVisibility(); + }); + + it('should enable submmenu', function() { + expect(model.meta.submenuEnabled).to.be(true); + }); + }); + + describe('updateSubmenuVisibility with template var', function() { + var model; + + beforeEach(function() { + model = new DashboardModel({ + templating: { + list: [{}] + } + }); + model.updateSubmenuVisibility(); + }); + + it('should enable submmenu', function() { + expect(model.meta.submenuEnabled).to.be(true); + }); + }); + + describe('updateSubmenuVisibility with hidden template var', function() { + var model; + + beforeEach(function() { + model = new DashboardModel({ + templating: { + list: [{hide: 2}] + } + }); + model.updateSubmenuVisibility(); + }); + + it('should not enable submmenu', function() { + expect(model.meta.submenuEnabled).to.be(false); + }); + }); + + 
describe('updateSubmenuVisibility with hidden annotation toggle', function() { + var model; + + beforeEach(function() { + model = new DashboardModel({ + annotations: { + list: [{hide: true}] + } + }); + model.updateSubmenuVisibility(); + }); + + it('should not enable submmenu', function() { + expect(model.meta.submenuEnabled).to.be(false); + }); + }); + }); diff --git a/public/app/features/dashboard/specs/history_ctrl_specs.ts b/public/app/features/dashboard/specs/history_ctrl_specs.ts new file mode 100644 index 00000000000..0bb09546a34 --- /dev/null +++ b/public/app/features/dashboard/specs/history_ctrl_specs.ts @@ -0,0 +1,315 @@ +import {describe, beforeEach, it, sinon, expect, angularMocks} from 'test/lib/common'; + +import _ from 'lodash'; +import {HistoryListCtrl} from 'app/features/dashboard/history/history'; +import { versions, compare, restore } from 'test/mocks/history-mocks'; +import config from 'app/core/config'; + +describe('HistoryListCtrl', function() { + var RESTORE_ID = 4; + + var ctx: any = {}; + var versionsResponse: any = versions(); + var restoreResponse: any = restore(7, RESTORE_ID); + + beforeEach(angularMocks.module('grafana.core')); + beforeEach(angularMocks.module('grafana.services')); + beforeEach(angularMocks.inject($rootScope => { + ctx.scope = $rootScope.$new(); + })); + + var historySrv; + var $rootScope; + beforeEach(function() { + historySrv = { + getHistoryList: sinon.stub(), + calculateDiff: sinon.stub(), + restoreDashboard: sinon.stub(), + }; + $rootScope = { + appEvent: sinon.spy(), + onAppEvent: sinon.spy(), + }; + }); + + describe('when the history list component is loaded', function() { + var deferred; + + beforeEach(angularMocks.inject(($controller, $q) => { + deferred = $q.defer(); + historySrv.getHistoryList.returns(deferred.promise); + ctx.ctrl = $controller(HistoryListCtrl, { + historySrv, + $rootScope, + $scope: ctx.scope, + }, { + dashboard: { + id: 2, + version: 3, + formatDate: sinon.stub().returns('date'), + } + }); + })); + + it('should immediately attempt to fetch the history list', function() { + expect(historySrv.getHistoryList.calledOnce).to.be(true); + }); + + describe('and the history list is successfully fetched', function() { + beforeEach(function() { + deferred.resolve(versionsResponse); + ctx.ctrl.$scope.$apply(); + }); + + it('should reset the controller\'s state', function() { + expect(ctx.ctrl.mode).to.be('list'); + expect(ctx.ctrl.delta).to.eql({ basic: '', json: '' }); + expect(ctx.ctrl.canCompare).to.be(false); + expect(_.find(ctx.ctrl.revisions, rev => rev.checked)).to.be.undefined; + }); + + it('should indicate loading has finished', function() { + expect(ctx.ctrl.loading).to.be(false); + }); + + it('should store the revisions sorted desc by version id', function() { + expect(ctx.ctrl.revisions[0].version).to.be(4); + expect(ctx.ctrl.revisions[1].version).to.be(3); + expect(ctx.ctrl.revisions[2].version).to.be(2); + expect(ctx.ctrl.revisions[3].version).to.be(1); + }); + + it('should add a checked property to each revision', function() { + var actual = _.filter(ctx.ctrl.revisions, rev => rev.hasOwnProperty('checked')); + expect(actual.length).to.be(4); + }); + + it('should set all checked properties to false on reset', function() { + ctx.ctrl.revisions[0].checked = true; + ctx.ctrl.revisions[2].checked = true; + ctx.ctrl.reset(); + var actual = _.filter(ctx.ctrl.revisions, rev => !rev.checked); + expect(actual.length).to.be(4); + }); + + }); + + describe('and fetching the history list fails', function() { + 
beforeEach(function() { + deferred.reject(new Error('HistoryListError')); + ctx.ctrl.$scope.$apply(); + }); + + it('should reset the controller\'s state', function() { + expect(ctx.ctrl.mode).to.be('list'); + expect(ctx.ctrl.delta).to.eql({ basic: '', json: '' }); + expect(_.find(ctx.ctrl.revisions, rev => rev.checked)).to.be.undefined; + }); + + it('should indicate loading has finished', function() { + expect(ctx.ctrl.loading).to.be(false); + }); + + it('should have an empty revisions list', function() { + expect(ctx.ctrl.revisions).to.eql([]); + }); + }); + + describe('should update the history list when the dashboard is saved', function() { + beforeEach(function() { + ctx.ctrl.dashboard = {version: 3 }; + ctx.ctrl.resetFromSource = sinon.spy(); + }); + + it('should listen for the `dashboard-saved` appEvent', function() { + expect($rootScope.onAppEvent.calledOnce).to.be(true); + expect($rootScope.onAppEvent.getCall(0).args[0]).to.be('dashboard-saved'); + }); + + it('should call `onDashboardSaved` when the appEvent is received', function() { + expect($rootScope.onAppEvent.getCall(0).args[1]).to.not.be(ctx.ctrl.onDashboardSaved); + expect($rootScope.onAppEvent.getCall(0).args[1].toString).to.be(ctx.ctrl.onDashboardSaved.toString); + }); + }); + }); + + describe('when the user wants to compare two revisions', function() { + var deferred; + + beforeEach(angularMocks.inject(($controller, $q) => { + deferred = $q.defer(); + historySrv.getHistoryList.returns($q.when(versionsResponse)); + historySrv.calculateDiff.returns(deferred.promise); + ctx.ctrl = $controller(HistoryListCtrl, { + historySrv, + $rootScope, + $scope: ctx.scope, + }, { + dashboard: { + id: 2, + version: 3, + formatDate: sinon.stub().returns('date'), + } + }); + + ctx.ctrl.$scope.onDashboardSaved = sinon.spy(); + ctx.ctrl.$scope.$apply(); + })); + + it('should have already fetched the history list', function() { + expect(historySrv.getHistoryList.calledOnce).to.be(true); + expect(ctx.ctrl.revisions.length).to.be.above(0); + }); + + it('should check that two valid versions are selected', function() { + // [] + expect(ctx.ctrl.canCompare).to.be(false); + + // single value + ctx.ctrl.revisions = [{checked: true}]; + ctx.ctrl.revisionSelectionChanged(); + expect(ctx.ctrl.canCompare).to.be(false); + + // both values in range + ctx.ctrl.revisions = [{checked: true}, {checked: true}]; + ctx.ctrl.revisionSelectionChanged(); + expect(ctx.ctrl.canCompare).to.be(true); + }); + + describe('and the basic diff is successfully fetched', function() { + beforeEach(function() { + deferred.resolve(compare('basic')); + ctx.ctrl.revisions[1].checked = true; + ctx.ctrl.revisions[3].checked = true; + ctx.ctrl.getDiff('basic'); + ctx.ctrl.$scope.$apply(); + }); + + it('should fetch the basic diff if two valid versions are selected', function() { + expect(historySrv.calculateDiff.calledOnce).to.be(true); + expect(ctx.ctrl.delta.basic).to.be('
    '); + expect(ctx.ctrl.delta.json).to.be(''); + }); + + it('should set the basic diff view as active', function() { + expect(ctx.ctrl.mode).to.be('compare'); + expect(ctx.ctrl.diff).to.be('basic'); + }); + + it('should indicate loading has finished', function() { + expect(ctx.ctrl.loading).to.be(false); + }); + }); + + describe('and the json diff is successfully fetched', function() { + beforeEach(function() { + deferred.resolve(compare('json')); + ctx.ctrl.revisions[1].checked = true; + ctx.ctrl.revisions[3].checked = true; + ctx.ctrl.getDiff('json'); + ctx.ctrl.$scope.$apply(); + }); + + it('should fetch the json diff if two valid versions are selected', function() { + expect(historySrv.calculateDiff.calledOnce).to.be(true); + expect(ctx.ctrl.delta.basic).to.be(''); + expect(ctx.ctrl.delta.json).to.be('
    '); + }); + + it('should set the json diff view as active', function() { + expect(ctx.ctrl.mode).to.be('compare'); + expect(ctx.ctrl.diff).to.be('json'); + }); + + it('should indicate loading has finished', function() { + expect(ctx.ctrl.loading).to.be(false); + }); + }); + + describe('and diffs have already been fetched', function() { + beforeEach(function() { + deferred.resolve(compare('basic')); + ctx.ctrl.revisions[3].checked = true; + ctx.ctrl.revisions[1].checked = true; + ctx.ctrl.delta.basic = 'cached basic'; + ctx.ctrl.getDiff('basic'); + ctx.ctrl.$scope.$apply(); + }); + + it('should use the cached diffs instead of fetching', function() { + expect(historySrv.calculateDiff.calledOnce).to.be(false); + expect(ctx.ctrl.delta.basic).to.be('cached basic'); + }); + + it('should indicate loading has finished', function() { + expect(ctx.ctrl.loading).to.be(false); + }); + }); + + describe('and fetching the diff fails', function() { + beforeEach(function() { + deferred.reject(new Error('DiffError')); + ctx.ctrl.revisions[3].checked = true; + ctx.ctrl.revisions[1].checked = true; + ctx.ctrl.getDiff('basic'); + ctx.ctrl.$scope.$apply(); + }); + + it('should fetch the diff if two valid versions are selected', function() { + expect(historySrv.calculateDiff.calledOnce).to.be(true); + }); + + it('should return to the history list view', function() { + expect(ctx.ctrl.mode).to.be('list'); + }); + + it('should indicate loading has finished', function() { + expect(ctx.ctrl.loading).to.be(false); + }); + + it('should have an empty delta/changeset', function() { + expect(ctx.ctrl.delta).to.eql({ basic: '', json: '' }); + }); + }); + }); + + describe('when the user wants to restore a revision', function() { + var deferred; + + beforeEach(angularMocks.inject(($controller, $q) => { + deferred = $q.defer(); + historySrv.getHistoryList.returns($q.when(versionsResponse)); + historySrv.restoreDashboard.returns(deferred.promise); + ctx.ctrl = $controller(HistoryListCtrl, { + historySrv, + contextSrv: { user: { name: 'Carlos' }}, + $rootScope, + $scope: ctx.scope, + }); + ctx.ctrl.dashboard = { id: 1 }; + ctx.ctrl.restore(); + ctx.ctrl.$scope.$apply(); + })); + + it('should display a modal allowing the user to restore or cancel', function() { + expect($rootScope.appEvent.calledOnce).to.be(true); + expect($rootScope.appEvent.calledWith('confirm-modal')).to.be(true); + }); + + describe('and restore fails to fetch', function() { + beforeEach(function() { + deferred.reject(new Error('RestoreError')); + ctx.ctrl.restoreConfirm(RESTORE_ID); + try { + // this throws error, due to promise rejection + ctx.ctrl.$scope.$apply(); + } catch (e) {} + }); + + it('should indicate loading has finished', function() { + expect(ctx.ctrl.loading).to.be(false); + }); + + }); + }); +}); diff --git a/public/app/features/dashboard/specs/history_srv_specs.ts b/public/app/features/dashboard/specs/history_srv_specs.ts new file mode 100644 index 00000000000..4678759c438 --- /dev/null +++ b/public/app/features/dashboard/specs/history_srv_specs.ts @@ -0,0 +1,70 @@ +import {describe, beforeEach, it, sinon, expect, angularMocks} from 'test/lib/common'; + +import helpers from 'test/specs/helpers'; +import HistorySrv from '../history/history_srv'; +import { versions, compare, restore } from 'test/mocks/history-mocks'; + +describe('historySrv', function() { + var ctx = new helpers.ServiceTestContext(); + + var versionsResponse = versions(); + var restoreResponse = restore; + + beforeEach(angularMocks.module('grafana.core')); + 
beforeEach(angularMocks.module('grafana.services')); + beforeEach(angularMocks.inject(function($httpBackend) { + ctx.$httpBackend = $httpBackend; + $httpBackend.whenRoute('GET', 'api/dashboards/id/:id/versions').respond(versionsResponse); + $httpBackend.whenRoute('POST', 'api/dashboards/id/:id/restore') + .respond(function(method, url, data, headers, params) { + const parsedData = JSON.parse(data); + return [200, restoreResponse(parsedData.version)]; + }); + })); + beforeEach(ctx.createService('historySrv')); + + describe('getHistoryList', function() { + it('should return a versions array for the given dashboard id', function(done) { + ctx.service.getHistoryList({ id: 1 }).then(function(versions) { + expect(versions).to.eql(versionsResponse); + done(); + }); + ctx.$httpBackend.flush(); + }); + + it('should return an empty array when not given an id', function(done) { + ctx.service.getHistoryList({ }).then(function(versions) { + expect(versions).to.eql([]); + done(); + }); + ctx.$httpBackend.flush(); + }); + + it('should return an empty array when not given a dashboard', function(done) { + ctx.service.getHistoryList().then(function(versions) { + expect(versions).to.eql([]); + done(); + }); + ctx.$httpBackend.flush(); + }); + }); + + describe('restoreDashboard', function() { + it('should return a success response given valid parameters', function(done) { + var version = 6; + ctx.service.restoreDashboard({ id: 1 }, version).then(function(response) { + expect(response).to.eql(restoreResponse(version)); + done(); + }); + ctx.$httpBackend.flush(); + }); + + it('should return an empty object when not given an id', function(done) { + ctx.service.restoreDashboard({}, 6).then(function(response) { + expect(response).to.eql({}); + done(); + }); + ctx.$httpBackend.flush(); + }); + }); +}); diff --git a/public/app/features/dashboard/submenu/submenu.html b/public/app/features/dashboard/submenu/submenu.html index 3e09fe4425e..ce3c61f1cc3 100644 --- a/public/app/features/dashboard/submenu/submenu.html +++ b/public/app/features/dashboard/submenu/submenu.html @@ -1,4 +1,4 @@ -
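Finally, a sketch of how a consumer might drive the three historySrv endpoints exercised by the specs above; `reviewLatestChange`, the dashboard ids and the version numbers are placeholders for illustration, not part of the patch:

```typescript
// Hypothetical historySrv consumer; ids and versions are placeholders.
// historySrv is the service defined in history/history_srv.ts.
function reviewLatestChange(historySrv: any, dashboard: any) {
  // GET api/dashboards/id/:id/versions, newest revision first
  return historySrv.getHistoryList(dashboard, { limit: 10, start: 0 })
    .then((revisions: any[]) => historySrv.calculateDiff({
      // POST api/dashboards/calculate-diff, diffType 'basic' or 'json'
      new: { dashboardId: dashboard.id, version: revisions[0].version },
      base: { dashboardId: dashboard.id, version: revisions[1].version },
      diffType: 'basic',
    }))
    .then((delta: any) => {
      // POST api/dashboards/id/:id/restore resolves with the new slug
      return historySrv.restoreDashboard(dashboard, 1)
        .then((res: any) => ({ delta, slug: res.slug }));
    });
}
```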