mirror of
https://github.com/grafana/grafana.git
synced 2025-02-25 18:55:37 -06:00
Merge branch 'master' into graphite-seriesbytag
This commit is contained in:
@@ -1,13 +1,6 @@
|
||||
# http://editorconfig.org
|
||||
root = true
|
||||
|
||||
[*.go]
|
||||
indent_style = tab
|
||||
indent_size = 4
|
||||
charset = utf-8
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
|
||||
[*]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
@@ -15,5 +8,12 @@ charset = utf-8
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
|
||||
[*.go]
|
||||
indent_style = tab
|
||||
indent_size = 4
|
||||
charset = utf-8
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
|
||||
[*.md]
|
||||
trim_trailing_whitespace = false
|
||||
|
||||
8
.gitignore
vendored
8
.gitignore
vendored
@@ -41,6 +41,14 @@ profile.cov
|
||||
.notouch
|
||||
/pkg/cmd/grafana-cli/grafana-cli
|
||||
/pkg/cmd/grafana-server/grafana-server
|
||||
/pkg/cmd/grafana-server/debug
|
||||
/examples/*/dist
|
||||
/packaging/**/*.rpm
|
||||
/packaging/**/*.deb
|
||||
|
||||
/vendor/**/*.py
|
||||
/vendor/**/*.xml
|
||||
/vendor/**/*.yml
|
||||
/vendor/**/*_test.go
|
||||
/vendor/**/.editorconfig
|
||||
/vendor/**/appengine*
|
||||
|
||||
12
CHANGELOG.md
12
CHANGELOG.md
@@ -14,12 +14,17 @@
|
||||
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
|
||||
* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
|
||||
* **Jaeger**: Add support for open tracing using jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
|
||||
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/6710)
|
||||
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
|
||||
* **CLI**: Make it possible to install plugins from any url [#5873](https://github.com/grafana/grafana/issues/5873)
|
||||
* **Prometheus**: Add support for instant queries [#5765](https://github.com/grafana/grafana/issues/5765), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Cloudwatch**: Add support for alerting using the cloudwatch datasource [#8050](https://github.com/grafana/grafana/pull/8050), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Pagerduty**: Include triggering series in pagerduty notification [#8479](https://github.com/grafana/grafana/issues/8479), thx [@rickymoorhouse](https://github.com/rickymoorhouse)
|
||||
* **Timezone**: Time ranges like Today & Yesterday now work correctly when timezone setting is set to UTC [#8916](https://github.com/grafana/grafana/issues/8916), thx [@ctide](https://github.com/ctide)
|
||||
* **Prometheus**: Align $__interval with the step parameters. [#9226](https://github.com/grafana/grafana/pull/9226), thx [@alin-amana](https://github.com/alin-amana)
|
||||
* **Prometheus**: Autocomplete for label name and label value [#9208](https://github.com/grafana/grafana/pull/9208), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Postgres**: New Postgres data source [#9209](https://github.com/grafana/grafana/pull/9209), thx [@svenklemm](https://github.com/svenklemm)
|
||||
* **Datasources**: Make datasource HTTP requests verify TLS by default. closes [#9371](https://github.com/grafana/grafana/issues/9371), [#5334](https://github.com/grafana/grafana/issues/5334), [#8812](https://github.com/grafana/grafana/issues/8812), thx [@mattbostock](https://github.com/mattbostock)
|
||||
* **OAuth**: Verify TLS during OAuth callback [#9373](https://github.com/grafana/grafana/issues/9373), thx [@mattbostock](https://github.com/mattbostock)
|
||||
|
||||
## Minor
|
||||
* **SMTP**: Make it possible to set specific EHLO for smtp client. [#9319](https://github.com/grafana/grafana/issues/9319)
|
||||
@@ -27,9 +32,14 @@
|
||||
* **HTTP**: set net.Dialer.DualStack to true for all http clients [#9367](https://github.com/grafana/grafana/pull/9367)
|
||||
* **Alerting**: Add diff and percent diff as series reducers [#9386](https://github.com/grafana/grafana/pull/9386), thx [@shanhuhai5739](https://github.com/shanhuhai5739)
|
||||
* **Slack**: Allow images to be uploaded to slack when Token is present [#7175](https://github.com/grafana/grafana/issues/7175), thx [@xginn8](https://github.com/xginn8)
|
||||
* **Opsgenie**: Use their latest API instead of old version [#9399](https://github.com/grafana/grafana/pull/9399), thx [@cglrkn](https://github.com/cglrkn)
|
||||
* **Table**: Add support for displaying the timestamp with milliseconds [#9429](https://github.com/grafana/grafana/pull/9429), thx [@s1061123](https://github.com/s1061123)
|
||||
* **Hipchat**: Add metrics, message and image to hipchat notifications [#9110](https://github.com/grafana/grafana/issues/9110), thx [@eloo](https://github.com/eloo)
|
||||
* **Kafka**: Add support for sending alert notifications to kafka [#7104](https://github.com/grafana/grafana/issues/7104), thx [@utkarshcmu](https://github.com/utkarshcmu)
|
||||
|
||||
## Tech
|
||||
* **Go**: Grafana is now built using golang 1.9
|
||||
* **Webpack**: Changed from systemjs to webpack (see readme or building from source guide for new build instructions). Systemjs is still used to load plugins but now plugins can only import a limited set of dependencies. See [PLUGIN_DEV.md](https://github.com/grafana/grafana/blob/master/PLUGIN_DEV.md) for more details on how this can effect some plugins.
|
||||
|
||||
# 4.5.2 (2017-09-22)
|
||||
|
||||
|
||||
28
PLUGIN_DEV.md
Normal file
28
PLUGIN_DEV.md
Normal file
@@ -0,0 +1,28 @@
|
||||
# Plugin Development
|
||||
|
||||
This document is not meant as a complete guide for developing plugins but more as a changelog for changes in
|
||||
Grafana that can impact plugin development. Whenever you as a plugin author encounter an issue with your plugin after
|
||||
upgrading Grafana please check here before creating an issue.
|
||||
|
||||
## Links
|
||||
|
||||
- [Datasource plugin written in typescript](https://github.com/grafana/typescript-template-datasource)
|
||||
- [Simple json datasource plugin](https://github.com/grafana/simple-json-datasource)
|
||||
- [Plugin development guide](http://docs.grafana.org/plugins/developing/development/)
|
||||
|
||||
## Changes in v4.6
|
||||
|
||||
This version of Grafana has big changes that will impact a limited set of plugins. We moved from systemjs to webpack
|
||||
for built-in plugins & everything internal. External plugins still use systemjs but now with a limited
|
||||
set of Grafana components they can import. Plugins can depend on libs like lodash & moment and internal components
|
||||
like before using the same import paths. However since everything in Grafana is no longer accessible, a few plugins could encounter issues when importing a Grafana dependency.
|
||||
|
||||
[List of exposed components plugins can import/require](https://github.com/grafana/grafana/blob/master/public/app/features/plugins/plugin_loader.ts#L48)
|
||||
|
||||
If you think we missed exposing a crucial lib or Grafana component let us know by opening an issue.
|
||||
|
||||
### Deprecated components
|
||||
|
||||
The angular directive `<spectrum-picker>` is now deprecated (will still work for one more version) but we recommend plugin authors
|
||||
to upgrade to new `<color-picker color="ctrl.color" onChange="ctrl.onSparklineColorChange"></color-picker>`
|
||||
|
||||
11
README.md
11
README.md
@@ -45,7 +45,7 @@ yarn install --pure-lockfile
|
||||
npm run build
|
||||
```
|
||||
|
||||
To rebuild frontend assets (typesript, sass etc) as you change them start the watcher via.
|
||||
To rebuild frontend assets (typescript, sass etc) as you change them start the watcher via.
|
||||
|
||||
```bash
|
||||
npm run watch
|
||||
@@ -82,10 +82,17 @@ You only need to add the options you want to override. Config files are applied
|
||||
In your custom.ini uncomment (remove the leading `;`) sign. And set `app_mode = development`.
|
||||
|
||||
## Contribute
|
||||
|
||||
If you have any idea for an improvement or found a bug do not hesitate to open an issue.
|
||||
And if you have time clone this repo and submit a pull request and help me make Grafana
|
||||
the kickass metrics & devops dashboard we all dream about!
|
||||
|
||||
## Plugin development
|
||||
|
||||
Checkout the [Plugin Development Guide](http://docs.grafana.org/plugins/developing/development/) and checkout the [PLUGIN_DEV.md](https://github.com/grafana/grafana/blob/master/PLUGIN_DEV.md) file for changes in Grafana that relate to
|
||||
plugin development.
|
||||
|
||||
## License
|
||||
|
||||
Grafana is distributed under Apache 2.0 License.
|
||||
Work in progress Grafana 2.0 (with included Grafana backend)
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ But it will give you an idea of our current vision and plan.
|
||||
### Long term
|
||||
|
||||
- Backend plugins to support more Auth options, Alerting data sources & notifications
|
||||
- Universial time series transformations for any data source (meta queries)
|
||||
- Universal time series transformations for any data source (meta queries)
|
||||
- Reporting
|
||||
- Web socket & live data streams
|
||||
- Migrate to Angular2 or react
|
||||
|
||||
@@ -7,7 +7,7 @@ clone_folder: c:\gopath\src\github.com\grafana\grafana
|
||||
environment:
|
||||
nodejs_version: "6"
|
||||
GOPATH: c:\gopath
|
||||
GOVERSION: 1.9
|
||||
GOVERSION: 1.9.1
|
||||
|
||||
install:
|
||||
- rmdir c:\go /s /q
|
||||
|
||||
@@ -9,7 +9,7 @@ machine:
|
||||
GOPATH: "/home/ubuntu/.go_workspace"
|
||||
ORG_PATH: "github.com/grafana"
|
||||
REPO_PATH: "${ORG_PATH}/grafana"
|
||||
GODIST: "go1.9.linux-amd64.tar.gz"
|
||||
GODIST: "go1.9.1.linux-amd64.tar.gz"
|
||||
post:
|
||||
- mkdir -p ~/download
|
||||
- mkdir -p ~/docker
|
||||
|
||||
@@ -479,6 +479,7 @@ provider =
|
||||
bucket_url =
|
||||
bucket =
|
||||
region =
|
||||
path =
|
||||
access_key =
|
||||
secret_key =
|
||||
|
||||
|
||||
@@ -424,6 +424,7 @@
|
||||
[external_image_storage.s3]
|
||||
;bucket =
|
||||
;region =
|
||||
;path =
|
||||
;access_key =
|
||||
;secret_key =
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ then there are two flags that can be used to set homepath and the config file pa
|
||||
|
||||
If you have not lost the admin password then it is better to set in the Grafana UI. If you need to set the password in a script then the [Grafana API](http://docs.grafana.org/http_api/user/#change-password) can be used. Here is an example with curl using basic auth:
|
||||
|
||||
```
|
||||
```bash
|
||||
curl -X PUT -H "Content-Type: application/json" -d '{
|
||||
"oldPassword": "admin",
|
||||
"newPassword": "newpass",
|
||||
|
||||
@@ -115,6 +115,17 @@ In DingTalk PC Client:
|
||||
|
||||
Dingtalk supports the following "message type": `text`, `link` and `markdown`. Only the `text` message type is supported.
|
||||
|
||||
### Kafka
|
||||
|
||||
Notifications can be sent to a Kafka topic from Grafana using [Kafka REST Proxy](https://docs.confluent.io/1.0/kafka-rest/docs/index.html).
|
||||
There are a couple of configuration options which need to be set in the Grafana UI under Kafka Settings:
|
||||
|
||||
1. Kafka REST Proxy endpoint.
|
||||
|
||||
2. Kafka Topic.
|
||||
|
||||
Once these two properties are set, you can send the alerts to Kafka for further processing or throttling them.
|
||||
|
||||
### Other Supported Notification Channels
|
||||
|
||||
Grafana also supports the following Notification Channels:
|
||||
|
||||
@@ -50,11 +50,12 @@ Create a file at `~/.aws/credentials`. That is the `HOME` path for user running
|
||||
|
||||
Example content:
|
||||
|
||||
```bash
|
||||
[default]
|
||||
aws_access_key_id = asdsadasdasdasd
|
||||
aws_secret_access_key = dasdasdsadasdasdasdsa
|
||||
region = us-west-2
|
||||
|
||||
```
|
||||
|
||||
## Metric Query Editor
|
||||
|
||||
@@ -117,7 +118,9 @@ Filters syntax:
|
||||
|
||||
Example `ec2_instance_attribute()` query
|
||||
|
||||
```javascript
|
||||
ec2_instance_attribute(us-east-1, InstanceId, { "tag:Environment": [ "production" ] })
|
||||
```
|
||||
|
||||
### Selecting Attributes
|
||||
|
||||
@@ -156,7 +159,9 @@ Tags can be selected by prepending the tag name with `Tags.`
|
||||
|
||||
Example `ec2_instance_attribute()` query
|
||||
|
||||
```javascript
|
||||
ec2_instance_attribute(us-east-1, Tags.Name, { "tag:Team": [ "sysops" ] })
|
||||
```
|
||||
|
||||
## Cost
|
||||
|
||||
|
||||
@@ -38,8 +38,10 @@ Proxy access means that the Grafana backend will proxy all requests from the bro
|
||||
If you select direct access you must update your Elasticsearch configuration to allow other domains to access
|
||||
Elasticsearch from the browser. You do this by specifying these two options in your **elasticsearch.yml** config file.
|
||||
|
||||
```bash
|
||||
http.cors.enabled: true
|
||||
http.cors.allow-origin: "*"
|
||||
```
|
||||
|
||||
### Index settings
|
||||
|
||||
@@ -133,6 +135,5 @@ Name | Description
|
||||
------------ | -------------
|
||||
Query | You can leave the search query blank or specify a lucene query
|
||||
Time | The name of the time field, needs to be date field.
|
||||
Title | The name of the field to use for the event title.
|
||||
Text | Event description field.
|
||||
Tags | Optional field name to use for event tags (can be an array or a CSV string).
|
||||
Text | Optional field name to use for the event text body.
|
||||
|
||||
186
docs/sources/features/datasources/postgres.md
Normal file
186
docs/sources/features/datasources/postgres.md
Normal file
@@ -0,0 +1,186 @@
|
||||
+++
|
||||
title = "Using PostgreSQL in Grafana"
|
||||
description = "Guide for using PostgreSQL in Grafana"
|
||||
keywords = ["grafana", "postgresql", "guide"]
|
||||
type = "docs"
|
||||
[menu.docs]
|
||||
name = "PostgreSQL"
|
||||
parent = "datasources"
|
||||
weight = 7
|
||||
+++
|
||||
|
||||
# Using PostgreSQL in Grafana
|
||||
|
||||
Grafana ships with a built-in PostgreSQL data source plugin that allows you to query and visualize data from a PostgreSQL compatible database.
|
||||
|
||||
## Adding the data source
|
||||
|
||||
1. Open the side menu by clicking the Grafana icon in the top header.
|
||||
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
|
||||
3. Click the `+ Add data source` button in the top header.
|
||||
4. Select *PostgreSQL* from the *Type* dropdown.
|
||||
|
||||
### Database User Permissions (Important!)
|
||||
|
||||
The database user you specify when you add the data source should only be granted SELECT permissions on
|
||||
the specified database & tables you want to query. Grafana does not validate that the query is safe. The query
|
||||
could include any SQL statement. For example, statements like `DELETE FROM user;` and `DROP TABLE user;` would be
|
||||
executed. To protect against this we **highly** recommend you create a specific postgresql user with restricted permissions.
|
||||
|
||||
Example:
|
||||
|
||||
```sql
|
||||
CREATE USER grafanareader WITH PASSWORD 'password';
|
||||
GRANT USAGE ON SCHEMA schema TO grafanareader;
|
||||
GRANT SELECT ON schema.table TO grafanareader;
|
||||
```
|
||||
|
||||
Make sure the user does not get any unwanted privileges from the public role.
|
||||
|
||||
## Macros
|
||||
|
||||
To simplify syntax and to allow for dynamic parts, like date range filters, the query can contain macros.
|
||||
|
||||
Macro example | Description
|
||||
------------ | -------------
|
||||
*$__time(dateColumn)* | Will be replaced by an expression to rename the column to `time`. For example, *dateColumn as time*
|
||||
*$__timeSec(dateColumn)* | Will be replaced by an expression to rename the column to `time` and converting the value to unix timestamp. For example, *extract(epoch from dateColumn) as time*
|
||||
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn > to_timestamp(1494410783) AND dateColumn < to_timestamp(1494497183)*
|
||||
*$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *to_timestamp(1494410783)*
|
||||
*$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *to_timestamp(1494497183)*
|
||||
*$__timeGroup(dateColumn,'5m')* | Will be replaced by an expression usable in GROUP BY clause. For example, *(extract(epoch from "dateColumn")/extract(epoch from '5m'::interval))::int*
|
||||
*$__unixEpochFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name with times represented as unix timestamp. For example, *dateColumn > 1494410783 AND dateColumn < 1494497183*
|
||||
*$__unixEpochFrom()* | Will be replaced by the start of the currently active time selection as unix timestamp. For example, *1494410783*
|
||||
*$__unixEpochTo()* | Will be replaced by the end of the currently active time selection as unix timestamp. For example, *1494497183*
|
||||
|
||||
We plan to add many more macros. If you have suggestions for what macros you would like to see, please [open an issue](https://github.com/grafana/grafana) in our GitHub repo.
|
||||
|
||||
The query editor has a link named `Generated SQL` that shows up after a query has been executed, while in panel edit mode. Click on it and it will expand and show the raw interpolated SQL string that was executed.
|
||||
|
||||
## Table queries
|
||||
|
||||
If the `Format as` query option is set to `Table` then you can basically do any type of SQL query. The table panel will automatically show the results of whatever columns & rows your query returns.
|
||||
|
||||
Query editor with example query:
|
||||
|
||||

|
||||
|
||||
|
||||
The query:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
title as "Title",
|
||||
"user".login as "Created By",
|
||||
dashboard.created as "Created On"
|
||||
FROM dashboard
|
||||
INNER JOIN "user" on "user".id = dashboard.created_by
|
||||
WHERE $__timeFilter(dashboard.created)
|
||||
```
|
||||
|
||||
You can control the name of the Table panel columns by using regular `as ` SQL column selection syntax.
|
||||
|
||||
The resulting table panel:
|
||||
|
||||

|
||||
|
||||
### Time series queries
|
||||
|
||||
If you set `Format as` to `Time series`, for use in Graph panel for example, then the query must return a column named `time` that returns either a sql datetime or any numeric datatype representing unix epoch in seconds.
|
||||
Any column except `time` and `metric` is treated as a value column.
|
||||
You may return a column named `metric` that is used as metric name for the value column.
|
||||
|
||||
Example with `metric` column
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
min(time_date_time) as time,
|
||||
min(value_double),
|
||||
'min' as metric
|
||||
FROM test_data
|
||||
WHERE $__timeFilter(time_date_time)
|
||||
GROUP BY metric1, (extract(epoch from time_date_time)/extract(epoch from $__interval::interval))::int
|
||||
ORDER BY time asc
|
||||
```
|
||||
|
||||
Example with multiple columns:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
min(time_date_time) as time,
|
||||
min(value_double) as min_value,
|
||||
max(value_double) as max_value
|
||||
FROM test_data
|
||||
WHERE $__timeFilter(time_date_time)
|
||||
GROUP BY metric1, (extract(epoch from time_date_time)/extract(epoch from $__interval::interval))::int
|
||||
ORDER BY time asc
|
||||
```
|
||||
|
||||
## Templating
|
||||
|
||||
Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place. Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data being displayed in your dashboard.
|
||||
|
||||
Checkout the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different types of template variables.
|
||||
|
||||
### Query Variable
|
||||
|
||||
If you add a template variable of the type `Query`, you can write a PostgreSQL query that can
|
||||
return things like measurement names, key names or key values that are shown as a dropdown select box.
|
||||
|
||||
For example, you can have a variable that contains all values for the `hostname` column in a table if you specify a query like this in the templating variable *Query* setting.
|
||||
|
||||
```sql
|
||||
SELECT hostname FROM host
|
||||
```
|
||||
|
||||
A query can return multiple columns and Grafana will automatically create a list from them. For example, the query below will return a list with values from `hostname` and `hostname2`.
|
||||
|
||||
```sql
|
||||
SELECT host.hostname, other_host.hostname2 FROM host JOIN other_host ON host.city = other_host.city
|
||||
```
|
||||
|
||||
Another option is a query that can create a key/value variable. The query should return two columns that are named `__text` and `__value`. The `__text` column value should be unique (if it is not unique then the first value is used). The options in the dropdown will have a text and value that allows you to have a friendly name as text and an id as the value. An example query with `hostname` as the text and `id` as the value:
|
||||
|
||||
```sql
|
||||
SELECT hostname AS __text, id AS __value FROM host
|
||||
```
|
||||
|
||||
You can also create nested variables. For example if you had another variable named `region`. Then you could have
|
||||
the hosts variable only show hosts from the current selected region with a query like this (if `region` is a multi-value variable then use the `IN` comparison operator rather than `=` to match against multiple values):
|
||||
|
||||
```sql
|
||||
SELECT hostname FROM host WHERE region IN($region)
|
||||
```
|
||||
|
||||
### Using Variables in Queries
|
||||
|
||||
Template variables are quoted automatically so if it is a string value do not wrap them in quotes in where clauses. If the variable is a multi-value variable then use the `IN` comparison operator rather than `=` to match against multiple values.
|
||||
|
||||
There are two syntaxes:
|
||||
|
||||
`$<varname>` Example with a template variable named `hostname`:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
atimestamp as time,
|
||||
aint as value
|
||||
FROM table
|
||||
WHERE $__timeFilter(atimestamp) and hostname in($hostname)
|
||||
ORDER BY atimestamp ASC
|
||||
```
|
||||
|
||||
`[[varname]]` Example with a template variable named `hostname`:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
atimestamp as time,
|
||||
aint as value
|
||||
FROM table
|
||||
WHERE $__timeFilter(atimestamp) and hostname in([[hostname]])
|
||||
ORDER BY atimestamp ASC
|
||||
```
|
||||
|
||||
## Alerting
|
||||
|
||||
Time series queries should work in alerting conditions. Table formatted queries is not yet supported in alert rule
|
||||
conditions.
|
||||
74
docs/sources/guides/whats-new-in-v4-6.md
Normal file
74
docs/sources/guides/whats-new-in-v4-6.md
Normal file
@@ -0,0 +1,74 @@
|
||||
+++
|
||||
title = "What's New in Grafana v4.6"
|
||||
description = "Feature & improvement highlights for Grafana v4.6"
|
||||
keywords = ["grafana", "new", "documentation", "4.6"]
|
||||
type = "docs"
|
||||
[menu.docs]
|
||||
name = "Version 4.6"
|
||||
identifier = "v4.6"
|
||||
parent = "whatsnew"
|
||||
weight = -5
|
||||
+++
|
||||
|
||||
# What's New in Grafana v4.6
|
||||
|
||||
Grafana v4.6 brings many enhancements to Annotations, Cloudwatch & Prometheus. It also adds support for Postgres as metric & table data source!
|
||||
|
||||
### Annotations
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v46/add_annotation_region.png" max-width= "800px" >}}
|
||||
|
||||
You can now add annotation events and regions right from the graph panel! Just hold CTRL/CMD + click or drag region to open the **Add Annotation** view. The
|
||||
[Annotations]({{< relref "reference/annotations.md" >}}) documentation is updated to include details on this new exciting feature.
|
||||
|
||||
### Cloudwatch
|
||||
|
||||
Cloudwatch now supports alerting. Setup alert rules for any Cloudwatch metric!
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v46/cloudwatch_alerting.png" max-width= "800px" >}}
|
||||
|
||||
### Postgres
|
||||
|
||||
Grafana v4.6 now ships with a built-in datasource plugin for Postgres. Have logs or metric data in Postgres? You can now visualize that data and
|
||||
define alert rules on it like any of our other data sources.
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v46/postgres_table_query.png" max-width= "800px" >}}
|
||||
|
||||
### Prometheus
|
||||
|
||||
New enhancements include support for **instant queries** and improvements to query editor in the form of autocomplete for label names and label values.
|
||||
This makes exploring and filtering Prometheus data much easier.
|
||||
|
||||
## Changelog
|
||||
|
||||
### New Features
|
||||
|
||||
* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
|
||||
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
|
||||
* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
|
||||
* **Jaeger**: Add support for open tracing using jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
|
||||
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
|
||||
* **CLI**: Make it possible to install plugins from any url [#5873](https://github.com/grafana/grafana/issues/5873)
|
||||
* **Prometheus**: Add support for instant queries [#5765](https://github.com/grafana/grafana/issues/5765), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Cloudwatch**: Add support for alerting using the cloudwatch datasource [#8050](https://github.com/grafana/grafana/pull/8050), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Pagerduty**: Include triggering series in pagerduty notification [#8479](https://github.com/grafana/grafana/issues/8479), thx [@rickymoorhouse](https://github.com/rickymoorhouse)
|
||||
* **Timezone**: Time ranges like Today & Yesterday now work correctly when timezone setting is set to UTC [#8916](https://github.com/grafana/grafana/issues/8916), thx [@ctide](https://github.com/ctide)
|
||||
* **Prometheus**: Align $__interval with the step parameters. [#9226](https://github.com/grafana/grafana/pull/9226), thx [@alin-amana](https://github.com/alin-amana)
|
||||
* **Prometheus**: Autocomplete for label name and label value [#9208](https://github.com/grafana/grafana/pull/9208), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Postgres**: New Postgres data source [#9209](https://github.com/grafana/grafana/pull/9209), thx [@svenklemm](https://github.com/svenklemm)
|
||||
* **Datasources**: closes [#9371](https://github.com/grafana/grafana/issues/9371), [#5334](https://github.com/grafana/grafana/issues/5334), [#8812](https://github.com/grafana/grafana/issues/8812), thx [@mattbostock](https://github.com/mattbostock)
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* **SMTP**: Make it possible to set specific EHLO for smtp client. [#9319](https://github.com/grafana/grafana/issues/9319)
|
||||
* **Dataproxy**: Allow Grafana to renegotiate tls connection [#9250](https://github.com/grafana/grafana/issues/9250)
|
||||
* **HTTP**: set net.Dialer.DualStack to true for all http clients [#9367](https://github.com/grafana/grafana/pull/9367)
|
||||
* **Alerting**: Add diff and percent diff as series reducers [#9386](https://github.com/grafana/grafana/pull/9386), thx [@shanhuhai5739](https://github.com/shanhuhai5739)
|
||||
* **Slack**: Allow images to be uploaded to slack when Token is present [#7175](https://github.com/grafana/grafana/issues/7175), thx [@xginn8](https://github.com/xginn8)
|
||||
* **Opsgenie**: Use their latest API instead of old version [#9399](https://github.com/grafana/grafana/pull/9399), thx [@cglrkn](https://github.com/cglrkn)
|
||||
* **Table**: Add support for displaying the timestamp with milliseconds [#9429](https://github.com/grafana/grafana/pull/9429), thx [@s1061123](https://github.com/s1061123)
|
||||
* **Hipchat**: Add metrics, message and image to hipchat notifications [#9110](https://github.com/grafana/grafana/issues/9110), thx [@eloo](https://github.com/eloo)
|
||||
|
||||
### Tech
|
||||
* **Go**: Grafana is now built using golang 1.9
|
||||
|
||||
@@ -23,12 +23,15 @@ Only works with Basic Authentication (username and password). See [introduction]
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```bash
|
||||
GET /api/admin/settings
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```bash
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -166,7 +169,8 @@ Only works with Basic Authentication (username and password). See [introduction]
|
||||
"key_file":"",
|
||||
"password":"************",
|
||||
"skip_verify":"false",
|
||||
"user":""},
|
||||
"user":""
|
||||
},
|
||||
"users":{
|
||||
"allow_org_create":"true",
|
||||
"allow_sign_up":"false",
|
||||
@@ -174,7 +178,7 @@ Only works with Basic Authentication (username and password). See [introduction]
|
||||
"auto_assign_org_role":"Viewer"
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
## Grafana Stats
|
||||
|
||||
`GET /api/admin/stats`
|
||||
@@ -183,12 +187,15 @@ Only works with Basic Authentication (username and password). See [introduction]
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```bash
|
||||
GET /api/admin/stats
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```json
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -203,6 +210,7 @@ Only works with Basic Authentication (username and password). See [introduction]
|
||||
"starred_db_count":2,
|
||||
"grafana_admin_count":2
|
||||
}
|
||||
```
|
||||
|
||||
## Global Users
|
||||
|
||||
@@ -211,6 +219,7 @@ Only works with Basic Authentication (username and password). See [introduction]
|
||||
Create new user. Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.
|
||||
|
||||
**Example Request**:
|
||||
```json
|
||||
|
||||
POST /api/admin/users HTTP/1.1
|
||||
Accept: application/json
|
||||
@@ -222,13 +231,16 @@ Create new user. Only works with Basic Authentication (username and password). S
|
||||
"login":"user",
|
||||
"password":"userpassword"
|
||||
}
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```json
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"id":5,"message":"User created"}
|
||||
```
|
||||
|
||||
## Password for User
|
||||
|
||||
@@ -239,18 +251,22 @@ Change password for a specific user.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```json
|
||||
PUT /api/admin/users/2/password HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
|
||||
{"password":"userpassword"}
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```json
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message": "User password updated"}
|
||||
```
|
||||
|
||||
## Permissions
|
||||
|
||||
@@ -260,18 +276,22 @@ Only works with Basic Authentication (username and password). See [introduction]
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```json
|
||||
PUT /api/admin/users/2/permissions HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
|
||||
{"isGrafanaAdmin": true}
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```json
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message": "User permissions updated"}
|
||||
```
|
||||
|
||||
## Delete global User
|
||||
|
||||
@@ -281,16 +301,20 @@ Only works with Basic Authentication (username and password). See [introduction]
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```json
|
||||
DELETE /api/admin/users/2 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```json
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message": "User deleted"}
|
||||
```
|
||||
|
||||
## Pause all alerts
|
||||
|
||||
@@ -300,6 +324,7 @@ Only works with Basic Authentication (username and password). See [introduction]
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```json
|
||||
POST /api/admin/pause-all-alerts HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -307,6 +332,7 @@ Only works with Basic Authentication (username and password). See [introduction]
|
||||
{
|
||||
"paused": true
|
||||
}
|
||||
```
|
||||
|
||||
JSON Body schema:
|
||||
|
||||
@@ -314,7 +340,9 @@ JSON Body schema:
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```json
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"state": "new state", "message": "alerts paused/unpaused", "alertsAffected": 100}
|
||||
```
|
||||
@@ -23,11 +23,12 @@ This API can also be used to create, update and delete alert notifications.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/alerts HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
Querystring Parameters:
|
||||
|
||||
These parameters are used as querystring parameters. For example:
|
||||
@@ -41,6 +42,7 @@ This API can also be used to create, update and delete alert notifications.
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
[
|
||||
@@ -63,6 +65,7 @@ This API can also be used to create, update and delete alert notifications.
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
{
|
||||
```
|
||||
|
||||
@@ -70,13 +73,16 @@ This API can also be used to create, update and delete alert notifications.
|
||||
|
||||
`POST /api/alerts/:id/pause`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
POST /api/alerts/1/pause HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
|
||||
The :id query parameter is the id of the alert to be paused or unpaused.
|
||||
|
||||
JSON Body Schema:
|
||||
@@ -90,6 +96,7 @@ This API can also be used to create, update and delete alert notifications.
|
||||
Content-Type: application/json
|
||||
{
|
||||
```
|
||||
|
||||
## Get alert notifications
|
||||
|
||||
`GET /api/alert-notifications`
|
||||
@@ -97,6 +104,7 @@ This API can also be used to create, update and delete alert notifications.
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/alert-notifications HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
@@ -105,6 +113,7 @@ This API can also be used to create, update and delete alert notifications.
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -114,6 +123,7 @@ JSON Body Schema:
|
||||
|
||||
`POST /api/alert-notifications`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
POST /api/alert-notifications HTTP/1.1
|
||||
@@ -121,6 +131,7 @@ JSON Body Schema:
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
@@ -128,15 +139,17 @@ JSON Body Schema:
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
{
|
||||
```
|
||||
|
||||
## Update alert notification
|
||||
|
||||
`PUT /api/alert-notifications/1`
|
||||
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PUT /api/alert-notifications/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
@@ -148,6 +161,7 @@ JSON Body Schema:
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
{
|
||||
```
|
||||
|
||||
## Delete alert notification
|
||||
|
||||
@@ -155,6 +169,7 @@ JSON Body Schema:
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/alert-notifications/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -168,10 +183,11 @@ JSON Body Schema:
|
||||
Content-Type: application/json
|
||||
{
|
||||
```
|
||||
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
{
|
||||
@@ -183,6 +199,7 @@ JSON Body Schema:
|
||||
"created": "2017-01-01 12:34",
|
||||
"updated": "2017-01-01 12:34"
|
||||
}
|
||||
```
|
||||
|
||||
## Update alert notification
|
||||
|
||||
@@ -190,6 +207,7 @@ JSON Body Schema:
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PUT /api/alert-notifications/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -204,10 +222,11 @@ JSON Body Schema:
|
||||
"addresses": "carl@grafana.com;dev@grafana.com"
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
{
|
||||
@@ -219,6 +238,7 @@ JSON Body Schema:
|
||||
"created": "2017-01-01 12:34",
|
||||
"updated": "2017-01-01 12:34"
|
||||
}
|
||||
```
|
||||
|
||||
## Delete alert notification
|
||||
|
||||
@@ -226,15 +246,19 @@ JSON Body Schema:
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/alert-notifications/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
{
|
||||
"message": "Notification deleted"
|
||||
}
|
||||
```
|
||||
189
docs/sources/http_api/annotations.md
Normal file
189
docs/sources/http_api/annotations.md
Normal file
@@ -0,0 +1,189 @@
|
||||
+++
|
||||
title = "Annotations HTTP API"
|
||||
description = "Grafana Annotations HTTP API"
|
||||
keywords = ["grafana", "http", "documentation", "api", "annotation", "annotations", "comment"]
|
||||
aliases = ["/http_api/annotations/"]
|
||||
type = "docs"
|
||||
[menu.docs]
|
||||
name = "Annotations"
|
||||
identifier = "annotationshttp"
|
||||
parent = "http_api"
|
||||
+++
|
||||
|
||||
# Annotations resources / actions
|
||||
|
||||
This is the API documentation for the new Grafana Annotations feature released in Grafana 4.6. Annotations are saved in the Grafana database (sqlite, mysql or postgres). Annotations can be global annotations that can be shown on any dashboard by configuring an annotation data source - they are filtered by tags. Or they can be tied to a panel on a dashboard and are then only shown on that panel.
|
||||
|
||||
## Find Annotations
|
||||
|
||||
`GET /api/annotations?from=1506676478816&to=1507281278816&tags=tag1&tags=tag2&limit=100`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/annotations?from=1506676478816&to=1507281278816&tags=tag1&tags=tag2&limit=100 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic YWRtaW46YWRtaW4=
|
||||
```
|
||||
|
||||
|
||||
Query Parameters:
|
||||
|
||||
- `from`: epoch datetime in milliseconds. Optional.
|
||||
- `to`: epoch datetime in milliseconds. Optional.
|
||||
- `limit`: number. Optional - default is 10. Max limit for results returned.
|
||||
- `alertId`: number. Optional. Find annotations for a specified alert.
|
||||
- `dashboardId`: number. Optional. Find annotations that are scoped to a specific dashboard
|
||||
- `panelId`: number. Optional. Find annotations that are scoped to a specific panel
|
||||
- `tags`: string. Optional. Use this to filter global annotations. Global annotations are annotations from an annotation data source that are not connected specifically to a dashboard or panel. To do an "AND" filtering with multiple tags, specify the tags parameter multiple times e.g. `tags=tag1&tags=tag2`.
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
[
|
||||
```
|
||||
|
||||
## Create Annotation
|
||||
|
||||
Creates an annotation in the Grafana database. The `dashboardId` and `panelId` fields are optional. If they are not specified then a global annotation is created and can be queried in any dashboard that adds the Grafana annotations data source.
|
||||
|
||||
`POST /api/annotations`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```json
|
||||
POST /api/annotations HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"dashboardId":468,
|
||||
"panelId":1,
|
||||
"time":1507037197339,
|
||||
"isRegion":true,
|
||||
"timeEnd":1507180805056,
|
||||
"tags":["tag1","tag2"],
|
||||
"text":"Annotation Description"
|
||||
}
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```json
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Annotation added"}
|
||||
```
|
||||
|
||||
## Update Annotation
|
||||
|
||||
`PUT /api/annotations/:id`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```json
|
||||
PUT /api/annotations/1141 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"time":1507037197339,
|
||||
"isRegion":true,
|
||||
"timeEnd":1507180805056,
|
||||
"text":"Annotation Description",
|
||||
"tags":["tag3","tag4","tag5"]
|
||||
}
|
||||
```
|
||||
|
||||
## Delete Annotation By Id
|
||||
|
||||
`DELETE /api/annotation/:id`
|
||||
|
||||
Deletes the annotation that matches the specified id.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/annotation/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Delete Annotation By RegionId
|
||||
|
||||
`DELETE /api/annotation/region/:id`
|
||||
|
||||
Deletes the annotation that matches the specified region id. A region is an annotation that covers a timerange and has a start and end time. In the Grafana database, this is stored as two annotations connected by a region id.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/annotation/region/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
Deletes the annotation that matches the specified id.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/annotation/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Annotation deleted"}
|
||||
```
|
||||
|
||||
## Delete Annotation By RegionId
|
||||
|
||||
`DELETE /api/annotation/region/:id`
|
||||
|
||||
Deletes the annotation that matches the specified region id. A region is an annotation that covers a timerange and has a start and end time. In the Grafana database, this is stored as two annotations connected by a region id.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/annotation/region/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Annotation region deleted"}
|
||||
```
|
||||
@@ -21,7 +21,7 @@ If basic auth is enabled (it is enabled by default) you can authenticate your HT
|
||||
standard basic auth. Basic auth will also authenticate LDAP users.
|
||||
|
||||
curl example:
|
||||
```
|
||||
```bash
|
||||
curl http://admin:admin@localhost:3000/api/org
|
||||
{"id":1,"name":"Main Org."}
|
||||
```
|
||||
@@ -36,9 +36,11 @@ You use the token in all requests in the `Authorization` header, like this:
|
||||
|
||||
**Example**:
|
||||
|
||||
```http
|
||||
GET http://your.grafana.com/api/dashboards/db/mydash HTTP/1.1
|
||||
Accept: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
The `Authorization` header value should be `Bearer <your api key>`.
|
||||
|
||||
@@ -50,13 +52,16 @@ The `Authorization` header value should be `Bearer <your api key>`.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/auth/keys HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -72,6 +77,7 @@ The `Authorization` header value should be `Bearer <your api key>`.
|
||||
POST /api/auth/keys HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
|
||||
@@ -79,6 +85,7 @@ The `Authorization` header value should be `Bearer <your api key>`.
|
||||
|
||||
- **name** – The key name
|
||||
- **role** – Sets the access level/Grafana Role for the key. Can be one of the following values: `Viewer`, `Editor`, `Read Only Editor` or `Admin`.
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
@@ -88,6 +95,7 @@ The `Authorization` header value should be `Bearer <your api key>`.
|
||||
```
|
||||
|
||||
## Delete API Key
|
||||
|
||||
`DELETE /api/auth/keys/:id`
|
||||
|
||||
**Example Request**:
|
||||
@@ -96,10 +104,12 @@ JSON Body schema:
|
||||
DELETE /api/auth/keys/3 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
@@ -107,14 +117,17 @@ JSON Body schema:
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/auth/keys/3 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"API key deleted"}
|
||||
```
|
||||
@@ -158,13 +158,16 @@ Will return the home dashboard.
|
||||
|
||||
`GET /api/search/`
|
||||
|
||||
Query parameters:
|
||||
|
||||
- **query** – Search Query
|
||||
- **tag** – Tag to use
|
||||
- **starred** – Flag indicating if only starred Dashboards should be returned
|
||||
- **tagcloud** - Flag indicating if a tagcloud should be returned
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/search?query=MyDashboard&starred=true&tag=prod HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -205,6 +208,7 @@ Will return the home dashboard.
|
||||
"version":5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Tags for Dashboard
|
||||
|
||||
@@ -215,13 +219,16 @@ Get all tags of dashboards
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/dashboards/tags HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -235,6 +242,7 @@ Get all tags of dashboards
|
||||
"count":4
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Search Dashboards
|
||||
|
||||
@@ -249,13 +257,16 @@ Query parameters:
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/search?query=MyDashboard&starred=true&tag=prod HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -269,3 +280,4 @@ Query parameters:
|
||||
"isStarred":false
|
||||
}
|
||||
]
|
||||
```
|
||||
@@ -18,13 +18,16 @@ parent = "http_api"
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/datasources HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -46,6 +49,7 @@ parent = "http_api"
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
@@ -53,13 +57,16 @@ parent = "http_api"
|
||||
## Get a single data source by Name
|
||||
|
||||
`GET /api/datasources/name/:name`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/datasources/name/test_datasource HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
@@ -79,6 +86,7 @@ parent = "http_api"
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
@@ -86,13 +94,16 @@ parent = "http_api"
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Create data source
|
||||
|
||||
`POST /api/datasources`
|
||||
|
||||
**Example Graphite Request**:
|
||||
|
||||
```http
|
||||
POST /api/datasources HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
@@ -112,6 +123,7 @@ parent = "http_api"
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
@@ -119,19 +131,23 @@ parent = "http_api"
|
||||
## Update an existing data source
|
||||
|
||||
`PUT /api/datasources/:datasourceId`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PUT /api/datasources/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Delete an existing data source by id
|
||||
@@ -139,6 +155,7 @@ parent = "http_api"
|
||||
`DELETE /api/datasources/:datasourceId`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/datasources/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
@@ -151,9 +168,11 @@ parent = "http_api"
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
```
|
||||
## Delete an existing data source by name
|
||||
|
||||
`DELETE /api/datasources/name/:datasourceName`
|
||||
|
||||
**Example Request**:
|
||||
@@ -177,10 +196,12 @@ parent = "http_api"
|
||||
|
||||
`GET /api/datasources/proxy/:datasourceId/*`
|
||||
|
||||
Proxies all calls to the actual datasource.
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"id":1,"message":"Datasource added", "name": "test_datasource"}
|
||||
```
|
||||
|
||||
## Update an existing data source
|
||||
|
||||
@@ -188,6 +209,7 @@ parent = "http_api"
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PUT /api/datasources/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -209,13 +231,16 @@ parent = "http_api"
|
||||
"isDefault":false,
|
||||
"jsonData":null
|
||||
}
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Datasource updated", "id": 1, "name": "test_datasource"}
|
||||
```
|
||||
|
||||
## Delete an existing data source by id
|
||||
|
||||
@@ -223,17 +248,21 @@ parent = "http_api"
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/datasources/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Data source deleted"}
|
||||
```
|
||||
|
||||
## Delete an existing data source by name
|
||||
|
||||
@@ -241,17 +270,21 @@ parent = "http_api"
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/datasources/name/test_datasource HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Data source deleted"}
|
||||
```
|
||||
|
||||
## Data source proxy calls
|
||||
|
||||
|
||||
@@ -18,13 +18,16 @@ parent = "http_api"
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/org HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -32,6 +35,7 @@ parent = "http_api"
|
||||
|
||||
## Get Organisation by Id
|
||||
|
||||
`GET /api/orgs/:orgId`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
@@ -39,13 +43,16 @@ parent = "http_api"
|
||||
GET /api/orgs/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
## Get Organisation by Name
|
||||
|
||||
`GET /api/orgs/name/:orgName`
|
||||
@@ -61,20 +68,23 @@ parent = "http_api"
|
||||
|
||||
**Example Response**:
|
||||
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Create Organisation
|
||||
|
||||
`POST /api/orgs`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
POST /api/orgs HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
|
||||
@@ -90,6 +100,7 @@ parent = "http_api"
|
||||
## Update current Organisation
|
||||
|
||||
`PUT /api/org`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
@@ -97,6 +108,7 @@ parent = "http_api"
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
@@ -105,10 +117,11 @@ parent = "http_api"
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
|
||||
```
|
||||
|
||||
## Get all users within the actual organisation
|
||||
|
||||
`GET /api/org/users`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
@@ -116,7 +129,7 @@ parent = "http_api"
|
||||
GET /api/org/users HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
@@ -125,6 +138,7 @@ parent = "http_api"
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Add a new user to the actual organisation
|
||||
|
||||
@@ -133,15 +147,16 @@ parent = "http_api"
|
||||
Adds a global user to the actual organisation.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
|
||||
```http
|
||||
POST /api/org/users HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
@@ -149,13 +164,16 @@ parent = "http_api"
|
||||
|
||||
```
|
||||
|
||||
## Updates the given user
|
||||
|
||||
`PATCH /api/org/users/:userId`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PATCH /api/org/users/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
@@ -168,6 +186,7 @@ parent = "http_api"
|
||||
|
||||
```
|
||||
|
||||
## Delete user in actual organisation
|
||||
|
||||
`DELETE /api/org/users/:userId`
|
||||
|
||||
@@ -177,6 +196,7 @@ Adds a global user to the actual organisation.
|
||||
DELETE /api/org/users/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
@@ -186,14 +206,16 @@ Adds a global user to the actual organisation.
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
|
||||
# Organisations
|
||||
|
||||
## Search all Organisations
|
||||
|
||||
`GET /api/orgs`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/orgs HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -201,6 +223,7 @@ Adds a global user to the actual organisation.
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
@@ -209,15 +232,16 @@ Adds a global user to the actual organisation.
|
||||
|
||||
## Update Organisation
|
||||
|
||||
|
||||
`PUT /api/orgs/:orgId`
|
||||
|
||||
Update Organisation, fields *Address 1*, *Address 2*, *City* are not implemented yet.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PUT /api/orgs/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
@@ -225,18 +249,21 @@ Adds a global user to the actual organisation.
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Get Users in Organisation
|
||||
|
||||
`GET /api/orgs/:orgId/users`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/orgs/1/users HTTP/1.1
|
||||
Accept: application/json
|
||||
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
@@ -246,13 +273,16 @@ Adds a global user to the actual organisation.
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
[
|
||||
```
|
||||
|
||||
## Add User in Organisation
|
||||
|
||||
`POST /api/orgs/:orgId/users`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
POST /api/orgs/1/users HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
@@ -262,6 +292,7 @@ Adds a global user to the actual organisation.
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
@@ -271,6 +302,7 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y
|
||||
`PATCH /api/orgs/:orgId/users/:userId`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PATCH /api/orgs/1/users/2 HTTP/1.1
|
||||
Accept: application/json
|
||||
@@ -279,14 +311,16 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y
|
||||
|
||||
```
|
||||
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Delete User in Organisation
|
||||
|
||||
`DELETE /api/orgs/:orgId/users/:userId`
|
||||
|
||||
**Example Request**:
|
||||
@@ -294,13 +328,16 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y
|
||||
```http
|
||||
DELETE /api/orgs/1/users/2 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
Content-Type: application/json
|
||||
[
|
||||
@@ -312,6 +349,7 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y
|
||||
"role":"Admin"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Add User in Organisation
|
||||
|
||||
@@ -319,6 +357,7 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
POST /api/orgs/1/users HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -328,13 +367,16 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y
|
||||
"loginOrEmail":"user",
|
||||
"role":"Viewer"
|
||||
}
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"User added to organization"}
|
||||
```
|
||||
|
||||
## Update Users in Organisation
|
||||
|
||||
@@ -342,6 +384,7 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PATCH /api/orgs/1/users/2 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -350,13 +393,16 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y
|
||||
{
|
||||
"role":"Admin"
|
||||
}
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Organization user updated"}
|
||||
```
|
||||
|
||||
## Delete User in Organisation
|
||||
|
||||
@@ -364,14 +410,18 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/orgs/1/users/2 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"User removed from organization"}
|
||||
```
|
||||
@@ -18,13 +18,16 @@ parent = "http_api"
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/frontend/settings HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -55,6 +58,7 @@ parent = "http_api"
|
||||
},
|
||||
"defaultDatasource": "Grafana"
|
||||
}
|
||||
```
|
||||
|
||||
# Login API
|
||||
|
||||
@@ -64,14 +68,18 @@ parent = "http_api"
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/login/ping HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message": "Logged in"}
|
||||
```
|
||||
@@ -26,17 +26,21 @@ system default value.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/user/preferences HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Update Current User Prefs
|
||||
|
||||
`PUT /api/user/preferences`
|
||||
@@ -44,6 +48,7 @@ system default value.
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PUT /api/user/preferences HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
@@ -54,13 +59,16 @@ system default value.
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: text/plain; charset=utf-8
|
||||
|
||||
```
|
||||
|
||||
## Get Current Org Prefs
|
||||
|
||||
`GET /api/org/preferences`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/org/preferences HTTP/1.1
|
||||
Accept: application/json
|
||||
@@ -68,17 +76,21 @@ system default value.
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Update Current Org Prefs
|
||||
|
||||
`PUT /api/org/preferences`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PUT /api/org/preferences HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -86,6 +98,7 @@ system default value.
|
||||
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
@@ -96,10 +109,13 @@ system default value.
|
||||
"homeDashboardId":0,
|
||||
"timezone":"utc"
|
||||
}
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: text/plain; charset=utf-8
|
||||
|
||||
{"message":"Preferences updated"}
|
||||
```
|
||||
@@ -17,6 +17,7 @@ parent = "http_api"
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
POST /api/snapshots HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -51,6 +52,7 @@ parent = "http_api"
|
||||
},
|
||||
"expires": 3600
|
||||
}
|
||||
```
|
||||
|
||||
JSON Body schema:
|
||||
|
||||
@@ -63,6 +65,7 @@ JSON Body schema:
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
{
|
||||
@@ -71,6 +74,7 @@ JSON Body schema:
|
||||
"key":"YYYYYYY",
|
||||
"url":"myurl/dashboard/snapshot/YYYYYYY"
|
||||
}
|
||||
```
|
||||
|
||||
Keys:
|
||||
|
||||
@@ -83,13 +87,16 @@ Keys:
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/snapshots/YYYYYYY HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -131,6 +138,7 @@ Keys:
|
||||
"version":5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Delete Snapshot by Id
|
||||
|
||||
@@ -138,14 +146,18 @@ Keys:
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/snapshots/YYYYYYY HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Snapshot deleted. It might take an hour before it's cleared from a CDN cache."}
|
||||
```
|
||||
@@ -17,15 +17,18 @@ parent = "http_api"
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/users HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic YWRtaW46YWRtaW4=
|
||||
```
|
||||
|
||||
Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`. Requires basic authentication and that the authenticated user is a Grafana Admin.
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -45,6 +48,7 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter
|
||||
```
|
||||
|
||||
Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`. The `totalCount` field in the response can be used for pagination of the user list E.g. if `totalCount` is equal to 100 users and the `perpage` parameter is set to 10 then there are 10 pages of users. The `query` parameter is optional and it will return results where the query value is contained in one of the `name`, `login` or `email` fields. Query values with spaces need to be url encoded e.g. `query=Jane%20Doe`.
|
||||
|
||||
Requires basic authentication and that the authenticated user is a Grafana Admin.
|
||||
|
||||
**Example Response**:
|
||||
@@ -52,10 +56,12 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
{
|
||||
```
|
||||
|
||||
## Get single user by Id
|
||||
|
||||
`GET /api/users/:id`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
@@ -63,6 +69,7 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
GET /api/users/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic YWRtaW46YWRtaW4=
|
||||
```
|
||||
Requires basic authentication and that the authenticated user is a Grafana Admin.
|
||||
|
||||
@@ -86,6 +93,7 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Request using the username as option**:
|
||||
|
||||
```http
|
||||
@@ -93,15 +101,17 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic YWRtaW46YWRtaW4=
|
||||
```
|
||||
|
||||
Requires basic authentication and that the authenticated user is a Grafana Admin.
|
||||
|
||||
**Example Response**:
|
||||
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## User Update
|
||||
|
||||
@@ -113,6 +123,7 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
PUT /api/users/2 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic YWRtaW46YWRtaW4=
|
||||
|
||||
```
|
||||
|
||||
@@ -120,22 +131,27 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Get Organisations for user
|
||||
|
||||
`GET /api/users/:id/orgs`
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/users/1/orgs HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic YWRtaW46YWRtaW4=
|
||||
```
|
||||
|
||||
Requires basic authentication and that the authenticated user is a Grafana Admin.
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
@@ -147,7 +163,7 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
## User
|
||||
|
||||
## Actual User
|
||||
|
||||
|
||||
`GET /api/user`
|
||||
|
||||
**Example Request**:
|
||||
@@ -155,6 +171,7 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
```http
|
||||
GET /api/user HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
@@ -166,16 +183,18 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
|
||||
```
|
||||
|
||||
## Change Password
|
||||
|
||||
`PUT /api/user/password`
|
||||
|
||||
Changes the password for the user
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
PUT /api/user/password HTTP/1.1
|
||||
Accept: application/json
|
||||
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
|
||||
```
|
||||
@@ -183,15 +202,18 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Switch user context for a specified user
|
||||
|
||||
`POST /api/users/:userId/using/:organizationId`
|
||||
|
||||
Switch user context to the given organization. Requires basic authentication and that the authenticated user is a Grafana Admin.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
POST /api/users/7/using/2 HTTP/1.1
|
||||
@@ -202,6 +224,7 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
@@ -211,13 +234,16 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
|
||||
Switch user context to the given organization.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
POST /api/user/using/2 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
@@ -229,6 +255,7 @@ Requires basic authentication and that the authenticated user is a Grafana Admin
|
||||
|
||||
`GET /api/user/orgs`
|
||||
|
||||
Return a list of all organisations of the current user.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
@@ -238,6 +265,7 @@ Changes the password for the user
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
@@ -248,13 +276,16 @@ Changes the password for the user
|
||||
|
||||
## Star a dashboard
|
||||
|
||||
`POST /api/user/stars/dashboard/:dashboardId`
|
||||
|
||||
Stars the given Dashboard for the actual user.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
POST /api/user/stars/dashboard/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
@@ -264,15 +295,19 @@ Switch user context to the given organization. Requires basic authentication and
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Unstar a dashboard
|
||||
|
||||
`DELETE /api/user/stars/dashboard/:dashboardId`
|
||||
|
||||
Deletes the starring of the given Dashboard for the actual user.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/user/stars/dashboard/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
@@ -282,17 +317,21 @@ Switch user context to the given organization.
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Active organization changed"}
|
||||
```
|
||||
|
||||
## Organisations of the actual User
|
||||
|
||||
@@ -302,13 +341,16 @@ Return a list of all organisations of the current user.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/user/orgs HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
@@ -319,6 +361,7 @@ Return a list of all organisations of the current user.
|
||||
"role":"Admin"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Star a dashboard
|
||||
|
||||
@@ -328,17 +371,21 @@ Stars the given Dashboard for the actual user.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
POST /api/user/stars/dashboard/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Dashboard starred!"}
|
||||
```
|
||||
|
||||
## Unstar a dashboard
|
||||
|
||||
@@ -348,14 +395,18 @@ Deletes the starring of the given Dashboard for the actual user.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
DELETE /api/user/stars/dashboard/1 HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Dashboard unstarred"}
|
||||
```
|
||||
@@ -15,7 +15,7 @@ weight = 1
|
||||
It should be straight forward to get Grafana up and running behind a reverse proxy. But here are some things that you might run into.
|
||||
|
||||
Links and redirects will not be rendered correctly unless you set the server.domain setting.
|
||||
```
|
||||
```bash
|
||||
[server]
|
||||
domain = foo.bar
|
||||
```
|
||||
@@ -28,14 +28,14 @@ Here are some example configurations for running Grafana behind a reverse proxy.
|
||||
|
||||
### Grafana configuration (ex http://foo.bar.com)
|
||||
|
||||
```
|
||||
```bash
|
||||
[server]
|
||||
domain = foo.bar
|
||||
```
|
||||
|
||||
### Nginx configuration
|
||||
|
||||
```
|
||||
```bash
|
||||
server {
|
||||
listen 80;
|
||||
root /usr/share/nginx/www;
|
||||
@@ -50,14 +50,14 @@ server {
|
||||
### Examples with **sub path** (ex http://foo.bar.com/grafana)
|
||||
|
||||
#### Grafana configuration with sub path
|
||||
```
|
||||
```bash
|
||||
[server]
|
||||
domain = foo.bar
|
||||
root_url = %(protocol)s://%(domain)s:/grafana
|
||||
```
|
||||
|
||||
#### Nginx configuration with sub path
|
||||
```
|
||||
```bash
|
||||
server {
|
||||
listen 80;
|
||||
root /usr/share/nginx/www;
|
||||
|
||||
@@ -37,11 +37,14 @@ A common problem is forgetting to uncomment a line in the `custom.ini` (or `graf
|
||||
All options in the configuration file (listed below) can be overridden
|
||||
using environment variables using the syntax:
|
||||
|
||||
```bash
|
||||
GF_<SectionName>_<KeyName>
|
||||
```
|
||||
|
||||
Where the section name is the text within the brackets. Everything
|
||||
should be upper case, `.` should be replaced by `_`. For example, given these configuration settings:
|
||||
|
||||
```bash
|
||||
# default section
|
||||
instance_name = ${HOSTNAME}
|
||||
|
||||
@@ -50,13 +53,15 @@ should be upper case, `.` should be replaced by `_`. For example, given these co
|
||||
|
||||
[auth.google]
|
||||
client_secret = 0ldS3cretKey
|
||||
|
||||
```
|
||||
|
||||
Then you can override them using:
|
||||
|
||||
```bash
|
||||
export GF_DEFAULT_INSTANCE_NAME=my-instance
|
||||
export GF_SECURITY_ADMIN_USER=true
|
||||
export GF_AUTH_GOOGLE_CLIENT_SECRET=newS3cretKey
|
||||
```
|
||||
|
||||
<hr />
|
||||
|
||||
@@ -93,11 +98,15 @@ The IP address to bind to. If empty will bind to all interfaces
|
||||
The port to bind to, defaults to `3000`. To use port 80 you need to
|
||||
either give the Grafana binary permission for example:
|
||||
|
||||
```bash
|
||||
$ sudo setcap 'cap_net_bind_service=+ep' /usr/sbin/grafana-server
|
||||
```
|
||||
|
||||
Or redirect port 80 to the Grafana port using:
|
||||
|
||||
```bash
|
||||
$ sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 3000
|
||||
```
|
||||
|
||||
Another way is put a webserver like Nginx or Apache in front of Grafana and have them proxy requests to Grafana.
|
||||
|
||||
@@ -312,7 +321,9 @@ You need to create a GitHub OAuth application (you find this under the GitHub
|
||||
settings page). When you create the application you will need to specify
|
||||
a callback URL. Specify this as callback:
|
||||
|
||||
```bash
|
||||
http://<my_grafana_server_name_or_ip>:<grafana_server_port>/login/github
|
||||
```
|
||||
|
||||
This callback URL must match the full HTTP address that you use in your
|
||||
browser to access Grafana, but with the prefix path of `/login/github`.
|
||||
@@ -320,6 +331,7 @@ When the GitHub OAuth application is created you will get a Client ID and a
|
||||
Client Secret. Specify these in the Grafana configuration file. For
|
||||
example:
|
||||
|
||||
```bash
|
||||
[auth.github]
|
||||
enabled = true
|
||||
allow_sign_up = true
|
||||
@@ -331,6 +343,7 @@ example:
|
||||
api_url = https://api.github.com/user
|
||||
team_ids =
|
||||
allowed_organizations =
|
||||
```
|
||||
|
||||
Restart the Grafana back-end. You should now see a GitHub login button
|
||||
on the login page. You can now login or sign up with your GitHub
|
||||
@@ -348,6 +361,7 @@ GitHub. If the authenticated user isn't a member of at least one of the
|
||||
teams they will not be able to register or authenticate with your
|
||||
Grafana instance. For example:
|
||||
|
||||
```bash
|
||||
[auth.github]
|
||||
enabled = true
|
||||
client_id = YOUR_GITHUB_APP_CLIENT_ID
|
||||
@@ -357,6 +371,7 @@ Grafana instance. For example:
|
||||
auth_url = https://github.com/login/oauth/authorize
|
||||
token_url = https://github.com/login/oauth/access_token
|
||||
allow_sign_up = true
|
||||
```
|
||||
|
||||
### allowed_organizations
|
||||
|
||||
@@ -365,6 +380,7 @@ organizations on GitHub. If the authenticated user isn't a member of at least
|
||||
one of the organizations they will not be able to register or authenticate with
|
||||
your Grafana instance. For example
|
||||
|
||||
```bash
|
||||
[auth.github]
|
||||
enabled = true
|
||||
client_id = YOUR_GITHUB_APP_CLIENT_ID
|
||||
@@ -375,6 +391,7 @@ your Grafana instance. For example
|
||||
allow_sign_up = true
|
||||
# space-delimited organization names
|
||||
allowed_organizations = github google
|
||||
```
|
||||
|
||||
<hr>
|
||||
|
||||
@@ -385,13 +402,16 @@ Developer Console](https://console.developers.google.com/project). When
|
||||
you create the project you will need to specify a callback URL. Specify
|
||||
this as callback:
|
||||
|
||||
```bash
|
||||
http://<my_grafana_server_name_or_ip>:<grafana_server_port>/login/google
|
||||
```
|
||||
|
||||
This callback URL must match the full HTTP address that you use in your
|
||||
browser to access Grafana, but with the prefix path of `/login/google`.
|
||||
When the Google project is created you will get a Client ID and a Client
|
||||
Secret. Specify these in the Grafana configuration file. For example:
|
||||
|
||||
```bash
|
||||
[auth.google]
|
||||
enabled = true
|
||||
client_id = YOUR_GOOGLE_APP_CLIENT_ID
|
||||
@@ -401,6 +421,7 @@ Secret. Specify these in the Grafana configuration file. For example:
|
||||
token_url = https://accounts.google.com/o/oauth2/token
|
||||
allowed_domains = mycompany.com mycompany.org
|
||||
allow_sign_up = true
|
||||
```
|
||||
|
||||
Restart the Grafana back-end. You should now see a Google login button
|
||||
on the login page. You can now login or sign up with your Google
|
||||
@@ -418,6 +439,7 @@ This option could be used if have your own oauth service.
|
||||
This callback URL must match the full HTTP address that you use in your
|
||||
browser to access Grafana, but with the prefix path of `/login/generic_oauth`.
|
||||
|
||||
```bash
|
||||
[auth.generic_oauth]
|
||||
enabled = true
|
||||
client_id = YOUR_APP_CLIENT_ID
|
||||
@@ -428,9 +450,44 @@ browser to access Grafana, but with the prefix path of `/login/generic_oauth`.
|
||||
api_url =
|
||||
allowed_domains = mycompany.com mycompany.org
|
||||
allow_sign_up = true
|
||||
```
|
||||
|
||||
Set api_url to the resource that returns [OpenID UserInfo](https://connect2id.com/products/server/docs/api/userinfo) compatible information.
|
||||
|
||||
### Set up oauth2 with Okta
|
||||
|
||||
First set up Grafana as an OpenId client "webapplication" in Okta. Then set the Base URIs to `https://<grafana domain>/` and set the Login redirect URIs to `https://<grafana domain>/login/generic_oauth`.
|
||||
|
||||
Finaly set up the generic oauth module like this:
|
||||
```bash
|
||||
[auth.generic_oauth]
|
||||
name = Okta
|
||||
enabled = true
|
||||
scopes = openid profile email
|
||||
client_id = <okta application Client ID>
|
||||
client_secret = <okta application Client Secret>
|
||||
auth_url = https://<okta domain>/oauth2/v1/authorize
|
||||
token_url = https://<okta domain>/oauth2/v1/token
|
||||
api_url = https://<okta domain>/oauth2/v1/userinfo
|
||||
```
|
||||
|
||||
### Set up oauth2 with Bitbucket
|
||||
|
||||
```bash
|
||||
[auth.generic_oauth]
|
||||
name = BitBucket
|
||||
enabled = true
|
||||
allow_sign_up = true
|
||||
client_id = <client id>
|
||||
client_secret = <secret>
|
||||
scopes = account email
|
||||
auth_url = https://bitbucket.org/site/oauth2/authorize
|
||||
token_url = https://bitbucket.org/site/oauth2/access_token
|
||||
api_url = https://api.bitbucket.org/2.0/user
|
||||
team_ids =
|
||||
allowed_organizations =
|
||||
```
|
||||
|
||||
<hr>
|
||||
|
||||
## [auth.basic]
|
||||
@@ -503,21 +560,25 @@ session table manually.
|
||||
|
||||
Mysql Example:
|
||||
|
||||
```bash
|
||||
CREATE TABLE `session` (
|
||||
`key` CHAR(16) NOT NULL,
|
||||
`data` BLOB,
|
||||
`expiry` INT(11) UNSIGNED NOT NULL,
|
||||
PRIMARY KEY (`key`)
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=utf8;
|
||||
```
|
||||
|
||||
Postgres Example:
|
||||
|
||||
```bash
|
||||
CREATE TABLE session (
|
||||
key CHAR(16) NOT NULL,
|
||||
data BYTEA,
|
||||
expiry INTEGER NOT NULL,
|
||||
PRIMARY KEY (key)
|
||||
);
|
||||
```
|
||||
|
||||
Postgres valid `sslmode` are `disable`, `require` (default), `verify-ca`, and `verify-full`.
|
||||
|
||||
@@ -651,11 +712,16 @@ These options control how images should be made public so they can be shared on
|
||||
You can choose between (s3, webdav, gcs). If left empty Grafana will ignore the upload action.
|
||||
|
||||
## [external_image_storage.s3]
|
||||
|
||||
### bucket
|
||||
Bucket name for S3. e.g. grafana.snapshot
|
||||
|
||||
### region
|
||||
Region name for S3. e.g. 'us-east-1', 'cn-north-1', etc
|
||||
|
||||
### path
|
||||
Optional extra path inside bucket, useful to apply expiration policies
|
||||
|
||||
### bucket_url
|
||||
(for backward compatibility, only works when no bucket or region are configured)
|
||||
Bucket URL for S3. AWS region can be specified within URL or defaults to 'us-east-1', e.g.
|
||||
|
||||
@@ -45,13 +45,17 @@ sudo dpkg -i grafana_4.5.2-beta1_amd64.deb
|
||||
|
||||
Add the following line to your `/etc/apt/sources.list` file.
|
||||
|
||||
```bash
|
||||
deb https://packagecloud.io/grafana/stable/debian/ jessie main
|
||||
```
|
||||
|
||||
Use the above line even if you are on Ubuntu or another Debian version.
|
||||
There is also a testing repository if you want beta or release
|
||||
candidates.
|
||||
|
||||
```bash
|
||||
deb https://packagecloud.io/grafana/testing/debian/ jessie main
|
||||
```
|
||||
|
||||
Then add the [Package Cloud](https://packagecloud.io/grafana) key. This
|
||||
allows you to install signed packages.
|
||||
|
||||
@@ -14,7 +14,9 @@ weight = 4
|
||||
|
||||
Grafana is very easy to install and run using the offical docker container.
|
||||
|
||||
```bash
|
||||
$ docker run -d -p 3000:3000 grafana/grafana
|
||||
```
|
||||
|
||||
All Grafana configuration settings can be defined using environment
|
||||
variables, this is especially useful when using the above container.
|
||||
@@ -26,10 +28,12 @@ folder `/var/lib/grafana` and configuration files is in `/etc/grafana/`
|
||||
folder. You can map these volumes to host folders when you start the
|
||||
container:
|
||||
|
||||
```bash
|
||||
$ docker run -d -p 3000:3000 \
|
||||
-v /var/lib/grafana:/var/lib/grafana \
|
||||
-e "GF_SECURITY_ADMIN_PASSWORD=secret" \
|
||||
grafana/grafana
|
||||
```
|
||||
|
||||
In the above example I map the data folder and sets a configuration option via
|
||||
an `ENV` instruction.
|
||||
|
||||
@@ -92,7 +92,7 @@ org_role = "Viewer"
|
||||
By default the configuration expects you to specify a bind DN and bind password. This should be a read only user that can perform LDAP searches.
|
||||
When the user DN is found a second bind is performed with the user provided username & password (in the normal Grafana login form).
|
||||
|
||||
```
|
||||
```bash
|
||||
bind_dn = "cn=admin,dc=grafana,dc=org"
|
||||
bind_password = "grafana"
|
||||
```
|
||||
@@ -102,7 +102,7 @@ bind_password = "grafana"
|
||||
If you can provide a single bind expression that matches all possible users, you can skip the second bind and bind against the user DN directly.
|
||||
This allows you to not specify a bind_password in the configuration file.
|
||||
|
||||
```
|
||||
```bash
|
||||
bind_dn = "cn=%s,o=users,dc=grafana,dc=org"
|
||||
```
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ Installation can be done using [homebrew](http://brew.sh/)
|
||||
|
||||
Install latest stable:
|
||||
|
||||
```
|
||||
```bash
|
||||
brew update
|
||||
brew install grafana
|
||||
```
|
||||
@@ -24,7 +24,7 @@ To start grafana look at the command printed after the homebrew install complete
|
||||
|
||||
To upgrade use the reinstall command
|
||||
|
||||
```
|
||||
```bash
|
||||
brew update
|
||||
brew reinstall grafana
|
||||
```
|
||||
@@ -34,13 +34,13 @@ brew reinstall grafana
|
||||
You can also install the latest unstable grafana from git:
|
||||
|
||||
|
||||
```
|
||||
```bash
|
||||
brew install --HEAD grafana/grafana/grafana
|
||||
```
|
||||
|
||||
To upgrade grafana if you've installed from HEAD:
|
||||
|
||||
```
|
||||
```bash
|
||||
brew reinstall --HEAD grafana/grafana/grafana
|
||||
```
|
||||
|
||||
@@ -48,13 +48,13 @@ brew reinstall --HEAD grafana/grafana/grafana
|
||||
|
||||
To start Grafana using homebrew services first make sure homebrew/services is installed.
|
||||
|
||||
```
|
||||
```bash
|
||||
brew tap homebrew/services
|
||||
```
|
||||
|
||||
Then start Grafana using:
|
||||
|
||||
```
|
||||
```bash
|
||||
brew services start grafana
|
||||
```
|
||||
|
||||
|
||||
@@ -26,24 +26,31 @@ installation.
|
||||
|
||||
You can install Grafana using Yum directly.
|
||||
|
||||
```bash
|
||||
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.2-1.x86_64.rpm
|
||||
```
|
||||
|
||||
Or install manually using `rpm`.
|
||||
|
||||
#### On CentOS / Fedora / Redhat:
|
||||
|
||||
```bash
|
||||
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.2-1.x86_64.rpm
|
||||
$ sudo yum install initscripts fontconfig
|
||||
$ sudo rpm -Uvh grafana-4.5.2-1.x86_64.rpm
|
||||
```
|
||||
|
||||
#### On OpenSuse:
|
||||
|
||||
```bash
|
||||
$ sudo rpm -i --nodeps grafana-4.5.2-1.x86_64.rpm
|
||||
```
|
||||
|
||||
## Install via YUM Repository
|
||||
|
||||
Add the following to a new file at `/etc/yum.repos.d/grafana.repo`
|
||||
|
||||
```bash
|
||||
[grafana]
|
||||
name=grafana
|
||||
baseurl=https://packagecloud.io/grafana/stable/el/6/$basearch
|
||||
@@ -53,14 +60,19 @@ Add the following to a new file at `/etc/yum.repos.d/grafana.repo`
|
||||
gpgkey=https://packagecloud.io/gpg.key https://grafanarel.s3.amazonaws.com/RPM-GPG-KEY-grafana
|
||||
sslverify=1
|
||||
sslcacert=/etc/pki/tls/certs/ca-bundle.crt
|
||||
```
|
||||
|
||||
There is also a testing repository if you want beta or release candidates.
|
||||
|
||||
```bash
|
||||
baseurl=https://packagecloud.io/grafana/testing/el/6/$basearch
|
||||
```
|
||||
|
||||
Then install Grafana via the `yum` command.
|
||||
|
||||
```bash
|
||||
$ sudo yum install grafana
|
||||
```
|
||||
|
||||
### RPM GPG Key
|
||||
|
||||
@@ -81,7 +93,9 @@ key](https://grafanarel.s3.amazonaws.com/RPM-GPG-KEY-grafana).
|
||||
|
||||
You can start Grafana by running:
|
||||
|
||||
```bash
|
||||
$ sudo service grafana-server start
|
||||
```
|
||||
|
||||
This will start the `grafana-server` process as the `grafana` user,
|
||||
which is created during package installation. The default HTTP port is
|
||||
@@ -89,17 +103,23 @@ which is created during package installation. The default HTTP port is
|
||||
|
||||
To configure the Grafana server to start at boot time:
|
||||
|
||||
```bash
|
||||
$ sudo /sbin/chkconfig --add grafana-server
|
||||
```
|
||||
|
||||
## Start the server (via systemd)
|
||||
|
||||
```bash
|
||||
$ systemctl daemon-reload
|
||||
$ systemctl start grafana-server
|
||||
$ systemctl status grafana-server
|
||||
```
|
||||
|
||||
### Enable the systemd service to start at boot
|
||||
|
||||
```bash
|
||||
sudo systemctl enable grafana-server.service
|
||||
```
|
||||
|
||||
## Environment file
|
||||
|
||||
@@ -138,7 +158,7 @@ for example in alert notifications.
|
||||
|
||||
If the image is missing text make sure you have font packages installed.
|
||||
|
||||
```
|
||||
```bash
|
||||
yum install fontconfig
|
||||
yum install freetype*
|
||||
yum install urw-fonts
|
||||
|
||||
@@ -29,7 +29,7 @@ installed grafana to custom location using a binary tar/zip it is usally in `<gr
|
||||
|
||||
#### mysql
|
||||
|
||||
```
|
||||
```bash
|
||||
backup:
|
||||
> mysqldump -u root -p[root_password] [grafana] > grafana_backup.sql
|
||||
|
||||
@@ -39,7 +39,7 @@ restore:
|
||||
|
||||
#### postgres
|
||||
|
||||
```
|
||||
```bash
|
||||
backup:
|
||||
> pg_dump grafana > grafana_backup
|
||||
|
||||
@@ -54,7 +54,7 @@ and execute the same `dpkg -i` command but with the new package. It will upgrade
|
||||
|
||||
If you used our APT repository:
|
||||
|
||||
```
|
||||
```bash
|
||||
sudo apt-get update
|
||||
sudo apt-get install grafana
|
||||
```
|
||||
@@ -73,14 +73,14 @@ and execute the same `yum install` or `rpm -i` command but with the new package.
|
||||
|
||||
If you used our YUM repository:
|
||||
|
||||
```
|
||||
```bash
|
||||
sudo yum update grafana
|
||||
```
|
||||
|
||||
### Docker
|
||||
|
||||
This just an example, details depend on how you configured your grafana container.
|
||||
```
|
||||
```bash
|
||||
docker pull grafana
|
||||
docker stop my-grafana-container
|
||||
docker rm my-grafana-container
|
||||
|
||||
@@ -23,7 +23,7 @@ The most important fields are the first three, especially the id. The convention
|
||||
|
||||
Examples:
|
||||
|
||||
```
|
||||
```bash
|
||||
raintank-worldping-app
|
||||
grafana-simple-json-datasource
|
||||
grafana-piechart-panel
|
||||
@@ -66,7 +66,7 @@ The README.md file is rendered both on Grafana.net and in the plugins section in
|
||||
|
||||
Here is a typical directory structure for a plugin.
|
||||
|
||||
```
|
||||
```bash
|
||||
johnnyb-awesome-datasource
|
||||
|-- dist
|
||||
|-- spec
|
||||
|
||||
@@ -45,7 +45,7 @@ The javascript object that communicates with the database and transforms data to
|
||||
|
||||
The Datasource should contain the following functions:
|
||||
|
||||
```
|
||||
```javascript
|
||||
query(options) //used by panels to get data
|
||||
testDatasource() //used by datasource configuration page to make sure the connection is working
|
||||
annotationQuery(options) // used by dashboards to get annotations
|
||||
|
||||
@@ -30,37 +30,37 @@ On Linux systems the grafana-cli will assume that the grafana plugin directory i
|
||||
### Grafana-cli Commands
|
||||
|
||||
List available plugins
|
||||
```
|
||||
```bash
|
||||
grafana-cli plugins list-remote
|
||||
```
|
||||
|
||||
Install the latest version of a plugin
|
||||
```
|
||||
```bash
|
||||
grafana-cli plugins install <plugin-id>
|
||||
```
|
||||
|
||||
Install a specific version of a plugin
|
||||
```
|
||||
```bash
|
||||
grafana-cli plugins install <plugin-id> <version>
|
||||
```
|
||||
|
||||
List installed plugins
|
||||
```
|
||||
```bash
|
||||
grafana-cli plugins ls
|
||||
```
|
||||
|
||||
Update all installed plugins
|
||||
```
|
||||
```bash
|
||||
grafana-cli plugins update-all
|
||||
```
|
||||
|
||||
Update one plugin
|
||||
```
|
||||
```bash
|
||||
grafana-cli plugins update <plugin-id>
|
||||
```
|
||||
|
||||
Remove one plugin
|
||||
```
|
||||
```bash
|
||||
grafana-cli plugins remove <plugin-id>
|
||||
```
|
||||
|
||||
@@ -73,7 +73,7 @@ The Download URL from Grafana.com API is in this form:
|
||||
`https://grafana.com/api/plugins/<plugin id>/versions/<version number>/download`
|
||||
|
||||
You can specify a local URL by using the `--pluginUrl` option.
|
||||
```
|
||||
```bash
|
||||
grafana-cli --pluginUrl https://nexus.company.com/grafana/plugins/<plugin-id>-<plugin-version>.zip plugins install <plugin-id>
|
||||
```
|
||||
|
||||
@@ -84,7 +84,7 @@ To manually install a Plugin via the Grafana.com API:
|
||||
{{< imgbox img="/img/docs/installation-tab.png" caption="Installation Tab" >}}
|
||||
|
||||
2. Use the Grafana API to find the plugin using this url `https://grafana.com/api/plugins/<plugin id from step 1>`. For example: https://grafana.com/api/plugins/jdbranham-diagram-panel should return:
|
||||
```
|
||||
```bash
|
||||
{
|
||||
"id": 145,
|
||||
"typeId": 3,
|
||||
@@ -97,7 +97,7 @@ To manually install a Plugin via the Grafana.com API:
|
||||
```
|
||||
|
||||
3. Find the download link:
|
||||
```
|
||||
```bash
|
||||
{
|
||||
"rel": "download",
|
||||
"href": "/plugins/jdbranham-diagram-panel/versions/1.4.0/download"
|
||||
|
||||
@@ -13,27 +13,27 @@ dev environment. Grafana ships with its own required backend server; also comple
|
||||
|
||||
## Dependencies
|
||||
|
||||
- [Go 1.9](https://golang.org/dl/)
|
||||
- [Go 1.9.1](https://golang.org/dl/)
|
||||
- [NodeJS LTS](https://nodejs.org/download/)
|
||||
- [Git](https://git-scm.com/downloads)
|
||||
|
||||
## Get Code
|
||||
Create a directory for the project and set your path accordingly (or use the [default Go workspace directory](https://golang.org/doc/code.html#GOPATH)). Then download and install Grafana into your $GOPATH directory:
|
||||
|
||||
```
|
||||
```bash
|
||||
export GOPATH=`pwd`
|
||||
go get github.com/grafana/grafana
|
||||
```
|
||||
|
||||
On Windows use setx instead of export and then restart your command prompt:
|
||||
```
|
||||
```bash
|
||||
setx GOPATH %cd%
|
||||
```
|
||||
|
||||
You may see an error such as: `package github.com/grafana/grafana: no buildable Go source files`. This is just a warning, and you can proceed with the directions.
|
||||
|
||||
## Building the backend
|
||||
```
|
||||
```bash
|
||||
cd $GOPATH/src/github.com/grafana/grafana
|
||||
go run build.go setup
|
||||
go run build.go build # (or 'go build ./pkg/cmd/grafana-server')
|
||||
@@ -45,7 +45,7 @@ to install GCC. We recommend [TDM-GCC](http://tdm-gcc.tdragon.net/download).
|
||||
|
||||
[node-gyp](https://github.com/nodejs/node-gyp#installation) is the Node.js native addon build tool and it requires extra dependencies to be installed on Windows. In a command prompt which is run as administrator, run:
|
||||
|
||||
```
|
||||
```bash
|
||||
npm --add-python-to-path='true' --debug install --global windows-build-tools
|
||||
```
|
||||
|
||||
@@ -53,7 +53,7 @@ npm --add-python-to-path='true' --debug install --global windows-build-tools
|
||||
|
||||
For this you need nodejs (v.6+).
|
||||
|
||||
```
|
||||
```bash
|
||||
npm install -g yarn
|
||||
yarn install --pure-lockfile
|
||||
npm run build
|
||||
@@ -62,7 +62,7 @@ npm run build
|
||||
## Running Grafana Locally
|
||||
You can run a local instance of Grafana by running:
|
||||
|
||||
```
|
||||
```bash
|
||||
./bin/grafana-server
|
||||
```
|
||||
If you built the binary with `go run build.go build`, run `./bin/grafana-server`
|
||||
@@ -76,7 +76,7 @@ Open grafana in your browser (default [http://localhost:3000](http://localhost:3
|
||||
To add features, customize your config, etc, you'll need to rebuild the backend when you change the source code. We use a tool named `bra` that
|
||||
does this.
|
||||
|
||||
```
|
||||
```bash
|
||||
go get github.com/Unknwon/bra
|
||||
|
||||
bra run
|
||||
@@ -88,7 +88,7 @@ You'll also need to run `npm run watch` to watch for changes to the front-end (t
|
||||
|
||||
This step builds linux packages and requires that fpm is installed. Install fpm via `gem install fpm`.
|
||||
|
||||
```
|
||||
```bash
|
||||
go run build.go build package
|
||||
```
|
||||
|
||||
|
||||
@@ -10,12 +10,45 @@ weight = 2
|
||||
|
||||
# Annotations
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v46/annotations.png" max-width="800px" >}}
|
||||
|
||||
Annotations provide a way to mark points on the graph with rich events. When you hover over an annotation
|
||||
you can get title, tags, and text information for the event.
|
||||
you can get event description and event tags. The text field can include links to other systems with more detail.
|
||||
|
||||

|
||||
## Native annotations
|
||||
|
||||
## Queries
|
||||
Grafana v4.6+ comes with a native annotation store and the ability to add annotation events directly from the graph panel or via the [HTTP API]({{< relref "http_api/annotations.md" >}}).
|
||||
|
||||
## Adding annotations
|
||||
|
||||
By holding down CTRL/CMD + mouse click. Add tags to the annotation will make it searchable from other dashboards.
|
||||
|
||||
{{< docs-imagebox img="/img/docs/annotations/annotation-still.png"
|
||||
max-width="600px" animated-gif="/img/docs/annotations/annotation.gif" >}}
|
||||
|
||||
### Adding regions events
|
||||
|
||||
You can also hold down CTRL/CMD and select region to create a region annotation.
|
||||
|
||||
{{< docs-imagebox img="/img/docs/annotations/region-annotation-still.png"
|
||||
max-width="600px" animated-gif="/img/docs/annotations/region-annotation.gif" >}}
|
||||
|
||||
### Built in query
|
||||
|
||||
After you added an annotation they will still be visible. This is due to the built in annotation query that exists on all dashboards. This annotation query will
|
||||
fetch all annotation events that originate from the current dashboard and show them on the panel where they where created. This includes alert state history annotations. You can
|
||||
stop annotations from being fetched & drawn by opening the **Annotations** settings (via Dashboard cogs menu) and modifying the query named `Annotations & Alerts (Built-in)`.
|
||||
|
||||
When you copy a dashboard using the **Save As** feature it will get a new dashboard id so annotations created on source dashboard will no longer be visible on the copy. You
|
||||
can still show them if you add a new **Annotation Query** and filter by tags. But this only works if the annotations on the source dashboard had tags to filter by.
|
||||
|
||||
### Query by tag
|
||||
|
||||
You can create new annotation queries that fetch annotations from the native annotation store via the `-- Grafana --` data source and by setting *Filter by* to `Tags`. Specify at least
|
||||
one tag. For example create an annotation query name `outages` and specify a tag named `outage`. This query will show all annotations you create (from any dashboard or via API) that
|
||||
have the `outage` tag.
|
||||
|
||||
## Querying other data sources
|
||||
|
||||
Annotation events are fetched via annotation queries. To add a new annotation query to a dashboard
|
||||
open the dashboard settings menu, then select `Annotations`. This will open the dashboard annotations
|
||||
|
||||
@@ -24,7 +24,7 @@ When a user creates a new dashboard, a new dashboard JSON object is initialized
|
||||
|
||||
> Note: In the following JSON, id is shown as null which is the default value assigned to it until a dashboard is saved. Once a dashboard is saved, an integer value is assigned to the `id` field.
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"id": null,
|
||||
"title": "New dashboard",
|
||||
|
||||
@@ -43,7 +43,7 @@ You also get a link to service side rendered PNG of the panel. Useful if you wan
|
||||
|
||||
Example of a link to a server-side rendered PNG:
|
||||
|
||||
```
|
||||
```bash
|
||||
http://play.grafana.org/render/dashboard-solo/db/grafana-play-home?orgId=1&panelId=4&from=1499272191563&to=1499279391563&width=1000&height=500&tz=UTC%2B02%3A00&timeout=5000
|
||||
```
|
||||
|
||||
|
||||
@@ -22,24 +22,24 @@ Some parts of the API are only available through basic authentication and these
|
||||
The task is to create a new organization and then add a Token that can be used by other users. In the examples below which use basic auth, the user is `admin` and the password is `admin`.
|
||||
|
||||
1. [Create the org](http://docs.grafana.org/http_api/org/#create-organisation). Here is an example using curl:
|
||||
```
|
||||
```bash
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"name":"apiorg"}' http://admin:admin@localhost:3000/api/orgs
|
||||
```
|
||||
|
||||
This should return a response: `{"message":"Organization created","orgId":6}`. Use the orgId for the next steps.
|
||||
|
||||
2. Optional step. If the org was created previously and/or step 3 fails then first [add your Admin user to the org](http://docs.grafana.org/http_api/org/#add-user-in-organisation):
|
||||
```
|
||||
```bash
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"loginOrEmail":"admin", "role": "Admin"}' http://admin:admin@localhost:3000/api/orgs/<org id of new org>/users
|
||||
```
|
||||
|
||||
3. [Switch the org context for the Admin user to the new org](http://docs.grafana.org/http_api/user/#switch-user-context):
|
||||
```
|
||||
```bash
|
||||
curl -X POST http://admin:admin@localhost:3000/api/user/using/<id of new org>
|
||||
```
|
||||
|
||||
4. [Create the API token](http://docs.grafana.org/http_api/auth/#create-api-key):
|
||||
```
|
||||
```bash
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"name":"apikeycurl", "role": "Admin"}' http://admin:admin@localhost:3000/api/auth/keys
|
||||
```
|
||||
|
||||
@@ -53,7 +53,7 @@ Using the Token that was created in the previous step, you can create a dashboar
|
||||
|
||||
1. [Add a dashboard](http://docs.grafana.org/http_api/dashboard/#create-update-dashboard) using the key (or bearer token as it is also called):
|
||||
|
||||
```
|
||||
```bash
|
||||
curl -X POST --insecure -H "Authorization: Bearer eyJrIjoiR0ZXZmt1UFc0OEpIOGN5RWdUalBJTllUTk83VlhtVGwiLCJuIjoiYXBpa2V5Y3VybCIsImlkIjo2fQ==" -H "Content-Type: application/json" -d '{
|
||||
"dashboard": {
|
||||
"id": null,
|
||||
|
||||
243
docs/sources/tutorials/authproxy.md
Normal file
243
docs/sources/tutorials/authproxy.md
Normal file
@@ -0,0 +1,243 @@
|
||||
+++
|
||||
title = "Grafana Authproxy"
|
||||
type = "docs"
|
||||
keywords = ["grafana", "tutorials", "authproxy"]
|
||||
[menu.docs]
|
||||
parent = "tutorials"
|
||||
weight = 10
|
||||
+++
|
||||
|
||||
# Grafana Authproxy
|
||||
|
||||
AuthProxy allows you to offload the authentication of users to a web server (there are many reasons why you’d want to run a web server in front of a production version of Grafana, especially if it’s exposed to the Internet).
|
||||
|
||||
Popular web servers have a very extensive list of pluggable authentication modules, and any of them can be used with the AuthProxy feature.
|
||||
|
||||
The Grafana AuthProxy feature is very simple in design, but it is this simplicity that makes it so powerful.
|
||||
|
||||
## Interacting with Grafana’s AuthProxy via curl
|
||||
|
||||
The AuthProxy feature can be configured through the Grafana configuration file with the following options:
|
||||
|
||||
```js
|
||||
[auth.proxy]
|
||||
enabled = true
|
||||
header_name = X-WEBAUTH-USER
|
||||
header_property = username
|
||||
auto_sign_up = true
|
||||
```
|
||||
|
||||
* **enabled**: this is to toggle the feature on or off
|
||||
* **header_name**: this is the HTTP header name that passes the username or email address of the authenticated user to Grafana. Grafana will trust what ever username is contained in this header and automatically log the user in.
|
||||
* **header_property**: this tells Grafana whether the value in the header_name is a username or an email address. (In Grafana you can log in using your account username or account email)
|
||||
* **auto_sign_up**: If set to true, Grafana will automatically create user accounts in the Grafana DB if one does not exist. If set to false, users who do not exist in the GrafanaDB won’t be able to log in, even though their username and password are valid.
|
||||
|
||||
With a fresh install of Grafana, using the above configuration for the authProxy feature, we can send a simple API call to list all users. The only user that will be present is the default “Admin” user that is added the first time Grafana starts up. As you can see all we need to do to authenticate the request is to provide the “X-WEBAUTH-USER” header.
|
||||
|
||||
```bash
|
||||
curl -H "X-WEBAUTH-USER: admin" http://localhost:3000/api/users
|
||||
[
|
||||
{
|
||||
"id":1,
|
||||
"name":"",
|
||||
"login":"admin",
|
||||
"email":"admin@localhost",
|
||||
"isAdmin":true
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
We can then send a second request to the `/api/user` method which will return the details of the logged in user. We will use this request to show how Grafana automatically adds the new user we specify to the system. Here we create a new user called “anthony”.
|
||||
|
||||
```bash
|
||||
curl -H "X-WEBAUTH-USER: anthony" http://localhost:3000/api/user
|
||||
{
|
||||
"email":"anthony",
|
||||
"name":"",
|
||||
"login":"anthony",
|
||||
"theme":"",
|
||||
"orgId":1,
|
||||
"isGrafanaAdmin":false
|
||||
}
|
||||
```
|
||||
|
||||
## Making Apache’s auth work together with Grafana’s AuthProxy
|
||||
|
||||
I’ll demonstrate how to use Apache for authenticating users. In this example we use BasicAuth with Apache’s text file based authentication handler, i.e. htpasswd files. However, any available Apache authentication capabilities could be used.
|
||||
|
||||
### Apache BasicAuth
|
||||
|
||||
In this example we use Apache as a reverseProxy in front of Grafana. Apache handles the Authentication of users before forwarding requests to the Grafana backend service.
|
||||
|
||||
#### Apache configuration
|
||||
|
||||
```bash
|
||||
<VirtualHost *:80>
|
||||
ServerAdmin webmaster@authproxy
|
||||
ServerName authproxy
|
||||
ErrorLog "logs/authproxy-error_log"
|
||||
CustomLog "logs/authproxy-access_log" common
|
||||
|
||||
<Proxy *>
|
||||
AuthType Basic
|
||||
AuthName GrafanaAuthProxy
|
||||
AuthBasicProvider file
|
||||
AuthUserFile /etc/apache2/grafana_htpasswd
|
||||
Require valid-user
|
||||
|
||||
RewriteEngine On
|
||||
RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER},NS]
|
||||
RequestHeader set X-WEBAUTH-USER "%{PROXY_USER}e"
|
||||
</Proxy>
|
||||
|
||||
RequestHeader unset Authorization
|
||||
|
||||
ProxyRequests Off
|
||||
ProxyPass / http://localhost:3000/
|
||||
ProxyPassReverse / http://localhost:3000/
|
||||
</VirtualHost>
|
||||
```
|
||||
|
||||
* The first 4 lines of the virtualhost configuration are standard, so we won’t go into detail on what they do.
|
||||
|
||||
* We use a **\<proxy>** configuration block for applying our authentication rules to every proxied request. These rules include requiring basic authentication where user:password credentials are stored in the **/etc/apache2/grafana_htpasswd** file. This file can be created with the `htpasswd` command.
|
||||
|
||||
* The next part of the configuration is the tricky part. We use Apache’s rewrite engine to create our **X-WEBAUTH-USER header**, populated with the authenticated user.
|
||||
|
||||
* **RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER}, NS]**: This line is a little bit of magic. What it does, is for every request use the rewriteEngines look-ahead (LA-U) feature to determine what the REMOTE_USER variable would be set to after processing the request. Then assign the result to the variable PROXY_USER. This is neccessary as the REMOTE_USER variable is not available to the RequestHeader function.
|
||||
|
||||
* **RequestHeader set X-WEBAUTH-USER “%{PROXY_USER}e”**: With the authenticated username now stored in the PROXY_USER variable, we create a new HTTP request header that will be sent to our backend Grafana containing the username.
|
||||
|
||||
* The **RequestHeader unset Authorization** removes the Authorization header from the HTTP request before it is forwarded to Grafana. This ensures that Grafana does not try to authenticate the user using these credentials (BasicAuth is a supported authentication handler in Grafana).
|
||||
|
||||
* The last 3 lines are then just standard reverse proxy configuration to direct all authenticated requests to our Grafana server running on port 3000.
|
||||
|
||||
#### Grafana configuration
|
||||
|
||||
```bash
|
||||
############# Users ################
|
||||
[users]
|
||||
# disable user signup / registration
|
||||
allow_sign_up = false
|
||||
|
||||
# Set to true to automatically assign new users to the default organization (id 1)
|
||||
auto_assign_org = true
|
||||
|
||||
# Default role new users will be automatically assigned (if auto_assign_org above is set to true)
|
||||
auto_assign_org_role = Editor
|
||||
|
||||
|
||||
############ Auth Proxy ########
|
||||
[auth.proxy]
|
||||
enabled = true
|
||||
|
||||
# the Header name that contains the authenticated user.
|
||||
header_name = X-WEBAUTH-USER
|
||||
|
||||
# does the user authenticate against the proxy using a 'username' or an 'email'
|
||||
header_property = username
|
||||
|
||||
# automatically add the user to the system if they don't already exist.
|
||||
auto_sign_up = true
|
||||
```
|
||||
|
||||
#### Full walk through using Docker.
|
||||
|
||||
##### Grafana Container
|
||||
|
||||
For this example, we use the offical Grafana docker image available at [Docker Hub](https://hub.docker.com/r/grafana/grafana/)
|
||||
|
||||
* Create a file `grafana.ini` with the following contents
|
||||
|
||||
```bash
|
||||
[users]
|
||||
allow_sign_up = false
|
||||
auto_assign_org = true
|
||||
auto_assign_org_role = Editor
|
||||
|
||||
[auth.proxy]
|
||||
enabled = true
|
||||
header_name = X-WEBAUTH-USER
|
||||
header_property = username
|
||||
auto_sign_up = true
|
||||
```
|
||||
|
||||
* Launch the Grafana container, using our custom grafana.ini to replace `/etc/grafana/grafana.ini`. We dont expose any ports for this container as it will only be connected to by our Apache container.
|
||||
|
||||
```bash
|
||||
docker run -i -v $(pwd)/grafana.ini:/etc/grafana/grafana.ini --name grafana grafana/grafana
|
||||
```
|
||||
|
||||
### Apache Container
|
||||
|
||||
For this example we use the offical Apache docker image available at [Docker Hub](https://hub.docker.com/_/httpd/)
|
||||
|
||||
* Create a file `httpd.conf` with the following contents
|
||||
|
||||
```bash
|
||||
ServerRoot "/usr/local/apache2"
|
||||
Listen 80
|
||||
LoadModule authn_file_module modules/mod_authn_file.so
|
||||
LoadModule authn_core_module modules/mod_authn_core.so
|
||||
LoadModule authz_host_module modules/mod_authz_host.so
|
||||
LoadModule authz_user_module modules/mod_authz_user.so
|
||||
LoadModule authz_core_module modules/mod_authz_core.so
|
||||
LoadModule auth_basic_module modules/mod_auth_basic.so
|
||||
LoadModule log_config_module modules/mod_log_config.so
|
||||
LoadModule env_module modules/mod_env.so
|
||||
LoadModule headers_module modules/mod_headers.so
|
||||
LoadModule unixd_module modules/mod_unixd.so
|
||||
LoadModule rewrite_module modules/mod_rewrite.so
|
||||
LoadModule proxy_module modules/mod_proxy.so
|
||||
LoadModule proxy_http_module modules/mod_proxy_http.so
|
||||
<IfModule unixd_module>
|
||||
User daemon
|
||||
Group daemon
|
||||
</IfModule>
|
||||
ServerAdmin you@example.com
|
||||
<Directory />
|
||||
AllowOverride none
|
||||
Require all denied
|
||||
</Directory>
|
||||
DocumentRoot "/usr/local/apache2/htdocs"
|
||||
ErrorLog /proc/self/fd/2
|
||||
LogLevel error
|
||||
<IfModule log_config_module>
|
||||
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
|
||||
LogFormat "%h %l %u %t \"%r\" %>s %b" common
|
||||
<IfModule logio_module>
|
||||
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
|
||||
</IfModule>
|
||||
CustomLog /proc/self/fd/1 common
|
||||
</IfModule>
|
||||
<Proxy *>
|
||||
AuthType Basic
|
||||
AuthName GrafanaAuthProxy
|
||||
AuthBasicProvider file
|
||||
AuthUserFile /tmp/htpasswd
|
||||
Require valid-user
|
||||
RewriteEngine On
|
||||
RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER},NS]
|
||||
RequestHeader set X-WEBAUTH-USER "%{PROXY_USER}e"
|
||||
</Proxy>
|
||||
RequestHeader unset Authorization
|
||||
ProxyRequests Off
|
||||
ProxyPass / http://grafana:3000/
|
||||
ProxyPassReverse / http://grafana:3000/
|
||||
```
|
||||
|
||||
* Create a htpasswd file. We create a new user **anthony** with the password **password**
|
||||
|
||||
```bash
|
||||
htpasswd -bc htpasswd anthony password
|
||||
```
|
||||
|
||||
* Launch the httpd container using our custom httpd.conf and our htpasswd file. The container will listen on port 80, and we create a link to the **grafana** container so that this container can resolve the hostname **grafana** to the grafana container’s ip address.
|
||||
|
||||
```bash
|
||||
docker run -i -p 80:80 --link grafana:grafana -v $(pwd)/httpd.conf:/usr/local/apache2/conf/httpd.conf -v $(pwd)/htpasswd:/tmp/htpasswd httpd:2.4
|
||||
```
|
||||
|
||||
### Use grafana.
|
||||
|
||||
With our Grafana and Apache containers running, you can now connect to http://localhost/ and log in using the username/password we created in the htpasswd file.
|
||||
@@ -39,9 +39,9 @@ read the official [Getting Started With Hubot](https://hubot.github.com/docs/) g
|
||||
## Install Hubot-Grafana script
|
||||
|
||||
In your Hubot project repo install the Grafana plugin using `npm`:
|
||||
|
||||
```bash
|
||||
npm install hubot-grafana --save
|
||||
|
||||
```
|
||||
Edit the file external-scripts.json, and add hubot-grafana to the list of plugins.
|
||||
|
||||
```json
|
||||
@@ -56,6 +56,7 @@ Edit the file external-scripts.json, and add hubot-grafana to the list of plugin
|
||||
|
||||
The `hubot-grafana` plugin requires a number of environment variables to be set in order to work properly.
|
||||
|
||||
```bash
|
||||
export HUBOT_GRAFANA_HOST=http://play.grafana.org
|
||||
export HUBOT_GRAFANA_API_KEY=abcd01234deadbeef01234
|
||||
export HUBOT_GRAFANA_S3_BUCKET=mybucket
|
||||
@@ -63,6 +64,7 @@ The `hubot-grafana` plugin requires a number of environment variables to be set
|
||||
export HUBOT_GRAFANA_S3_SECRET_ACCESS_KEY=aBcD01234dEaDbEef01234
|
||||
export HUBOT_GRAFANA_S3_PREFIX=graphs
|
||||
export HUBOT_GRAFANA_S3_REGION=us-standard
|
||||
```
|
||||
|
||||
### Grafana server side rendering
|
||||
|
||||
@@ -112,7 +114,9 @@ can create hubot command aliases with the hubot script `hubot-alias`.
|
||||
|
||||
Install it:
|
||||
|
||||
```bash
|
||||
npm i --save hubot-alias
|
||||
```
|
||||
|
||||
Now add `hubot-alias` to the list of plugins in `external-scripts.json` and restart hubot.
|
||||
|
||||
|
||||
12
package.json
12
package.json
@@ -4,7 +4,7 @@
|
||||
"company": "Grafana Labs"
|
||||
},
|
||||
"name": "grafana",
|
||||
"version": "4.6.0-pre1",
|
||||
"version": "4.6.0-beta1",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "http://github.com/grafana/grafana.git"
|
||||
@@ -66,7 +66,7 @@
|
||||
"karma-webpack": "^2.0.4",
|
||||
"lint-staged": "^4.2.3",
|
||||
"load-grunt-tasks": "3.5.2",
|
||||
"mocha": "3.5.0",
|
||||
"mocha": "^4.0.1",
|
||||
"ng-annotate-loader": "^0.6.1",
|
||||
"ng-annotate-webpack-plugin": "^0.2.1-pre",
|
||||
"ngtemplate-loader": "^2.0.1",
|
||||
@@ -97,13 +97,14 @@
|
||||
"watch": "./node_modules/.bin/webpack --progress --colors --watch --config scripts/webpack/webpack.dev.js",
|
||||
"build": "./node_modules/.bin/grunt build",
|
||||
"test": "./node_modules/.bin/grunt test",
|
||||
"lint" : "./node_modules/.bin/tslint -c tslint.json --project ./tsconfig.json --type-check",
|
||||
"lint": "./node_modules/.bin/tslint -c tslint.json --project tsconfig.json --type-check",
|
||||
"watch-test": "./node_modules/grunt-cli/bin/grunt karma:dev"
|
||||
},
|
||||
"license": "Apache-2.0",
|
||||
"dependencies": {
|
||||
"angular": "^1.6.6",
|
||||
"angular-bindonce": "^0.3.1",
|
||||
"angular-mocks": "^1.6.6",
|
||||
"angular-native-dragdrop": "^1.2.2",
|
||||
"angular-route": "^1.6.6",
|
||||
"angular-sanitize": "^1.6.6",
|
||||
@@ -118,10 +119,11 @@
|
||||
"mousetrap": "^1.6.0",
|
||||
"ngreact": "^0.4.1",
|
||||
"react": "^16.0.0",
|
||||
"rxjs": "^5.4.3",
|
||||
"react-dom": "^16.0.0",
|
||||
"remarkable": "^1.7.1",
|
||||
"rxjs": "^5.4.3",
|
||||
"tether": "^1.4.0",
|
||||
"tether-drop": "https://github.com/torkelo/drop"
|
||||
"tether-drop": "https://github.com/torkelo/drop",
|
||||
"tinycolor2": "^1.4.1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,12 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/api/dtos"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/middleware"
|
||||
"github.com/grafana/grafana/pkg/services/annotations"
|
||||
)
|
||||
@@ -11,13 +16,12 @@ func GetAnnotations(c *middleware.Context) Response {
|
||||
query := &annotations.ItemQuery{
|
||||
From: c.QueryInt64("from") / 1000,
|
||||
To: c.QueryInt64("to") / 1000,
|
||||
Type: annotations.ItemType(c.Query("type")),
|
||||
OrgId: c.OrgId,
|
||||
AlertId: c.QueryInt64("alertId"),
|
||||
DashboardId: c.QueryInt64("dashboardId"),
|
||||
PanelId: c.QueryInt64("panelId"),
|
||||
Limit: c.QueryInt64("limit"),
|
||||
NewState: c.QueryStrings("newState"),
|
||||
Tags: c.QueryStrings("tags"),
|
||||
}
|
||||
|
||||
repo := annotations.GetRepository()
|
||||
@@ -27,25 +31,14 @@ func GetAnnotations(c *middleware.Context) Response {
|
||||
return ApiError(500, "Failed to get annotations", err)
|
||||
}
|
||||
|
||||
result := make([]dtos.Annotation, 0)
|
||||
|
||||
for _, item := range items {
|
||||
result = append(result, dtos.Annotation{
|
||||
AlertId: item.AlertId,
|
||||
Time: item.Epoch * 1000,
|
||||
Data: item.Data,
|
||||
NewState: item.NewState,
|
||||
PrevState: item.PrevState,
|
||||
Text: item.Text,
|
||||
Metric: item.Metric,
|
||||
Title: item.Title,
|
||||
PanelId: item.PanelId,
|
||||
RegionId: item.RegionId,
|
||||
Type: string(item.Type),
|
||||
})
|
||||
if item.Email != "" {
|
||||
item.AvatarUrl = dtos.GetGravatarUrl(item.Email)
|
||||
}
|
||||
item.Time = item.Time * 1000
|
||||
}
|
||||
|
||||
return Json(200, result)
|
||||
return Json(200, items)
|
||||
}
|
||||
|
||||
func PostAnnotation(c *middleware.Context, cmd dtos.PostAnnotationsCmd) Response {
|
||||
@@ -53,14 +46,13 @@ func PostAnnotation(c *middleware.Context, cmd dtos.PostAnnotationsCmd) Response
|
||||
|
||||
item := annotations.Item{
|
||||
OrgId: c.OrgId,
|
||||
UserId: c.UserId,
|
||||
DashboardId: cmd.DashboardId,
|
||||
PanelId: cmd.PanelId,
|
||||
Epoch: cmd.Time / 1000,
|
||||
Title: cmd.Title,
|
||||
Text: cmd.Text,
|
||||
CategoryId: cmd.CategoryId,
|
||||
NewState: cmd.FillColor,
|
||||
Type: annotations.EventType,
|
||||
Data: cmd.Data,
|
||||
Tags: cmd.Tags,
|
||||
}
|
||||
|
||||
if err := repo.Save(&item); err != nil {
|
||||
@@ -71,12 +63,16 @@ func PostAnnotation(c *middleware.Context, cmd dtos.PostAnnotationsCmd) Response
|
||||
if cmd.IsRegion {
|
||||
item.RegionId = item.Id
|
||||
|
||||
if item.Data == nil {
|
||||
item.Data = simplejson.New()
|
||||
}
|
||||
|
||||
if err := repo.Update(&item); err != nil {
|
||||
return ApiError(500, "Failed set regionId on annotation", err)
|
||||
}
|
||||
|
||||
item.Id = 0
|
||||
item.Epoch = cmd.TimeEnd
|
||||
item.Epoch = cmd.TimeEnd / 1000
|
||||
|
||||
if err := repo.Save(&item); err != nil {
|
||||
return ApiError(500, "Failed save annotation for region end time", err)
|
||||
@@ -86,6 +82,95 @@ func PostAnnotation(c *middleware.Context, cmd dtos.PostAnnotationsCmd) Response
|
||||
return ApiSuccess("Annotation added")
|
||||
}
|
||||
|
||||
type GraphiteAnnotationError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e *GraphiteAnnotationError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func formatGraphiteAnnotation(what string, data string) string {
|
||||
return fmt.Sprintf("%s\n%s", what, data)
|
||||
}
|
||||
|
||||
func PostGraphiteAnnotation(c *middleware.Context, cmd dtos.PostGraphiteAnnotationsCmd) Response {
|
||||
repo := annotations.GetRepository()
|
||||
|
||||
if cmd.When == 0 {
|
||||
cmd.When = time.Now().Unix()
|
||||
}
|
||||
text := formatGraphiteAnnotation(cmd.What, cmd.Data)
|
||||
|
||||
// Support tags in prior to Graphite 0.10.0 format (string of tags separated by space)
|
||||
var tagsArray []string
|
||||
switch tags := cmd.Tags.(type) {
|
||||
case string:
|
||||
tagsArray = strings.Split(tags, " ")
|
||||
case []interface{}:
|
||||
for _, t := range tags {
|
||||
if tagStr, ok := t.(string); ok {
|
||||
tagsArray = append(tagsArray, tagStr)
|
||||
} else {
|
||||
err := &GraphiteAnnotationError{"tag should be a string"}
|
||||
return ApiError(500, "Failed to save Graphite annotation", err)
|
||||
}
|
||||
}
|
||||
default:
|
||||
err := &GraphiteAnnotationError{"unsupported tags format"}
|
||||
return ApiError(500, "Failed to save Graphite annotation", err)
|
||||
}
|
||||
|
||||
item := annotations.Item{
|
||||
OrgId: c.OrgId,
|
||||
UserId: c.UserId,
|
||||
Epoch: cmd.When,
|
||||
Text: text,
|
||||
Tags: tagsArray,
|
||||
}
|
||||
|
||||
if err := repo.Save(&item); err != nil {
|
||||
return ApiError(500, "Failed to save Graphite annotation", err)
|
||||
}
|
||||
|
||||
return ApiSuccess("Graphite Annotation added")
|
||||
}
|
||||
|
||||
func UpdateAnnotation(c *middleware.Context, cmd dtos.UpdateAnnotationsCmd) Response {
|
||||
annotationId := c.ParamsInt64(":annotationId")
|
||||
|
||||
repo := annotations.GetRepository()
|
||||
|
||||
item := annotations.Item{
|
||||
OrgId: c.OrgId,
|
||||
UserId: c.UserId,
|
||||
Id: annotationId,
|
||||
Epoch: cmd.Time / 1000,
|
||||
Text: cmd.Text,
|
||||
Tags: cmd.Tags,
|
||||
}
|
||||
|
||||
if err := repo.Update(&item); err != nil {
|
||||
return ApiError(500, "Failed to update annotation", err)
|
||||
}
|
||||
|
||||
if cmd.IsRegion {
|
||||
itemRight := item
|
||||
itemRight.RegionId = item.Id
|
||||
itemRight.Epoch = cmd.TimeEnd / 1000
|
||||
|
||||
// We don't know id of region right event, so set it to 0 and find then using query like
|
||||
// ... WHERE region_id = <item.RegionId> AND id != <item.RegionId> ...
|
||||
itemRight.Id = 0
|
||||
|
||||
if err := repo.Update(&itemRight); err != nil {
|
||||
return ApiError(500, "Failed to update annotation for region end time", err)
|
||||
}
|
||||
}
|
||||
|
||||
return ApiSuccess("Annotation updated")
|
||||
}
|
||||
|
||||
func DeleteAnnotations(c *middleware.Context, cmd dtos.DeleteAnnotationsCmd) Response {
|
||||
repo := annotations.GetRepository()
|
||||
|
||||
@@ -101,3 +186,33 @@ func DeleteAnnotations(c *middleware.Context, cmd dtos.DeleteAnnotationsCmd) Res
|
||||
|
||||
return ApiSuccess("Annotations deleted")
|
||||
}
|
||||
|
||||
func DeleteAnnotationById(c *middleware.Context) Response {
|
||||
repo := annotations.GetRepository()
|
||||
annotationId := c.ParamsInt64(":annotationId")
|
||||
|
||||
err := repo.Delete(&annotations.DeleteParams{
|
||||
Id: annotationId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return ApiError(500, "Failed to delete annotation", err)
|
||||
}
|
||||
|
||||
return ApiSuccess("Annotation deleted")
|
||||
}
|
||||
|
||||
func DeleteAnnotationRegion(c *middleware.Context) Response {
|
||||
repo := annotations.GetRepository()
|
||||
regionId := c.ParamsInt64(":regionId")
|
||||
|
||||
err := repo.Delete(&annotations.DeleteParams{
|
||||
RegionId: regionId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return ApiError(500, "Failed to delete annotation region", err)
|
||||
}
|
||||
|
||||
return ApiSuccess("Annotation region deleted")
|
||||
}
|
||||
|
||||
@@ -289,6 +289,10 @@ func (hs *HttpServer) registerRoutes() {
|
||||
|
||||
apiRoute.Group("/annotations", func(annotationsRoute RouteRegister) {
|
||||
annotationsRoute.Post("/", bind(dtos.PostAnnotationsCmd{}), wrap(PostAnnotation))
|
||||
annotationsRoute.Delete("/:annotationId", wrap(DeleteAnnotationById))
|
||||
annotationsRoute.Put("/:annotationId", bind(dtos.UpdateAnnotationsCmd{}), wrap(UpdateAnnotation))
|
||||
annotationsRoute.Delete("/region/:regionId", wrap(DeleteAnnotationRegion))
|
||||
annotationsRoute.Post("/graphite", bind(dtos.PostGraphiteAnnotationsCmd{}), wrap(PostGraphiteAnnotation))
|
||||
}, reqEditorRole)
|
||||
|
||||
// error test
|
||||
|
||||
@@ -6,19 +6,22 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"gopkg.in/macaron.v1"
|
||||
|
||||
"github.com/grafana/grafana/pkg/api/pluginproxy"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/middleware"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
macaron "gopkg.in/macaron.v1"
|
||||
)
|
||||
|
||||
var pluginProxyTransport = &http.Transport{
|
||||
var pluginProxyTransport *http.Transport
|
||||
|
||||
func InitAppPluginRoutes(r *macaron.Macaron) {
|
||||
pluginProxyTransport = &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
InsecureSkipVerify: setting.PluginAppsSkipVerifyTLS,
|
||||
Renegotiation: tls.RenegotiateFreelyAsClient,
|
||||
},
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
@@ -30,7 +33,6 @@ var pluginProxyTransport = &http.Transport{
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
func InitAppPluginRoutes(r *macaron.Macaron) {
|
||||
for _, plugin := range plugins.Apps {
|
||||
for _, route := range plugin.Routes {
|
||||
url := util.JoinUrlFragments("/api/plugin-proxy/"+plugin.Id, route.Path)
|
||||
|
||||
@@ -65,7 +65,7 @@ func New(hash string) *Avatar {
|
||||
return &Avatar{
|
||||
hash: hash,
|
||||
reqParams: url.Values{
|
||||
"d": {"404"},
|
||||
"d": {"retro"},
|
||||
"size": {"200"},
|
||||
"r": {"pg"}}.Encode(),
|
||||
}
|
||||
@@ -146,7 +146,7 @@ func CacheServer() http.Handler {
|
||||
}
|
||||
|
||||
func newNotFound() *Avatar {
|
||||
avatar := &Avatar{}
|
||||
avatar := &Avatar{notFound: true}
|
||||
|
||||
// load transparent png into buffer
|
||||
path := filepath.Join(setting.StaticRootPath, "img", "transparent.png")
|
||||
|
||||
@@ -2,31 +2,22 @@ package dtos
|
||||
|
||||
import "github.com/grafana/grafana/pkg/components/simplejson"
|
||||
|
||||
type Annotation struct {
|
||||
AlertId int64 `json:"alertId"`
|
||||
DashboardId int64 `json:"dashboardId"`
|
||||
PanelId int64 `json:"panelId"`
|
||||
NewState string `json:"newState"`
|
||||
PrevState string `json:"prevState"`
|
||||
Time int64 `json:"time"`
|
||||
Title string `json:"title"`
|
||||
Text string `json:"text"`
|
||||
Metric string `json:"metric"`
|
||||
RegionId int64 `json:"regionId"`
|
||||
Type string `json:"type"`
|
||||
|
||||
Data *simplejson.Json `json:"data"`
|
||||
}
|
||||
|
||||
type PostAnnotationsCmd struct {
|
||||
DashboardId int64 `json:"dashboardId"`
|
||||
PanelId int64 `json:"panelId"`
|
||||
CategoryId int64 `json:"categoryId"`
|
||||
Time int64 `json:"time"`
|
||||
Title string `json:"title"`
|
||||
Text string `json:"text"`
|
||||
Tags []string `json:"tags"`
|
||||
Data *simplejson.Json `json:"data"`
|
||||
IsRegion bool `json:"isRegion"`
|
||||
TimeEnd int64 `json:"timeEnd"`
|
||||
}
|
||||
|
||||
FillColor string `json:"fillColor"`
|
||||
type UpdateAnnotationsCmd struct {
|
||||
Id int64 `json:"id"`
|
||||
Time int64 `json:"time"`
|
||||
Text string `json:"text"`
|
||||
Tags []string `json:"tags"`
|
||||
IsRegion bool `json:"isRegion"`
|
||||
TimeEnd int64 `json:"timeEnd"`
|
||||
}
|
||||
@@ -35,4 +26,13 @@ type DeleteAnnotationsCmd struct {
|
||||
AlertId int64 `json:"alertId"`
|
||||
DashboardId int64 `json:"dashboardId"`
|
||||
PanelId int64 `json:"panelId"`
|
||||
AnnotationId int64 `json:"annotationId"`
|
||||
RegionId int64 `json:"regionId"`
|
||||
}
|
||||
|
||||
type PostGraphiteAnnotationsCmd struct {
|
||||
When int64 `json:"when"`
|
||||
What string `json:"what"`
|
||||
Data string `json:"data"`
|
||||
Tags interface{} `json:"tags"`
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
@@ -14,7 +13,6 @@ import (
|
||||
)
|
||||
|
||||
var grafanaComProxyTransport = &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: false},
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
|
||||
@@ -11,6 +11,8 @@ import (
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
|
||||
gocache "github.com/patrickmn/go-cache"
|
||||
@@ -19,7 +21,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/api/live"
|
||||
httpstatic "github.com/grafana/grafana/pkg/api/static"
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/cmd/grafana-cli/logger"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/middleware"
|
||||
@@ -153,7 +154,7 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron {
|
||||
|
||||
for _, route := range plugins.StaticRoutes {
|
||||
pluginRoute := path.Join("/public/plugins/", route.PluginId)
|
||||
logger.Debug("Plugins: Adding route", "route", pluginRoute, "dir", route.Directory)
|
||||
hs.log.Debug("Plugins: Adding route", "route", pluginRoute, "dir", route.Directory)
|
||||
hs.mapStatic(m, route.Directory, "", pluginRoute)
|
||||
}
|
||||
|
||||
@@ -187,7 +188,9 @@ func (hs *HttpServer) metricsEndpoint(ctx *macaron.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
promhttp.Handler().ServeHTTP(ctx.Resp, ctx.Req.Request)
|
||||
promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
|
||||
DisableCompression: true,
|
||||
}).ServeHTTP(ctx.Resp, ctx.Req.Request)
|
||||
}
|
||||
|
||||
func (hs *HttpServer) healthHandler(ctx *macaron.Context) {
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
@@ -16,6 +15,7 @@ import (
|
||||
"golang.org/x/oauth2"
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
"github.com/grafana/grafana/pkg/middleware"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
@@ -29,6 +29,7 @@ var (
|
||||
ErrSignUpNotAllowed = errors.New("Signup is not allowed for this adapter")
|
||||
ErrUsersQuotaReached = errors.New("Users quota reached")
|
||||
ErrNoEmail = errors.New("Login provider didn't return an email address")
|
||||
oauthLogger = log.New("oauth.login")
|
||||
)
|
||||
|
||||
func GenStateString() string {
|
||||
@@ -50,10 +51,11 @@ func OAuthLogin(ctx *middleware.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
error := ctx.Query("error")
|
||||
if error != "" {
|
||||
errorParam := ctx.Query("error")
|
||||
if errorParam != "" {
|
||||
errorDesc := ctx.Query("error_description")
|
||||
redirectWithError(ctx, ErrProviderDeniedRequest, "error", error, "errorDesc", errorDesc)
|
||||
oauthLogger.Error("failed to login ", "error", errorParam, "errorDesc", errorDesc)
|
||||
redirectWithError(ctx, ErrProviderDeniedRequest, "error", errorParam, "errorDesc", errorDesc)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -69,8 +71,12 @@ func OAuthLogin(ctx *middleware.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
// verify state string
|
||||
savedState := ctx.Session.Get(middleware.SESS_KEY_OAUTH_STATE).(string)
|
||||
savedState, ok := ctx.Session.Get(middleware.SESS_KEY_OAUTH_STATE).(string)
|
||||
if !ok {
|
||||
ctx.Handle(500, "login.OAuthLogin(missing saved state)", nil)
|
||||
return
|
||||
}
|
||||
|
||||
queryState := ctx.Query("state")
|
||||
if savedState != queryState {
|
||||
ctx.Handle(500, "login.OAuthLogin(state mismatch)", nil)
|
||||
@@ -78,35 +84,36 @@ func OAuthLogin(ctx *middleware.Context) {
|
||||
}
|
||||
|
||||
// handle call back
|
||||
|
||||
// initialize oauth2 context
|
||||
oauthCtx := oauth2.NoContext
|
||||
if setting.OAuthService.OAuthInfos[name].TlsClientCert != "" {
|
||||
cert, err := tls.LoadX509KeyPair(setting.OAuthService.OAuthInfos[name].TlsClientCert, setting.OAuthService.OAuthInfos[name].TlsClientKey)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: setting.OAuthService.OAuthInfos[name].TlsSkipVerify,
|
||||
},
|
||||
}
|
||||
oauthClient := &http.Client{
|
||||
Transport: tr,
|
||||
}
|
||||
|
||||
// Load CA cert
|
||||
if setting.OAuthService.OAuthInfos[name].TlsClientCert != "" || setting.OAuthService.OAuthInfos[name].TlsClientKey != "" {
|
||||
cert, err := tls.LoadX509KeyPair(setting.OAuthService.OAuthInfos[name].TlsClientCert, setting.OAuthService.OAuthInfos[name].TlsClientKey)
|
||||
if err != nil {
|
||||
log.Fatal(1, "Failed to setup TlsClientCert", "oauth provider", name, "error", err)
|
||||
}
|
||||
|
||||
tr.TLSClientConfig.Certificates = append(tr.TLSClientConfig.Certificates, cert)
|
||||
}
|
||||
|
||||
if setting.OAuthService.OAuthInfos[name].TlsClientCa != "" {
|
||||
caCert, err := ioutil.ReadFile(setting.OAuthService.OAuthInfos[name].TlsClientCa)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
log.Fatal(1, "Failed to setup TlsClientCa", "oauth provider", name, "error", err)
|
||||
}
|
||||
caCertPool := x509.NewCertPool()
|
||||
caCertPool.AppendCertsFromPEM(caCert)
|
||||
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
Certificates: []tls.Certificate{cert},
|
||||
RootCAs: caCertPool,
|
||||
},
|
||||
tr.TLSClientConfig.RootCAs = caCertPool
|
||||
}
|
||||
sslcli := &http.Client{Transport: tr}
|
||||
|
||||
oauthCtx = context.Background()
|
||||
oauthCtx = context.WithValue(oauthCtx, oauth2.HTTPClient, sslcli)
|
||||
}
|
||||
oauthCtx := context.WithValue(context.Background(), oauth2.HTTPClient, oauthClient)
|
||||
|
||||
// get token from provider
|
||||
token, err := connect.Exchange(oauthCtx, code)
|
||||
|
||||
@@ -158,7 +158,9 @@ func GetPluginMarkdown(c *middleware.Context) Response {
|
||||
|
||||
return ApiError(500, "Could not get markdown file", err)
|
||||
} else {
|
||||
return Respond(200, content)
|
||||
resp := Respond(200, content)
|
||||
resp.Header("Content-Type", "text/plain; charset=utf-8")
|
||||
return resp
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -17,8 +17,6 @@ var version = "master"
|
||||
func main() {
|
||||
setupLogging()
|
||||
|
||||
services.Init(version)
|
||||
|
||||
app := cli.NewApp()
|
||||
app.Name = "Grafana cli"
|
||||
app.Usage = ""
|
||||
@@ -44,12 +42,20 @@ func main() {
|
||||
Value: "",
|
||||
EnvVar: "GF_PLUGIN_URL",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "insecure",
|
||||
Usage: "Skip TLS verification (insecure)",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "debug, d",
|
||||
Usage: "enable debug logging",
|
||||
},
|
||||
}
|
||||
|
||||
app.Before = func(c *cli.Context) error {
|
||||
services.Init(version, c.GlobalBool("insecure"))
|
||||
return nil
|
||||
}
|
||||
app.Commands = commands.Commands
|
||||
app.CommandNotFound = cmdNotFound
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ var (
|
||||
grafanaVersion string
|
||||
)
|
||||
|
||||
func Init(version string) {
|
||||
func Init(version string, skipTLSVerify bool) {
|
||||
grafanaVersion = version
|
||||
|
||||
tr := &http.Transport{
|
||||
@@ -36,8 +36,9 @@ func Init(version string) {
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: false},
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: skipTLSVerify,
|
||||
},
|
||||
}
|
||||
|
||||
HttpClient = http.Client{
|
||||
|
||||
@@ -26,7 +26,7 @@ import (
|
||||
_ "github.com/grafana/grafana/pkg/tsdb/influxdb"
|
||||
_ "github.com/grafana/grafana/pkg/tsdb/mysql"
|
||||
_ "github.com/grafana/grafana/pkg/tsdb/opentsdb"
|
||||
|
||||
_ "github.com/grafana/grafana/pkg/tsdb/postgres"
|
||||
_ "github.com/grafana/grafana/pkg/tsdb/prometheus"
|
||||
_ "github.com/grafana/grafana/pkg/tsdb/testdata"
|
||||
)
|
||||
|
||||
@@ -30,9 +30,15 @@ func NewImageUploader() (ImageUploader, error) {
|
||||
|
||||
bucket := s3sec.Key("bucket").MustString("")
|
||||
region := s3sec.Key("region").MustString("")
|
||||
path := s3sec.Key("path").MustString("")
|
||||
bucketUrl := s3sec.Key("bucket_url").MustString("")
|
||||
accessKey := s3sec.Key("access_key").MustString("")
|
||||
secretKey := s3sec.Key("secret_key").MustString("")
|
||||
|
||||
if path != "" && path[len(path)-1:] != "/" {
|
||||
path += "/"
|
||||
}
|
||||
|
||||
if bucket == "" || region == "" {
|
||||
info, err := getRegionAndBucketFromUrl(bucketUrl)
|
||||
if err != nil {
|
||||
@@ -42,7 +48,7 @@ func NewImageUploader() (ImageUploader, error) {
|
||||
region = info.region
|
||||
}
|
||||
|
||||
return NewS3Uploader(region, bucket, "public-read", accessKey, secretKey), nil
|
||||
return NewS3Uploader(region, bucket, path, "public-read", accessKey, secretKey), nil
|
||||
case "webdav":
|
||||
webdavSec, err := setting.Cfg.GetSection("external_image_storage.webdav")
|
||||
if err != nil {
|
||||
|
||||
@@ -19,16 +19,18 @@ import (
|
||||
type S3Uploader struct {
|
||||
region string
|
||||
bucket string
|
||||
path string
|
||||
acl string
|
||||
secretKey string
|
||||
accessKey string
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func NewS3Uploader(region, bucket, acl, accessKey, secretKey string) *S3Uploader {
|
||||
func NewS3Uploader(region, bucket, path, acl, accessKey, secretKey string) *S3Uploader {
|
||||
return &S3Uploader{
|
||||
region: region,
|
||||
bucket: bucket,
|
||||
path: path,
|
||||
acl: acl,
|
||||
accessKey: accessKey,
|
||||
secretKey: secretKey,
|
||||
@@ -56,7 +58,7 @@ func (u *S3Uploader) Upload(ctx context.Context, imageDiskPath string) (string,
|
||||
}
|
||||
|
||||
s3_endpoint, _ := endpoints.DefaultResolver().EndpointFor("s3", u.region)
|
||||
key := util.GetRandomString(20) + ".png"
|
||||
key := u.path + util.GetRandomString(20) + ".png"
|
||||
image_url := s3_endpoint.URL + "/" + u.bucket + "/" + key
|
||||
log.Debug("Uploading image to s3", "url = ", image_url)
|
||||
|
||||
|
||||
@@ -73,11 +73,12 @@ type GetDashboardSnapshotQuery struct {
|
||||
}
|
||||
|
||||
type DashboardSnapshots []*DashboardSnapshot
|
||||
type DashboardSnapshotsList []*DashboardSnapshotDTO
|
||||
|
||||
type GetDashboardSnapshotsQuery struct {
|
||||
Name string
|
||||
Limit int
|
||||
OrgId int64
|
||||
|
||||
Result DashboardSnapshots
|
||||
Result DashboardSnapshotsList
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ const (
|
||||
DS_CLOUDWATCH = "cloudwatch"
|
||||
DS_KAIROSDB = "kairosdb"
|
||||
DS_PROMETHEUS = "prometheus"
|
||||
DS_POSTGRES = "postgres"
|
||||
DS_ACCESS_DIRECT = "direct"
|
||||
DS_ACCESS_PROXY = "proxy"
|
||||
)
|
||||
@@ -62,6 +63,7 @@ var knownDatasourcePlugins map[string]bool = map[string]bool{
|
||||
DS_CLOUDWATCH: true,
|
||||
DS_PROMETHEUS: true,
|
||||
DS_OPENTSDB: true,
|
||||
DS_POSTGRES: true,
|
||||
"opennms": true,
|
||||
"druid": true,
|
||||
"dalmatinerdb": true,
|
||||
|
||||
@@ -3,6 +3,7 @@ package models
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
@@ -45,9 +46,16 @@ func (ds *DataSource) GetHttpTransport() (*http.Transport, error) {
|
||||
return t.Transport, nil
|
||||
}
|
||||
|
||||
var tlsSkipVerify, tlsClientAuth, tlsAuthWithCACert bool
|
||||
if ds.JsonData != nil {
|
||||
tlsClientAuth = ds.JsonData.Get("tlsAuth").MustBool(false)
|
||||
tlsAuthWithCACert = ds.JsonData.Get("tlsAuthWithCACert").MustBool(false)
|
||||
tlsSkipVerify = ds.JsonData.Get("tlsSkipVerify").MustBool(false)
|
||||
}
|
||||
|
||||
transport := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
InsecureSkipVerify: tlsSkipVerify,
|
||||
Renegotiation: tls.RenegotiateFreelyAsClient,
|
||||
},
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
@@ -62,31 +70,25 @@ func (ds *DataSource) GetHttpTransport() (*http.Transport, error) {
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
}
|
||||
|
||||
var tlsAuth, tlsAuthWithCACert bool
|
||||
if ds.JsonData != nil {
|
||||
tlsAuth = ds.JsonData.Get("tlsAuth").MustBool(false)
|
||||
tlsAuthWithCACert = ds.JsonData.Get("tlsAuthWithCACert").MustBool(false)
|
||||
}
|
||||
|
||||
if tlsAuth {
|
||||
transport.TLSClientConfig.InsecureSkipVerify = false
|
||||
|
||||
if tlsClientAuth || tlsAuthWithCACert {
|
||||
decrypted := ds.SecureJsonData.Decrypt()
|
||||
|
||||
if tlsAuthWithCACert && len(decrypted["tlsCACert"]) > 0 {
|
||||
caPool := x509.NewCertPool()
|
||||
ok := caPool.AppendCertsFromPEM([]byte(decrypted["tlsCACert"]))
|
||||
if ok {
|
||||
if !ok {
|
||||
return nil, errors.New("Failed to parse TLS CA PEM certificate")
|
||||
}
|
||||
transport.TLSClientConfig.RootCAs = caPool
|
||||
}
|
||||
}
|
||||
|
||||
if tlsClientAuth {
|
||||
cert, err := tls.X509KeyPair([]byte(decrypted["tlsClientCert"]), []byte(decrypted["tlsClientKey"]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transport.TLSClientConfig.Certificates = []tls.Certificate{cert}
|
||||
}
|
||||
}
|
||||
|
||||
ptc.cache[ds.Id] = cachedTransport{
|
||||
Transport: transport,
|
||||
|
||||
@@ -29,61 +29,140 @@ func TestDataSourceCache(t *testing.T) {
|
||||
Convey("Should be using the cached proxy", func() {
|
||||
So(t2, ShouldEqual, t1)
|
||||
})
|
||||
Convey("Should verify TLS by default", func() {
|
||||
So(t1.TLSClientConfig.InsecureSkipVerify, ShouldEqual, false)
|
||||
})
|
||||
Convey("Should have no TLS client certificate configured", func() {
|
||||
So(len(t1.TLSClientConfig.Certificates), ShouldEqual, 0)
|
||||
})
|
||||
Convey("Should have no user-supplied TLS CA onfigured", func() {
|
||||
So(t1.TLSClientConfig.RootCAs, ShouldBeNil)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("When getting kubernetes datasource proxy", t, func() {
|
||||
Convey("When caching a datasource proxy then updating it", t, func() {
|
||||
clearCache()
|
||||
setting.SecretKey = "password"
|
||||
|
||||
json := simplejson.New()
|
||||
json.Set("tlsAuthWithCACert", true)
|
||||
|
||||
tlsCaCert, err := util.Encrypt([]byte(caCert), "password")
|
||||
So(err, ShouldBeNil)
|
||||
ds := DataSource{
|
||||
Id: 1,
|
||||
Url: "http://k8s:8001",
|
||||
Type: "Kubernetes",
|
||||
SecureJsonData: map[string][]byte{"tlsCACert": tlsCaCert},
|
||||
Updated: time.Now().Add(-2 * time.Minute),
|
||||
}
|
||||
|
||||
t1, err := ds.GetHttpTransport()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Should verify TLS by default", func() {
|
||||
So(t1.TLSClientConfig.InsecureSkipVerify, ShouldEqual, false)
|
||||
})
|
||||
Convey("Should have no TLS client certificate configured", func() {
|
||||
So(len(t1.TLSClientConfig.Certificates), ShouldEqual, 0)
|
||||
})
|
||||
Convey("Should have no user-supplied TLS CA configured", func() {
|
||||
So(t1.TLSClientConfig.RootCAs, ShouldBeNil)
|
||||
})
|
||||
|
||||
ds.JsonData = nil
|
||||
ds.SecureJsonData = map[string][]byte{}
|
||||
ds.Updated = time.Now()
|
||||
|
||||
t2, err := ds.GetHttpTransport()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Should have no user-supplied TLS CA configured after the update", func() {
|
||||
So(t2.TLSClientConfig.RootCAs, ShouldBeNil)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("When caching a datasource proxy with TLS client authentication enabled", t, func() {
|
||||
clearCache()
|
||||
setting.SecretKey = "password"
|
||||
|
||||
json := simplejson.New()
|
||||
json.Set("tlsAuth", true)
|
||||
json.Set("tlsAuthWithCACert", true)
|
||||
|
||||
t := time.Now()
|
||||
tlsClientCert, err := util.Encrypt([]byte(clientCert), "password")
|
||||
So(err, ShouldBeNil)
|
||||
tlsClientKey, err := util.Encrypt([]byte(clientKey), "password")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
ds := DataSource{
|
||||
Id: 1,
|
||||
Url: "http://k8s:8001",
|
||||
Type: "Kubernetes",
|
||||
Updated: t.Add(-2 * time.Minute),
|
||||
}
|
||||
|
||||
transport, err := ds.GetHttpTransport()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Should have no cert", func() {
|
||||
So(transport.TLSClientConfig.InsecureSkipVerify, ShouldEqual, true)
|
||||
})
|
||||
|
||||
ds.JsonData = json
|
||||
|
||||
tlsCaCert, _ := util.Encrypt([]byte(caCert), "password")
|
||||
tlsClientCert, _ := util.Encrypt([]byte(clientCert), "password")
|
||||
tlsClientKey, _ := util.Encrypt([]byte(clientKey), "password")
|
||||
|
||||
ds.SecureJsonData = map[string][]byte{
|
||||
"tlsCACert": tlsCaCert,
|
||||
JsonData: json,
|
||||
SecureJsonData: map[string][]byte{
|
||||
"tlsClientCert": tlsClientCert,
|
||||
"tlsClientKey": tlsClientKey,
|
||||
},
|
||||
}
|
||||
ds.Updated = t.Add(-1 * time.Minute)
|
||||
|
||||
transport, err = ds.GetHttpTransport()
|
||||
tr, err := ds.GetHttpTransport()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Should add cert", func() {
|
||||
So(transport.TLSClientConfig.InsecureSkipVerify, ShouldEqual, false)
|
||||
So(len(transport.TLSClientConfig.Certificates), ShouldEqual, 1)
|
||||
Convey("Should verify TLS by default", func() {
|
||||
So(tr.TLSClientConfig.InsecureSkipVerify, ShouldEqual, false)
|
||||
})
|
||||
Convey("Should have a TLS client certificate configured", func() {
|
||||
So(len(tr.TLSClientConfig.Certificates), ShouldEqual, 1)
|
||||
})
|
||||
})
|
||||
|
||||
ds.JsonData = nil
|
||||
ds.SecureJsonData = map[string][]byte{}
|
||||
ds.Updated = t
|
||||
Convey("When caching a datasource proxy with a user-supplied TLS CA", t, func() {
|
||||
clearCache()
|
||||
setting.SecretKey = "password"
|
||||
|
||||
transport, err = ds.GetHttpTransport()
|
||||
json := simplejson.New()
|
||||
json.Set("tlsAuthWithCACert", true)
|
||||
|
||||
tlsCaCert, err := util.Encrypt([]byte(caCert), "password")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Should remove cert", func() {
|
||||
So(transport.TLSClientConfig.InsecureSkipVerify, ShouldEqual, true)
|
||||
So(len(transport.TLSClientConfig.Certificates), ShouldEqual, 0)
|
||||
ds := DataSource{
|
||||
Id: 1,
|
||||
Url: "http://k8s:8001",
|
||||
Type: "Kubernetes",
|
||||
JsonData: json,
|
||||
SecureJsonData: map[string][]byte{"tlsCACert": tlsCaCert},
|
||||
}
|
||||
|
||||
tr, err := ds.GetHttpTransport()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Should verify TLS by default", func() {
|
||||
So(tr.TLSClientConfig.InsecureSkipVerify, ShouldEqual, false)
|
||||
})
|
||||
Convey("Should have a TLS CA configured", func() {
|
||||
So(len(tr.TLSClientConfig.RootCAs.Subjects()), ShouldEqual, 1)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("When caching a datasource proxy when user skips TLS verification", t, func() {
|
||||
clearCache()
|
||||
|
||||
json := simplejson.New()
|
||||
json.Set("tlsSkipVerify", true)
|
||||
|
||||
ds := DataSource{
|
||||
Id: 1,
|
||||
Url: "http://k8s:8001",
|
||||
Type: "Kubernetes",
|
||||
JsonData: json,
|
||||
}
|
||||
|
||||
tr, err := ds.GetHttpTransport()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Should skip TLS verification", func() {
|
||||
So(tr.TLSClientConfig.InsecureSkipVerify, ShouldEqual, true)
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -115,7 +194,8 @@ FHoXIyGOdq1chmRVocdGBCF8fUoGIbuF14r53rpvcbEKtKnnP8+96luKAZLq0a4n
|
||||
3lb92xM=
|
||||
-----END CERTIFICATE-----`
|
||||
|
||||
const clientCert string = `-----BEGIN CERTIFICATE-----
|
||||
const clientCert string = `
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICsjCCAZoCCQCcd8sOfstQLzANBgkqhkiG9w0BAQsFADAXMRUwEwYDVQQDDAxj
|
||||
YS1rOHMtc3RobG0wHhcNMTYxMTAyMDkyNTE1WhcNMTcxMTAyMDkyNTE1WjAfMR0w
|
||||
GwYDVQQDDBRhZG0tZGFuaWVsLWs4cy1zdGhsbTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
|
||||
60
pkg/models/tags.go
Normal file
60
pkg/models/tags.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Tag struct {
|
||||
Id int64
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
func ParseTagPairs(tagPairs []string) (tags []*Tag) {
|
||||
if tagPairs == nil {
|
||||
return []*Tag{}
|
||||
}
|
||||
|
||||
for _, tagPair := range tagPairs {
|
||||
var tag Tag
|
||||
|
||||
if strings.Contains(tagPair, ":") {
|
||||
keyValue := strings.Split(tagPair, ":")
|
||||
tag.Key = strings.Trim(keyValue[0], " ")
|
||||
tag.Value = strings.Trim(keyValue[1], " ")
|
||||
} else {
|
||||
tag.Key = strings.Trim(tagPair, " ")
|
||||
}
|
||||
|
||||
if tag.Key == "" || ContainsTag(tags, &tag) {
|
||||
continue
|
||||
}
|
||||
|
||||
tags = append(tags, &tag)
|
||||
}
|
||||
|
||||
return tags
|
||||
}
|
||||
|
||||
func ContainsTag(existingTags []*Tag, tag *Tag) bool {
|
||||
for _, t := range existingTags {
|
||||
if t.Key == tag.Key && t.Value == tag.Value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func JoinTagPairs(tags []*Tag) []string {
|
||||
tagPairs := []string{}
|
||||
|
||||
for _, tag := range tags {
|
||||
if tag.Value != "" {
|
||||
tagPairs = append(tagPairs, tag.Key+":"+tag.Value)
|
||||
} else {
|
||||
tagPairs = append(tagPairs, tag.Key)
|
||||
}
|
||||
}
|
||||
|
||||
return tagPairs
|
||||
}
|
||||
95
pkg/models/tags_test.go
Normal file
95
pkg/models/tags_test.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestParsingTags(t *testing.T) {
|
||||
Convey("Testing parsing a tag pairs into tags", t, func() {
|
||||
Convey("Can parse one empty tag", func() {
|
||||
tags := ParseTagPairs([]string{""})
|
||||
So(len(tags), ShouldEqual, 0)
|
||||
})
|
||||
|
||||
Convey("Can parse valid tags", func() {
|
||||
tags := ParseTagPairs([]string{"outage", "type:outage", "error"})
|
||||
So(len(tags), ShouldEqual, 3)
|
||||
So(tags[0].Key, ShouldEqual, "outage")
|
||||
So(tags[0].Value, ShouldEqual, "")
|
||||
So(tags[1].Key, ShouldEqual, "type")
|
||||
So(tags[1].Value, ShouldEqual, "outage")
|
||||
So(tags[2].Key, ShouldEqual, "error")
|
||||
So(tags[2].Value, ShouldEqual, "")
|
||||
})
|
||||
|
||||
Convey("Can parse tags with spaces", func() {
|
||||
tags := ParseTagPairs([]string{" outage ", " type : outage ", "error "})
|
||||
So(len(tags), ShouldEqual, 3)
|
||||
So(tags[0].Key, ShouldEqual, "outage")
|
||||
So(tags[0].Value, ShouldEqual, "")
|
||||
So(tags[1].Key, ShouldEqual, "type")
|
||||
So(tags[1].Value, ShouldEqual, "outage")
|
||||
So(tags[2].Key, ShouldEqual, "error")
|
||||
So(tags[2].Value, ShouldEqual, "")
|
||||
})
|
||||
|
||||
Convey("Can parse empty tags", func() {
|
||||
tags := ParseTagPairs([]string{" outage ", "", "", ":", "type : outage ", "error ", "", ""})
|
||||
So(len(tags), ShouldEqual, 3)
|
||||
So(tags[0].Key, ShouldEqual, "outage")
|
||||
So(tags[0].Value, ShouldEqual, "")
|
||||
So(tags[1].Key, ShouldEqual, "type")
|
||||
So(tags[1].Value, ShouldEqual, "outage")
|
||||
So(tags[2].Key, ShouldEqual, "error")
|
||||
So(tags[2].Value, ShouldEqual, "")
|
||||
})
|
||||
|
||||
Convey("Can parse tags with extra colons", func() {
|
||||
tags := ParseTagPairs([]string{" outage", "type : outage:outage2 :outage3 ", "error :"})
|
||||
So(len(tags), ShouldEqual, 3)
|
||||
So(tags[0].Key, ShouldEqual, "outage")
|
||||
So(tags[0].Value, ShouldEqual, "")
|
||||
So(tags[1].Key, ShouldEqual, "type")
|
||||
So(tags[1].Value, ShouldEqual, "outage")
|
||||
So(tags[2].Key, ShouldEqual, "error")
|
||||
So(tags[2].Value, ShouldEqual, "")
|
||||
})
|
||||
|
||||
Convey("Can parse tags that contains key and values with spaces", func() {
|
||||
tags := ParseTagPairs([]string{" outage 1", "type 1: outage 1 ", "has error "})
|
||||
So(len(tags), ShouldEqual, 3)
|
||||
So(tags[0].Key, ShouldEqual, "outage 1")
|
||||
So(tags[0].Value, ShouldEqual, "")
|
||||
So(tags[1].Key, ShouldEqual, "type 1")
|
||||
So(tags[1].Value, ShouldEqual, "outage 1")
|
||||
So(tags[2].Key, ShouldEqual, "has error")
|
||||
So(tags[2].Value, ShouldEqual, "")
|
||||
})
|
||||
|
||||
Convey("Can filter out duplicate tags", func() {
|
||||
tags := ParseTagPairs([]string{"test", "test", "key:val1", "key:val2"})
|
||||
So(len(tags), ShouldEqual, 3)
|
||||
So(tags[0].Key, ShouldEqual, "test")
|
||||
So(tags[0].Value, ShouldEqual, "")
|
||||
So(tags[1].Key, ShouldEqual, "key")
|
||||
So(tags[1].Value, ShouldEqual, "val1")
|
||||
So(tags[2].Key, ShouldEqual, "key")
|
||||
So(tags[2].Value, ShouldEqual, "val2")
|
||||
})
|
||||
|
||||
Convey("Can join tag pairs", func() {
|
||||
tagPairs := []*Tag{
|
||||
{Key: "key1", Value: "val1"},
|
||||
{Key: "key2", Value: ""},
|
||||
{Key: "key3"},
|
||||
}
|
||||
tags := JoinTagPairs(tagPairs)
|
||||
So(len(tags), ShouldEqual, 3)
|
||||
So(tags[0], ShouldEqual, "key1:val1")
|
||||
So(tags[1], ShouldEqual, "key2")
|
||||
So(tags[2], ShouldEqual, "key3")
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -84,15 +84,17 @@ func (this *HipChatNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
return err
|
||||
}
|
||||
|
||||
message := evalContext.GetNotificationTitle() + " in state " + evalContext.GetStateModel().Text + "<br><a href=" + ruleUrl + ">Check Dashboard</a>"
|
||||
fields := make([]map[string]interface{}, 0)
|
||||
message += "<br>"
|
||||
attributes := make([]map[string]interface{}, 0)
|
||||
for index, evt := range evalContext.EvalMatches {
|
||||
message += evt.Metric + " :: " + strconv.FormatFloat(evt.Value.Float64, 'f', -1, 64) + "<br>"
|
||||
fields = append(fields, map[string]interface{}{
|
||||
"title": evt.Metric,
|
||||
"value": evt.Value,
|
||||
"short": true,
|
||||
metricName := evt.Metric
|
||||
if len(metricName) > 50 {
|
||||
metricName = metricName[:50]
|
||||
}
|
||||
attributes = append(attributes, map[string]interface{}{
|
||||
"label": metricName,
|
||||
"value": map[string]interface{}{
|
||||
"label": strconv.FormatFloat(evt.Value.Float64, 'f', -1, 64),
|
||||
},
|
||||
})
|
||||
if index > maxFieldCount {
|
||||
break
|
||||
@@ -100,16 +102,23 @@ func (this *HipChatNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
}
|
||||
|
||||
if evalContext.Error != nil {
|
||||
fields = append(fields, map[string]interface{}{
|
||||
"title": "Error message",
|
||||
"value": evalContext.Error.Error(),
|
||||
"short": false,
|
||||
attributes = append(attributes, map[string]interface{}{
|
||||
"label": "Error message",
|
||||
"value": map[string]interface{}{
|
||||
"label": evalContext.Error.Error(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
message := ""
|
||||
if evalContext.Rule.State != models.AlertStateOK { //dont add message when going back to alert state ok.
|
||||
message += " " + evalContext.Rule.Message
|
||||
}
|
||||
|
||||
if message == "" {
|
||||
message = evalContext.GetNotificationTitle() + " in state " + evalContext.GetStateModel().Text
|
||||
}
|
||||
|
||||
//HipChat has a set list of colors
|
||||
var color string
|
||||
switch evalContext.Rule.State {
|
||||
@@ -123,15 +132,24 @@ func (this *HipChatNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
|
||||
// Add a card with link to the dashboard
|
||||
card := map[string]interface{}{
|
||||
"style": "link",
|
||||
"style": "application",
|
||||
"url": ruleUrl,
|
||||
"id": "1",
|
||||
"title": evalContext.GetNotificationTitle(),
|
||||
"description": evalContext.GetNotificationTitle() + " in state " + evalContext.GetStateModel().Text,
|
||||
"description": message,
|
||||
"icon": map[string]interface{}{
|
||||
"url": "https://grafana.com/assets/img/fav32.png",
|
||||
},
|
||||
"date": evalContext.EndTime.Unix(),
|
||||
"attributes": attributes,
|
||||
}
|
||||
if evalContext.ImagePublicUrl != "" {
|
||||
card["thumbnail"] = map[string]interface{}{
|
||||
"url": evalContext.ImagePublicUrl,
|
||||
"url@2x": evalContext.ImagePublicUrl,
|
||||
"width": 1193,
|
||||
"height": 564,
|
||||
}
|
||||
}
|
||||
|
||||
body := map[string]interface{}{
|
||||
@@ -144,6 +162,7 @@ func (this *HipChatNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
|
||||
hipUrl := fmt.Sprintf("%s/v2/room/%s/notification?auth_token=%s", this.Url, this.RoomId, this.ApiKey)
|
||||
data, _ := json.Marshal(&body)
|
||||
this.log.Info("Request payload", "json", string(data))
|
||||
cmd := &models.SendWebhookSync{Url: hipUrl, Body: string(data)}
|
||||
|
||||
if err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil {
|
||||
|
||||
120
pkg/services/alerting/notifiers/kafka.go
Normal file
120
pkg/services/alerting/notifiers/kafka.go
Normal file
@@ -0,0 +1,120 @@
|
||||
package notifiers
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
)
|
||||
|
||||
func init() {
|
||||
alerting.RegisterNotifier(&alerting.NotifierPlugin{
|
||||
Type: "kafka",
|
||||
Name: "Kafka REST Proxy",
|
||||
Description: "Sends notifications to Kafka Rest Proxy",
|
||||
Factory: NewKafkaNotifier,
|
||||
OptionsTemplate: `
|
||||
<h3 class="page-heading">Kafka settings</h3>
|
||||
<div class="gf-form">
|
||||
<span class="gf-form-label width-14">Kafka REST Proxy</span>
|
||||
<input type="text" required class="gf-form-input max-width-22" ng-model="ctrl.model.settings.kafkaRestProxy" placeholder="http://localhost:8082"></input>
|
||||
</div>
|
||||
<div class="gf-form">
|
||||
<span class="gf-form-label width-14">Topic</span>
|
||||
<input type="text" required class="gf-form-input max-width-22" ng-model="ctrl.model.settings.kafkaTopic" placeholder="topic1"></input>
|
||||
</div>
|
||||
`,
|
||||
})
|
||||
}
|
||||
|
||||
func NewKafkaNotifier(model *m.AlertNotification) (alerting.Notifier, error) {
|
||||
endpoint := model.Settings.Get("kafkaRestProxy").MustString()
|
||||
if endpoint == "" {
|
||||
return nil, alerting.ValidationError{Reason: "Could not find kafka rest proxy endpoint property in settings"}
|
||||
}
|
||||
topic := model.Settings.Get("kafkaTopic").MustString()
|
||||
if topic == "" {
|
||||
return nil, alerting.ValidationError{Reason: "Could not find kafka topic property in settings"}
|
||||
}
|
||||
|
||||
return &KafkaNotifier{
|
||||
NotifierBase: NewNotifierBase(model.Id, model.IsDefault, model.Name, model.Type, model.Settings),
|
||||
Endpoint: endpoint,
|
||||
Topic: topic,
|
||||
log: log.New("alerting.notifier.kafka"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type KafkaNotifier struct {
|
||||
NotifierBase
|
||||
Endpoint string
|
||||
Topic string
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *KafkaNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
|
||||
state := evalContext.Rule.State
|
||||
|
||||
customData := "Triggered metrics:\n\n"
|
||||
for _, evt := range evalContext.EvalMatches {
|
||||
customData = customData + fmt.Sprintf("%s: %v\n", evt.Metric, evt.Value)
|
||||
}
|
||||
|
||||
this.log.Info("Notifying Kafka", "alert_state", state)
|
||||
|
||||
recordJSON := simplejson.New()
|
||||
records := make([]interface{}, 1)
|
||||
|
||||
bodyJSON := simplejson.New()
|
||||
bodyJSON.Set("description", evalContext.Rule.Name+" - "+evalContext.Rule.Message)
|
||||
bodyJSON.Set("client", "Grafana")
|
||||
bodyJSON.Set("details", customData)
|
||||
bodyJSON.Set("incident_key", "alertId-"+strconv.FormatInt(evalContext.Rule.Id, 10))
|
||||
|
||||
ruleUrl, err := evalContext.GetRuleUrl()
|
||||
if err != nil {
|
||||
this.log.Error("Failed get rule link", "error", err)
|
||||
return err
|
||||
}
|
||||
bodyJSON.Set("client_url", ruleUrl)
|
||||
|
||||
if evalContext.ImagePublicUrl != "" {
|
||||
contexts := make([]interface{}, 1)
|
||||
imageJSON := simplejson.New()
|
||||
imageJSON.Set("type", "image")
|
||||
imageJSON.Set("src", evalContext.ImagePublicUrl)
|
||||
contexts[0] = imageJSON
|
||||
bodyJSON.Set("contexts", contexts)
|
||||
}
|
||||
|
||||
valueJSON := simplejson.New()
|
||||
valueJSON.Set("value", bodyJSON)
|
||||
records[0] = valueJSON
|
||||
recordJSON.Set("records", records)
|
||||
body, _ := recordJSON.MarshalJSON()
|
||||
|
||||
topicUrl := this.Endpoint + "/topics/" + this.Topic
|
||||
|
||||
cmd := &m.SendWebhookSync{
|
||||
Url: topicUrl,
|
||||
Body: string(body),
|
||||
HttpMethod: "POST",
|
||||
HttpHeader: map[string]string{
|
||||
"Content-Type": "application/vnd.kafka.json.v2+json",
|
||||
"Accept": "application/vnd.kafka.v2+json",
|
||||
},
|
||||
}
|
||||
|
||||
if err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil {
|
||||
this.log.Error("Failed to send notification to Kafka", "error", err, "body", string(body))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
55
pkg/services/alerting/notifiers/kafka_test.go
Normal file
55
pkg/services/alerting/notifiers/kafka_test.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package notifiers
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestKafkaNotifier(t *testing.T) {
|
||||
Convey("Kafka notifier tests", t, func() {
|
||||
|
||||
Convey("Parsing alert notification from settings", func() {
|
||||
Convey("empty settings should return error", func() {
|
||||
json := `{ }`
|
||||
|
||||
settingsJSON, _ := simplejson.NewJson([]byte(json))
|
||||
model := &m.AlertNotification{
|
||||
Name: "kafka_testing",
|
||||
Type: "kafka",
|
||||
Settings: settingsJSON,
|
||||
}
|
||||
|
||||
_, err := NewKafkaNotifier(model)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("settings should send an event to kafka", func() {
|
||||
json := `
|
||||
{
|
||||
"kafkaRestProxy": "http://localhost:8082",
|
||||
"kafkaTopic": "topic1"
|
||||
}`
|
||||
|
||||
settingsJSON, _ := simplejson.NewJson([]byte(json))
|
||||
model := &m.AlertNotification{
|
||||
Name: "kafka_testing",
|
||||
Type: "kafka",
|
||||
Settings: settingsJSON,
|
||||
}
|
||||
|
||||
not, err := NewKafkaNotifier(model)
|
||||
kafkaNotifier := not.(*KafkaNotifier)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(kafkaNotifier.Name, ShouldEqual, "kafka_testing")
|
||||
So(kafkaNotifier.Type, ShouldEqual, "kafka")
|
||||
So(kafkaNotifier.Endpoint, ShouldEqual, "http://localhost:8082")
|
||||
So(kafkaNotifier.Topic, ShouldEqual, "topic1")
|
||||
})
|
||||
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -37,8 +37,7 @@ func init() {
|
||||
}
|
||||
|
||||
var (
|
||||
opsgenieCreateAlertURL string = "https://api.opsgenie.com/v1/json/alert"
|
||||
opsgenieCloseAlertURL string = "https://api.opsgenie.com/v1/json/alert/close"
|
||||
opsgenieAlertURL string = "https://api.opsgenie.com/v2/alerts"
|
||||
)
|
||||
|
||||
func NewOpsGenieNotifier(model *m.AlertNotification) (alerting.Notifier, error) {
|
||||
@@ -87,7 +86,6 @@ func (this *OpsGenieNotifier) createAlert(evalContext *alerting.EvalContext) err
|
||||
}
|
||||
|
||||
bodyJSON := simplejson.New()
|
||||
bodyJSON.Set("apiKey", this.ApiKey)
|
||||
bodyJSON.Set("message", evalContext.Rule.Name)
|
||||
bodyJSON.Set("source", "Grafana")
|
||||
bodyJSON.Set("alias", "alertId-"+strconv.FormatInt(evalContext.Rule.Id, 10))
|
||||
@@ -103,9 +101,13 @@ func (this *OpsGenieNotifier) createAlert(evalContext *alerting.EvalContext) err
|
||||
body, _ := bodyJSON.MarshalJSON()
|
||||
|
||||
cmd := &m.SendWebhookSync{
|
||||
Url: opsgenieCreateAlertURL,
|
||||
Url: opsgenieAlertURL,
|
||||
Body: string(body),
|
||||
HttpMethod: "POST",
|
||||
HttpHeader: map[string]string{
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": fmt.Sprintf("GenieKey %s", this.ApiKey),
|
||||
},
|
||||
}
|
||||
|
||||
if err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil {
|
||||
@@ -119,14 +121,17 @@ func (this *OpsGenieNotifier) closeAlert(evalContext *alerting.EvalContext) erro
|
||||
this.log.Info("Closing OpsGenie alert", "ruleId", evalContext.Rule.Id, "notification", this.Name)
|
||||
|
||||
bodyJSON := simplejson.New()
|
||||
bodyJSON.Set("apiKey", this.ApiKey)
|
||||
bodyJSON.Set("alias", "alertId-"+strconv.FormatInt(evalContext.Rule.Id, 10))
|
||||
bodyJSON.Set("source", "Grafana")
|
||||
body, _ := bodyJSON.MarshalJSON()
|
||||
|
||||
cmd := &m.SendWebhookSync{
|
||||
Url: opsgenieCloseAlertURL,
|
||||
Url: fmt.Sprintf("%s/alertId-%d/close?identifierType=alias", opsgenieAlertURL, evalContext.Rule.Id),
|
||||
Body: string(body),
|
||||
HttpMethod: "POST",
|
||||
HttpHeader: map[string]string{
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": fmt.Sprintf("GenieKey %s", this.ApiKey),
|
||||
},
|
||||
}
|
||||
|
||||
if err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil {
|
||||
|
||||
@@ -73,10 +73,8 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error {
|
||||
OrgId: evalContext.Rule.OrgId,
|
||||
DashboardId: evalContext.Rule.DashboardId,
|
||||
PanelId: evalContext.Rule.PanelId,
|
||||
Type: annotations.AlertType,
|
||||
AlertId: evalContext.Rule.Id,
|
||||
Title: evalContext.Rule.Name,
|
||||
Text: evalContext.GetStateModel().Text,
|
||||
Text: "",
|
||||
NewState: string(evalContext.Rule.State),
|
||||
PrevState: string(evalContext.PrevAlertState),
|
||||
Epoch: time.Now().Unix(),
|
||||
|
||||
@@ -5,7 +5,7 @@ import "github.com/grafana/grafana/pkg/components/simplejson"
|
||||
type Repository interface {
|
||||
Save(item *Item) error
|
||||
Update(item *Item) error
|
||||
Find(query *ItemQuery) ([]*Item, error)
|
||||
Find(query *ItemQuery) ([]*ItemDTO, error)
|
||||
Delete(params *DeleteParams) error
|
||||
}
|
||||
|
||||
@@ -13,11 +13,10 @@ type ItemQuery struct {
|
||||
OrgId int64 `json:"orgId"`
|
||||
From int64 `json:"from"`
|
||||
To int64 `json:"to"`
|
||||
Type ItemType `json:"type"`
|
||||
AlertId int64 `json:"alertId"`
|
||||
DashboardId int64 `json:"dashboardId"`
|
||||
PanelId int64 `json:"panelId"`
|
||||
NewState []string `json:"newState"`
|
||||
Tags []string `json:"tags"`
|
||||
|
||||
Limit int64 `json:"limit"`
|
||||
}
|
||||
@@ -28,12 +27,15 @@ type PostParams struct {
|
||||
Epoch int64 `json:"epoch"`
|
||||
Title string `json:"title"`
|
||||
Text string `json:"text"`
|
||||
Icon string `json:"icon"`
|
||||
}
|
||||
|
||||
type DeleteParams struct {
|
||||
Id int64 `json:"id"`
|
||||
AlertId int64 `json:"alertId"`
|
||||
DashboardId int64 `json:"dashboardId"`
|
||||
PanelId int64 `json:"panelId"`
|
||||
RegionId int64 `json:"regionId"`
|
||||
}
|
||||
|
||||
var repositoryInstance Repository
|
||||
@@ -46,29 +48,41 @@ func SetRepository(rep Repository) {
|
||||
repositoryInstance = rep
|
||||
}
|
||||
|
||||
type ItemType string
|
||||
|
||||
const (
|
||||
AlertType ItemType = "alert"
|
||||
EventType ItemType = "event"
|
||||
)
|
||||
|
||||
type Item struct {
|
||||
Id int64 `json:"id"`
|
||||
OrgId int64 `json:"orgId"`
|
||||
UserId int64 `json:"userId"`
|
||||
DashboardId int64 `json:"dashboardId"`
|
||||
PanelId int64 `json:"panelId"`
|
||||
CategoryId int64 `json:"categoryId"`
|
||||
RegionId int64 `json:"regionId"`
|
||||
Type ItemType `json:"type"`
|
||||
Title string `json:"title"`
|
||||
Text string `json:"text"`
|
||||
Metric string `json:"metric"`
|
||||
AlertId int64 `json:"alertId"`
|
||||
UserId int64 `json:"userId"`
|
||||
PrevState string `json:"prevState"`
|
||||
NewState string `json:"newState"`
|
||||
Epoch int64 `json:"epoch"`
|
||||
Tags []string `json:"tags"`
|
||||
Data *simplejson.Json `json:"data"`
|
||||
|
||||
// needed until we remove it from db
|
||||
Type string
|
||||
Title string
|
||||
}
|
||||
|
||||
type ItemDTO struct {
|
||||
Id int64 `json:"id"`
|
||||
AlertId int64 `json:"alertId"`
|
||||
AlertName string `json:"alertName"`
|
||||
DashboardId int64 `json:"dashboardId"`
|
||||
PanelId int64 `json:"panelId"`
|
||||
UserId int64 `json:"userId"`
|
||||
NewState string `json:"newState"`
|
||||
PrevState string `json:"prevState"`
|
||||
Time int64 `json:"time"`
|
||||
Text string `json:"text"`
|
||||
RegionId int64 `json:"regionId"`
|
||||
Tags []string `json:"tags"`
|
||||
Login string `json:"login"`
|
||||
Email string `json:"email"`
|
||||
AvatarUrl string `json:"avatarUrl"`
|
||||
Data *simplejson.Json `json:"data"`
|
||||
}
|
||||
|
||||
@@ -100,13 +100,13 @@ func HandleAlertsQuery(query *m.GetAlertsQuery) error {
|
||||
sql.WriteString(")")
|
||||
}
|
||||
|
||||
sql.WriteString(" ORDER BY name ASC")
|
||||
|
||||
if query.Limit != 0 {
|
||||
sql.WriteString(" LIMIT ?")
|
||||
params = append(params, query.Limit)
|
||||
}
|
||||
|
||||
sql.WriteString(" ORDER BY name ASC")
|
||||
|
||||
alerts := make([]*m.Alert, 0)
|
||||
if err := x.Sql(sql.String(), params...).Find(&alerts); err != nil {
|
||||
return err
|
||||
|
||||
@@ -2,9 +2,11 @@ package sqlstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/annotations"
|
||||
)
|
||||
|
||||
@@ -13,19 +15,94 @@ type SqlAnnotationRepo struct {
|
||||
|
||||
func (r *SqlAnnotationRepo) Save(item *annotations.Item) error {
|
||||
return inTransaction(func(sess *DBSession) error {
|
||||
|
||||
tags := models.ParseTagPairs(item.Tags)
|
||||
item.Tags = models.JoinTagPairs(tags)
|
||||
if _, err := sess.Table("annotation").Insert(item); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if item.Tags != nil {
|
||||
if tags, err := r.ensureTagsExist(sess, tags); err != nil {
|
||||
return err
|
||||
} else {
|
||||
for _, tag := range tags {
|
||||
if _, err := sess.Exec("INSERT INTO annotation_tag (annotation_id, tag_id) VALUES(?,?)", item.Id, tag.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Will insert if needed any new key/value pars and return ids
|
||||
func (r *SqlAnnotationRepo) ensureTagsExist(sess *DBSession, tags []*models.Tag) ([]*models.Tag, error) {
|
||||
for _, tag := range tags {
|
||||
var existingTag models.Tag
|
||||
|
||||
// check if it exists
|
||||
if exists, err := sess.Table("tag").Where("key=? AND value=?", tag.Key, tag.Value).Get(&existingTag); err != nil {
|
||||
return nil, err
|
||||
} else if exists {
|
||||
tag.Id = existingTag.Id
|
||||
} else {
|
||||
if _, err := sess.Table("tag").Insert(tag); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
func (r *SqlAnnotationRepo) Update(item *annotations.Item) error {
|
||||
return inTransaction(func(sess *DBSession) error {
|
||||
var (
|
||||
isExist bool
|
||||
err error
|
||||
)
|
||||
existing := new(annotations.Item)
|
||||
|
||||
if _, err := sess.Table("annotation").Id(item.Id).Update(item); err != nil {
|
||||
if item.Id == 0 && item.RegionId != 0 {
|
||||
// Update region end time
|
||||
isExist, err = sess.Table("annotation").Where("region_id=? AND id!=? AND org_id=?", item.RegionId, item.RegionId, item.OrgId).Get(existing)
|
||||
} else {
|
||||
isExist, err = sess.Table("annotation").Where("id=? AND org_id=?", item.Id, item.OrgId).Get(existing)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isExist {
|
||||
return errors.New("Annotation not found")
|
||||
}
|
||||
|
||||
existing.Epoch = item.Epoch
|
||||
existing.Text = item.Text
|
||||
if item.RegionId != 0 {
|
||||
existing.RegionId = item.RegionId
|
||||
}
|
||||
|
||||
if item.Tags != nil {
|
||||
if tags, err := r.ensureTagsExist(sess, models.ParseTagPairs(item.Tags)); err != nil {
|
||||
return err
|
||||
} else {
|
||||
if _, err := sess.Exec("DELETE FROM annotation_tag WHERE annotation_id = ?", existing.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, tag := range tags {
|
||||
if _, err := sess.Exec("INSERT INTO annotation_tag (annotation_id, tag_id) VALUES(?,?)", existing.Id, tag.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
existing.Tags = item.Tags
|
||||
|
||||
if _, err := sess.Table("annotation").Id(existing.Id).Cols("epoch", "text", "region_id", "tags").Update(existing); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -33,51 +110,79 @@ func (r *SqlAnnotationRepo) Update(item *annotations.Item) error {
|
||||
})
|
||||
}
|
||||
|
||||
func (r *SqlAnnotationRepo) Find(query *annotations.ItemQuery) ([]*annotations.Item, error) {
|
||||
func (r *SqlAnnotationRepo) Find(query *annotations.ItemQuery) ([]*annotations.ItemDTO, error) {
|
||||
var sql bytes.Buffer
|
||||
params := make([]interface{}, 0)
|
||||
|
||||
sql.WriteString(`SELECT *
|
||||
from annotation
|
||||
sql.WriteString(`
|
||||
SELECT
|
||||
annotation.id,
|
||||
annotation.epoch as time,
|
||||
annotation.dashboard_id,
|
||||
annotation.panel_id,
|
||||
annotation.new_state,
|
||||
annotation.prev_state,
|
||||
annotation.alert_id,
|
||||
annotation.region_id,
|
||||
annotation.text,
|
||||
annotation.tags,
|
||||
annotation.data,
|
||||
usr.email,
|
||||
usr.login,
|
||||
alert.name as alert_name
|
||||
FROM annotation
|
||||
LEFT OUTER JOIN ` + dialect.Quote("user") + ` as usr on usr.id = annotation.user_id
|
||||
LEFT OUTER JOIN alert on alert.id = annotation.alert_id
|
||||
`)
|
||||
|
||||
sql.WriteString(`WHERE org_id = ?`)
|
||||
sql.WriteString(`WHERE annotation.org_id = ?`)
|
||||
params = append(params, query.OrgId)
|
||||
|
||||
if query.AlertId != 0 {
|
||||
sql.WriteString(` AND alert_id = ?`)
|
||||
params = append(params, query.AlertId)
|
||||
}
|
||||
|
||||
if query.AlertId != 0 {
|
||||
sql.WriteString(` AND alert_id = ?`)
|
||||
sql.WriteString(` AND annotation.alert_id = ?`)
|
||||
params = append(params, query.AlertId)
|
||||
}
|
||||
|
||||
if query.DashboardId != 0 {
|
||||
sql.WriteString(` AND dashboard_id = ?`)
|
||||
sql.WriteString(` AND annotation.dashboard_id = ?`)
|
||||
params = append(params, query.DashboardId)
|
||||
}
|
||||
|
||||
if query.PanelId != 0 {
|
||||
sql.WriteString(` AND panel_id = ?`)
|
||||
sql.WriteString(` AND annotation.panel_id = ?`)
|
||||
params = append(params, query.PanelId)
|
||||
}
|
||||
|
||||
if query.From > 0 && query.To > 0 {
|
||||
sql.WriteString(` AND epoch BETWEEN ? AND ?`)
|
||||
sql.WriteString(` AND annotation.epoch BETWEEN ? AND ?`)
|
||||
params = append(params, query.From, query.To)
|
||||
}
|
||||
|
||||
if query.Type != "" {
|
||||
sql.WriteString(` AND type = ?`)
|
||||
params = append(params, string(query.Type))
|
||||
if len(query.Tags) > 0 {
|
||||
keyValueFilters := []string{}
|
||||
|
||||
tags := models.ParseTagPairs(query.Tags)
|
||||
for _, tag := range tags {
|
||||
if tag.Value == "" {
|
||||
keyValueFilters = append(keyValueFilters, "(tag.key = ?)")
|
||||
params = append(params, tag.Key)
|
||||
} else {
|
||||
keyValueFilters = append(keyValueFilters, "(tag.key = ? AND tag.value = ?)")
|
||||
params = append(params, tag.Key, tag.Value)
|
||||
}
|
||||
}
|
||||
|
||||
if len(query.NewState) > 0 {
|
||||
sql.WriteString(` AND new_state IN (?` + strings.Repeat(",?", len(query.NewState)-1) + ")")
|
||||
for _, v := range query.NewState {
|
||||
params = append(params, v)
|
||||
if len(tags) > 0 {
|
||||
tagsSubQuery := fmt.Sprintf(`
|
||||
SELECT SUM(1) FROM annotation_tag at
|
||||
INNER JOIN tag on tag.id = at.tag_id
|
||||
WHERE at.annotation_id = annotation.id
|
||||
AND (
|
||||
%s
|
||||
)
|
||||
`, strings.Join(keyValueFilters, " OR "))
|
||||
|
||||
sql.WriteString(fmt.Sprintf(" AND (%s) = %d ", tagsSubQuery, len(tags)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,7 +192,7 @@ func (r *SqlAnnotationRepo) Find(query *annotations.ItemQuery) ([]*annotations.I
|
||||
|
||||
sql.WriteString(fmt.Sprintf(" ORDER BY epoch DESC LIMIT %v", query.Limit))
|
||||
|
||||
items := make([]*annotations.Item, 0)
|
||||
items := make([]*annotations.ItemDTO, 0)
|
||||
if err := x.Sql(sql.String(), params...).Find(&items); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -97,11 +202,31 @@ func (r *SqlAnnotationRepo) Find(query *annotations.ItemQuery) ([]*annotations.I
|
||||
|
||||
func (r *SqlAnnotationRepo) Delete(params *annotations.DeleteParams) error {
|
||||
return inTransaction(func(sess *DBSession) error {
|
||||
var (
|
||||
sql string
|
||||
annoTagSql string
|
||||
queryParams []interface{}
|
||||
)
|
||||
|
||||
sql := "DELETE FROM annotation WHERE dashboard_id = ? AND panel_id = ?"
|
||||
if params.RegionId != 0 {
|
||||
annoTagSql = "DELETE FROM annotation_tag WHERE annotation_id IN (SELECT id FROM annotation WHERE region_id = ?)"
|
||||
sql = "DELETE FROM annotation WHERE region_id = ?"
|
||||
queryParams = []interface{}{params.RegionId}
|
||||
} else if params.Id != 0 {
|
||||
annoTagSql = "DELETE FROM annotation_tag WHERE annotation_id IN (SELECT id FROM annotation WHERE id = ?)"
|
||||
sql = "DELETE FROM annotation WHERE id = ?"
|
||||
queryParams = []interface{}{params.Id}
|
||||
} else {
|
||||
annoTagSql = "DELETE FROM annotation_tag WHERE annotation_id IN (SELECT id FROM annotation WHERE dashboard_id = ? AND panel_id = ?)"
|
||||
sql = "DELETE FROM annotation WHERE dashboard_id = ? AND panel_id = ?"
|
||||
queryParams = []interface{}{params.DashboardId, params.PanelId}
|
||||
}
|
||||
|
||||
_, err := sess.Exec(sql, params.DashboardId, params.PanelId)
|
||||
if err != nil {
|
||||
if _, err := sess.Exec(annoTagSql, queryParams...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := sess.Exec(sql, queryParams...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
208
pkg/services/sqlstore/annotation_test.go
Normal file
208
pkg/services/sqlstore/annotation_test.go
Normal file
@@ -0,0 +1,208 @@
|
||||
package sqlstore
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/annotations"
|
||||
)
|
||||
|
||||
func TestSavingTags(t *testing.T) {
|
||||
Convey("Testing annotation saving/loading", t, func() {
|
||||
InitTestDB(t)
|
||||
|
||||
repo := SqlAnnotationRepo{}
|
||||
|
||||
Convey("Can save tags", func() {
|
||||
tagPairs := []*models.Tag{
|
||||
{Key: "outage"},
|
||||
{Key: "type", Value: "outage"},
|
||||
{Key: "server", Value: "server-1"},
|
||||
{Key: "error"},
|
||||
}
|
||||
tags, err := repo.ensureTagsExist(newSession(), tagPairs)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(len(tags), ShouldEqual, 4)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestAnnotations(t *testing.T) {
|
||||
Convey("Testing annotation saving/loading", t, func() {
|
||||
InitTestDB(t)
|
||||
|
||||
repo := SqlAnnotationRepo{}
|
||||
|
||||
Convey("Can save annotation", func() {
|
||||
err := repo.Save(&annotations.Item{
|
||||
OrgId: 1,
|
||||
UserId: 1,
|
||||
DashboardId: 1,
|
||||
Text: "hello",
|
||||
Epoch: 10,
|
||||
Tags: []string{"outage", "error", "type:outage", "server:server-1"},
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Can query for annotation", func() {
|
||||
items, err := repo.Find(&annotations.ItemQuery{
|
||||
OrgId: 1,
|
||||
DashboardId: 1,
|
||||
From: 0,
|
||||
To: 15,
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(items, ShouldHaveLength, 1)
|
||||
|
||||
Convey("Can read tags", func() {
|
||||
So(items[0].Tags, ShouldResemble, []string{"outage", "error", "type:outage", "server:server-1"})
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Should not find any when item is outside time range", func() {
|
||||
items, err := repo.Find(&annotations.ItemQuery{
|
||||
OrgId: 1,
|
||||
DashboardId: 1,
|
||||
From: 12,
|
||||
To: 15,
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(items, ShouldHaveLength, 0)
|
||||
})
|
||||
|
||||
Convey("Should not find one when tag filter does not match", func() {
|
||||
items, err := repo.Find(&annotations.ItemQuery{
|
||||
OrgId: 1,
|
||||
DashboardId: 1,
|
||||
From: 1,
|
||||
To: 15,
|
||||
Tags: []string{"asd"},
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(items, ShouldHaveLength, 0)
|
||||
})
|
||||
|
||||
Convey("Should find one when all tag filters does match", func() {
|
||||
items, err := repo.Find(&annotations.ItemQuery{
|
||||
OrgId: 1,
|
||||
DashboardId: 1,
|
||||
From: 1,
|
||||
To: 15,
|
||||
Tags: []string{"outage", "error"},
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(items, ShouldHaveLength, 1)
|
||||
})
|
||||
|
||||
Convey("Should find one when all key value tag filters does match", func() {
|
||||
items, err := repo.Find(&annotations.ItemQuery{
|
||||
OrgId: 1,
|
||||
DashboardId: 1,
|
||||
From: 1,
|
||||
To: 15,
|
||||
Tags: []string{"type:outage", "server:server-1"},
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(items, ShouldHaveLength, 1)
|
||||
})
|
||||
|
||||
Convey("Can update annotation and remove all tags", func() {
|
||||
query := &annotations.ItemQuery{
|
||||
OrgId: 1,
|
||||
DashboardId: 1,
|
||||
From: 0,
|
||||
To: 15,
|
||||
}
|
||||
items, err := repo.Find(query)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
annotationId := items[0].Id
|
||||
|
||||
err = repo.Update(&annotations.Item{
|
||||
Id: annotationId,
|
||||
OrgId: 1,
|
||||
Text: "something new",
|
||||
Tags: []string{},
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
items, err = repo.Find(query)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Can read tags", func() {
|
||||
So(items[0].Id, ShouldEqual, annotationId)
|
||||
So(len(items[0].Tags), ShouldEqual, 0)
|
||||
So(items[0].Text, ShouldEqual, "something new")
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Can update annotation with new tags", func() {
|
||||
query := &annotations.ItemQuery{
|
||||
OrgId: 1,
|
||||
DashboardId: 1,
|
||||
From: 0,
|
||||
To: 15,
|
||||
}
|
||||
items, err := repo.Find(query)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
annotationId := items[0].Id
|
||||
|
||||
err = repo.Update(&annotations.Item{
|
||||
Id: annotationId,
|
||||
OrgId: 1,
|
||||
Text: "something new",
|
||||
Tags: []string{"newtag1", "newtag2"},
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
items, err = repo.Find(query)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Can read tags", func() {
|
||||
So(items[0].Id, ShouldEqual, annotationId)
|
||||
So(items[0].Tags, ShouldResemble, []string{"newtag1", "newtag2"})
|
||||
So(items[0].Text, ShouldEqual, "something new")
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Can delete annotation", func() {
|
||||
query := &annotations.ItemQuery{
|
||||
OrgId: 1,
|
||||
DashboardId: 1,
|
||||
From: 0,
|
||||
To: 15,
|
||||
}
|
||||
items, err := repo.Find(query)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
annotationId := items[0].Id
|
||||
|
||||
err = repo.Delete(&annotations.DeleteParams{Id: annotationId})
|
||||
|
||||
items, err = repo.Find(query)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Should be deleted", func() {
|
||||
So(len(items), ShouldEqual, 0)
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -261,6 +261,7 @@ func DeleteDashboard(cmd *m.DeleteDashboardCommand) error {
|
||||
"DELETE FROM dashboard WHERE id = ?",
|
||||
"DELETE FROM playlist_item WHERE type = 'dashboard_by_id' AND value = ?",
|
||||
"DELETE FROM dashboard_version WHERE dashboard_id = ?",
|
||||
"DELETE FROM annotation WHERE dashboard_id = ?",
|
||||
}
|
||||
|
||||
for _, sql := range deletes {
|
||||
|
||||
@@ -86,9 +86,10 @@ func GetDashboardSnapshot(query *m.GetDashboardSnapshotQuery) error {
|
||||
}
|
||||
|
||||
func SearchDashboardSnapshots(query *m.GetDashboardSnapshotsQuery) error {
|
||||
var snapshots = make(m.DashboardSnapshots, 0)
|
||||
var snapshots = make(m.DashboardSnapshotsList, 0)
|
||||
|
||||
sess := x.Limit(query.Limit)
|
||||
sess.Table("dashboard_snapshot")
|
||||
|
||||
if query.Name != "" {
|
||||
sess.Where("name LIKE ?", query.Name)
|
||||
|
||||
@@ -57,4 +57,37 @@ func addAnnotationMig(mg *Migrator) {
|
||||
mg.AddMigration("Add column region_id to annotation table", NewAddColumnMigration(table, &Column{
|
||||
Name: "region_id", Type: DB_BigInt, Nullable: true, Default: "0",
|
||||
}))
|
||||
|
||||
categoryIdIndex := &Index{Cols: []string{"org_id", "category_id"}, Type: IndexType}
|
||||
mg.AddMigration("Drop category_id index", NewDropIndexMigration(table, categoryIdIndex))
|
||||
|
||||
mg.AddMigration("Add column tags to annotation table", NewAddColumnMigration(table, &Column{
|
||||
Name: "tags", Type: DB_NVarchar, Nullable: true, Length: 500,
|
||||
}))
|
||||
|
||||
///
|
||||
/// Annotation tag
|
||||
///
|
||||
annotationTagTable := Table{
|
||||
Name: "annotation_tag",
|
||||
Columns: []*Column{
|
||||
{Name: "annotation_id", Type: DB_BigInt, Nullable: false},
|
||||
{Name: "tag_id", Type: DB_BigInt, Nullable: false},
|
||||
},
|
||||
Indices: []*Index{
|
||||
{Cols: []string{"annotation_id", "tag_id"}, Type: UniqueIndex},
|
||||
},
|
||||
}
|
||||
|
||||
mg.AddMigration("Create annotation_tag table v2", NewAddTableMigration(annotationTagTable))
|
||||
mg.AddMigration("Add unique index annotation_tag.annotation_id_tag_id", NewAddIndexMigration(annotationTagTable, annotationTagTable.Indices[0]))
|
||||
|
||||
//
|
||||
// clear alert text
|
||||
//
|
||||
updateTextFieldSql := "UPDATE annotation SET TEXT = '' WHERE alert_id > 0"
|
||||
mg.AddMigration("Update alert annotations and set TEXT to empty", new(RawSqlMigration).
|
||||
Sqlite(updateTextFieldSql).
|
||||
Postgres(updateTextFieldSql).
|
||||
Mysql(updateTextFieldSql))
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ func AddMigrations(mg *Migrator) {
|
||||
addAnnotationMig(mg)
|
||||
addTestDataMigrations(mg)
|
||||
addDashboardVersionMigration(mg)
|
||||
addTagMigration(mg)
|
||||
}
|
||||
|
||||
func addMigrationLogMigrations(mg *Migrator) {
|
||||
|
||||
24
pkg/services/sqlstore/migrations/tag_mig.go
Normal file
24
pkg/services/sqlstore/migrations/tag_mig.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package migrations
|
||||
|
||||
import . "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
|
||||
func addTagMigration(mg *Migrator) {
|
||||
|
||||
tagTable := Table{
|
||||
Name: "tag",
|
||||
Columns: []*Column{
|
||||
{Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true},
|
||||
{Name: "key", Type: DB_NVarchar, Length: 100, Nullable: false},
|
||||
{Name: "value", Type: DB_NVarchar, Length: 100, Nullable: false},
|
||||
},
|
||||
Indices: []*Index{
|
||||
{Cols: []string{"key", "value"}, Type: UniqueIndex},
|
||||
},
|
||||
}
|
||||
|
||||
// create table
|
||||
mg.AddMigration("create tag table", NewAddTableMigration(tagTable))
|
||||
|
||||
// create indices
|
||||
mg.AddMigration("add index tag.key_value", NewAddIndexMigration(tagTable, tagTable.Indices[0]))
|
||||
}
|
||||
@@ -104,7 +104,7 @@ func (db *Postgres) SqlType(c *Column) string {
|
||||
|
||||
func (db *Postgres) TableCheckSql(tableName string) (string, []interface{}) {
|
||||
args := []interface{}{"grafana", tableName}
|
||||
sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?"
|
||||
sql := "SELECT table_name FROM information_schema.tables WHERE table_schema=? and table_name=?"
|
||||
return sql, args
|
||||
}
|
||||
|
||||
|
||||
@@ -96,6 +96,7 @@ func CreateUser(cmd *m.CreateUserCommand) error {
|
||||
EmailVerified: cmd.EmailVerified,
|
||||
Created: time.Now(),
|
||||
Updated: time.Now(),
|
||||
LastSeenAt: time.Now().AddDate(-10, 0, 0),
|
||||
}
|
||||
|
||||
if len(cmd.Password) > 0 {
|
||||
|
||||
@@ -122,6 +122,9 @@ var (
|
||||
// Basic Auth
|
||||
BasicAuthEnabled bool
|
||||
|
||||
// Plugin settings
|
||||
PluginAppsSkipVerifyTLS bool
|
||||
|
||||
// Session settings.
|
||||
SessionOptions session.Options
|
||||
|
||||
@@ -560,6 +563,9 @@ func NewConfigContext(args *CommandLineArgs) error {
|
||||
authBasic := Cfg.Section("auth.basic")
|
||||
BasicAuthEnabled = authBasic.Key("enabled").MustBool(true)
|
||||
|
||||
// global plugin settings
|
||||
PluginAppsSkipVerifyTLS = Cfg.Section("plugins").Key("app_tls_skip_verify_insecure").MustBool(false)
|
||||
|
||||
// PhantomJS rendering
|
||||
ImagesDir = filepath.Join(DataPath, "png")
|
||||
PhantomDir = filepath.Join(HomePath, "vendor/phantomjs")
|
||||
|
||||
@@ -13,6 +13,7 @@ type OAuthInfo struct {
|
||||
TlsClientCert string
|
||||
TlsClientKey string
|
||||
TlsClientCa string
|
||||
TlsSkipVerify bool
|
||||
}
|
||||
|
||||
type OAuther struct {
|
||||
|
||||
@@ -66,6 +66,7 @@ func NewOAuthService() {
|
||||
TlsClientCert: sec.Key("tls_client_cert").String(),
|
||||
TlsClientKey: sec.Key("tls_client_key").String(),
|
||||
TlsClientCa: sec.Key("tls_client_ca").String(),
|
||||
TlsSkipVerify: sec.Key("tls_skip_verify_insecure").MustBool(),
|
||||
}
|
||||
|
||||
if !info.Enabled {
|
||||
|
||||
@@ -11,26 +11,21 @@ import (
|
||||
const rsIdentifier = `([_a-zA-Z0-9]+)`
|
||||
const sExpr = `\$` + rsIdentifier + `\(([^\)]*)\)`
|
||||
|
||||
type SqlMacroEngine interface {
|
||||
Interpolate(sql string) (string, error)
|
||||
}
|
||||
|
||||
type MySqlMacroEngine struct {
|
||||
TimeRange *tsdb.TimeRange
|
||||
}
|
||||
|
||||
func NewMysqlMacroEngine(timeRange *tsdb.TimeRange) SqlMacroEngine {
|
||||
return &MySqlMacroEngine{
|
||||
TimeRange: timeRange,
|
||||
}
|
||||
func NewMysqlMacroEngine() tsdb.SqlMacroEngine {
|
||||
return &MySqlMacroEngine{}
|
||||
}
|
||||
|
||||
func (m *MySqlMacroEngine) Interpolate(sql string) (string, error) {
|
||||
func (m *MySqlMacroEngine) Interpolate(timeRange *tsdb.TimeRange, sql string) (string, error) {
|
||||
m.TimeRange = timeRange
|
||||
rExp, _ := regexp.Compile(sExpr)
|
||||
var macroError error
|
||||
|
||||
sql = ReplaceAllStringSubmatchFunc(rExp, sql, func(groups []string) string {
|
||||
res, err := m.EvaluateMacro(groups[1], groups[2:])
|
||||
sql = replaceAllStringSubmatchFunc(rExp, sql, func(groups []string) string {
|
||||
res, err := m.evaluateMacro(groups[1], groups[2:])
|
||||
if err != nil && macroError == nil {
|
||||
macroError = err
|
||||
return "macro_error()"
|
||||
@@ -45,7 +40,7 @@ func (m *MySqlMacroEngine) Interpolate(sql string) (string, error) {
|
||||
return sql, nil
|
||||
}
|
||||
|
||||
func ReplaceAllStringSubmatchFunc(re *regexp.Regexp, str string, repl func([]string) string) string {
|
||||
func replaceAllStringSubmatchFunc(re *regexp.Regexp, str string, repl func([]string) string) string {
|
||||
result := ""
|
||||
lastIndex := 0
|
||||
|
||||
@@ -62,7 +57,7 @@ func ReplaceAllStringSubmatchFunc(re *regexp.Regexp, str string, repl func([]str
|
||||
return result + str[lastIndex:]
|
||||
}
|
||||
|
||||
func (m *MySqlMacroEngine) EvaluateMacro(name string, args []string) (string, error) {
|
||||
func (m *MySqlMacroEngine) evaluateMacro(name string, args []string) (string, error) {
|
||||
switch name {
|
||||
case "__time":
|
||||
if len(args) == 0 {
|
||||
|
||||
@@ -9,86 +9,60 @@ import (
|
||||
|
||||
func TestMacroEngine(t *testing.T) {
|
||||
Convey("MacroEngine", t, func() {
|
||||
engine := &MySqlMacroEngine{}
|
||||
timeRange := &tsdb.TimeRange{From: "5m", To: "now"}
|
||||
|
||||
Convey("interpolate __time function", func() {
|
||||
engine := &MySqlMacroEngine{}
|
||||
|
||||
sql, err := engine.Interpolate("select $__time(time_column)")
|
||||
sql, err := engine.Interpolate(nil, "select $__time(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select UNIX_TIMESTAMP(time_column) as time_sec")
|
||||
})
|
||||
|
||||
Convey("interpolate __time function wrapped in aggregation", func() {
|
||||
engine := &MySqlMacroEngine{}
|
||||
|
||||
sql, err := engine.Interpolate("select min($__time(time_column))")
|
||||
sql, err := engine.Interpolate(nil, "select min($__time(time_column))")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select min(UNIX_TIMESTAMP(time_column) as time_sec)")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeFilter function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("WHERE $__timeFilter(time_column)")
|
||||
sql, err := engine.Interpolate(timeRange, "WHERE $__timeFilter(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "WHERE time_column >= FROM_UNIXTIME(18446744066914186738) AND time_column <= FROM_UNIXTIME(18446744066914187038)")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeFrom function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("select $__timeFrom(time_column)")
|
||||
sql, err := engine.Interpolate(timeRange, "select $__timeFrom(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select FROM_UNIXTIME(18446744066914186738)")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeTo function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("select $__timeTo(time_column)")
|
||||
sql, err := engine.Interpolate(timeRange, "select $__timeTo(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select FROM_UNIXTIME(18446744066914187038)")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochFilter function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("select $__unixEpochFilter(18446744066914186738)")
|
||||
sql, err := engine.Interpolate(timeRange, "select $__unixEpochFilter(18446744066914186738)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914186738 >= 18446744066914186738 AND 18446744066914186738 <= 18446744066914187038")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochFrom function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("select $__unixEpochFrom()")
|
||||
sql, err := engine.Interpolate(timeRange, "select $__unixEpochFrom()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914186738")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochTo function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("select $__unixEpochTo()")
|
||||
sql, err := engine.Interpolate(timeRange, "select $__unixEpochTo()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914187038")
|
||||
|
||||
@@ -6,142 +6,57 @@ import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"time"
|
||||
|
||||
"github.com/go-sql-driver/mysql"
|
||||
"github.com/go-xorm/core"
|
||||
"github.com/go-xorm/xorm"
|
||||
"github.com/grafana/grafana/pkg/components/null"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
)
|
||||
|
||||
type MysqlExecutor struct {
|
||||
engine *xorm.Engine
|
||||
type MysqlQueryEndpoint struct {
|
||||
sqlEngine tsdb.SqlEngine
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
type engineCacheType struct {
|
||||
cache map[int64]*xorm.Engine
|
||||
versions map[int64]int
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
var engineCache = engineCacheType{
|
||||
cache: make(map[int64]*xorm.Engine),
|
||||
versions: make(map[int64]int),
|
||||
}
|
||||
|
||||
func init() {
|
||||
tsdb.RegisterTsdbQueryEndpoint("mysql", NewMysqlExecutor)
|
||||
tsdb.RegisterTsdbQueryEndpoint("mysql", NewMysqlQueryEndpoint)
|
||||
}
|
||||
|
||||
func NewMysqlExecutor(datasource *models.DataSource) (tsdb.TsdbQueryEndpoint, error) {
|
||||
executor := &MysqlExecutor{
|
||||
func NewMysqlQueryEndpoint(datasource *models.DataSource) (tsdb.TsdbQueryEndpoint, error) {
|
||||
endpoint := &MysqlQueryEndpoint{
|
||||
log: log.New("tsdb.mysql"),
|
||||
}
|
||||
|
||||
err := executor.initEngine(datasource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return executor, nil
|
||||
}
|
||||
|
||||
func (e *MysqlExecutor) initEngine(dsInfo *models.DataSource) error {
|
||||
engineCache.Lock()
|
||||
defer engineCache.Unlock()
|
||||
|
||||
if engine, present := engineCache.cache[dsInfo.Id]; present {
|
||||
if version, _ := engineCache.versions[dsInfo.Id]; version == dsInfo.Version {
|
||||
e.engine = engine
|
||||
return nil
|
||||
}
|
||||
endpoint.sqlEngine = &tsdb.DefaultSqlEngine{
|
||||
MacroEngine: NewMysqlMacroEngine(),
|
||||
}
|
||||
|
||||
cnnstr := fmt.Sprintf("%s:%s@%s(%s)/%s?collation=utf8mb4_unicode_ci&parseTime=true&loc=UTC",
|
||||
dsInfo.User,
|
||||
dsInfo.Password,
|
||||
datasource.User,
|
||||
datasource.Password,
|
||||
"tcp",
|
||||
dsInfo.Url,
|
||||
dsInfo.Database)
|
||||
datasource.Url,
|
||||
datasource.Database,
|
||||
)
|
||||
endpoint.log.Debug("getEngine", "connection", cnnstr)
|
||||
|
||||
e.log.Debug("getEngine", "connection", cnnstr)
|
||||
|
||||
engine, err := xorm.NewEngine("mysql", cnnstr)
|
||||
engine.SetMaxOpenConns(10)
|
||||
engine.SetMaxIdleConns(10)
|
||||
if err != nil {
|
||||
return err
|
||||
if err := endpoint.sqlEngine.InitEngine("mysql", datasource, cnnstr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
engineCache.cache[dsInfo.Id] = engine
|
||||
e.engine = engine
|
||||
return nil
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
func (e *MysqlExecutor) Query(ctx context.Context, dsInfo *models.DataSource, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) {
|
||||
result := &tsdb.Response{
|
||||
Results: make(map[string]*tsdb.QueryResult),
|
||||
// Query is the main function for the MysqlExecutor
|
||||
func (e *MysqlQueryEndpoint) Query(ctx context.Context, dsInfo *models.DataSource, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) {
|
||||
return e.sqlEngine.Query(ctx, dsInfo, tsdbQuery, e.transformToTimeSeries, e.transformToTable)
|
||||
}
|
||||
|
||||
macroEngine := NewMysqlMacroEngine(tsdbQuery.TimeRange)
|
||||
session := e.engine.NewSession()
|
||||
defer session.Close()
|
||||
db := session.DB()
|
||||
|
||||
for _, query := range tsdbQuery.Queries {
|
||||
rawSql := query.Model.Get("rawSql").MustString()
|
||||
if rawSql == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
queryResult := &tsdb.QueryResult{Meta: simplejson.New(), RefId: query.RefId}
|
||||
result.Results[query.RefId] = queryResult
|
||||
|
||||
rawSql, err := macroEngine.Interpolate(rawSql)
|
||||
if err != nil {
|
||||
queryResult.Error = err
|
||||
continue
|
||||
}
|
||||
|
||||
queryResult.Meta.Set("sql", rawSql)
|
||||
|
||||
rows, err := db.Query(rawSql)
|
||||
if err != nil {
|
||||
queryResult.Error = err
|
||||
continue
|
||||
}
|
||||
|
||||
defer rows.Close()
|
||||
|
||||
format := query.Model.Get("format").MustString("time_series")
|
||||
|
||||
switch format {
|
||||
case "time_series":
|
||||
err := e.TransformToTimeSeries(query, rows, queryResult)
|
||||
if err != nil {
|
||||
queryResult.Error = err
|
||||
continue
|
||||
}
|
||||
case "table":
|
||||
err := e.TransformToTable(query, rows, queryResult)
|
||||
if err != nil {
|
||||
queryResult.Error = err
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (e MysqlExecutor) TransformToTable(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult) error {
|
||||
func (e MysqlQueryEndpoint) transformToTable(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult) error {
|
||||
columnNames, err := rows.Columns()
|
||||
columnCount := len(columnNames)
|
||||
|
||||
@@ -166,7 +81,7 @@ func (e MysqlExecutor) TransformToTable(query *tsdb.Query, rows *core.Rows, resu
|
||||
rowLimit := 1000000
|
||||
rowCount := 0
|
||||
|
||||
for ; rows.Next(); rowCount += 1 {
|
||||
for ; rows.Next(); rowCount++ {
|
||||
if rowCount > rowLimit {
|
||||
return fmt.Errorf("MySQL query row limit exceeded, limit %d", rowLimit)
|
||||
}
|
||||
@@ -184,7 +99,7 @@ func (e MysqlExecutor) TransformToTable(query *tsdb.Query, rows *core.Rows, resu
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e MysqlExecutor) getTypedRowData(types []*sql.ColumnType, rows *core.Rows) (tsdb.RowValues, error) {
|
||||
func (e MysqlQueryEndpoint) getTypedRowData(types []*sql.ColumnType, rows *core.Rows) (tsdb.RowValues, error) {
|
||||
values := make([]interface{}, len(types))
|
||||
|
||||
for i, stype := range types {
|
||||
@@ -248,7 +163,7 @@ func (e MysqlExecutor) getTypedRowData(types []*sql.ColumnType, rows *core.Rows)
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func (e MysqlExecutor) TransformToTimeSeries(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult) error {
|
||||
func (e MysqlQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult) error {
|
||||
pointsBySeries := make(map[string]*tsdb.TimeSeries)
|
||||
seriesByQueryOrder := list.New()
|
||||
columnNames, err := rows.Columns()
|
||||
@@ -261,7 +176,7 @@ func (e MysqlExecutor) TransformToTimeSeries(query *tsdb.Query, rows *core.Rows,
|
||||
rowLimit := 1000000
|
||||
rowCount := 0
|
||||
|
||||
for ; rows.Next(); rowCount += 1 {
|
||||
for ; rows.Next(); rowCount++ {
|
||||
if rowCount > rowLimit {
|
||||
return fmt.Errorf("MySQL query row limit exceeded, limit %d", rowLimit)
|
||||
}
|
||||
|
||||
@@ -18,14 +18,16 @@ func TestMySQL(t *testing.T) {
|
||||
SkipConvey("MySQL", t, func() {
|
||||
x := InitMySQLTestDB(t)
|
||||
|
||||
executor := &MysqlExecutor{
|
||||
engine: x,
|
||||
endpoint := &MysqlQueryEndpoint{
|
||||
sqlEngine: &tsdb.DefaultSqlEngine{
|
||||
MacroEngine: NewMysqlMacroEngine(),
|
||||
XormEngine: x,
|
||||
},
|
||||
log: log.New("tsdb.mysql"),
|
||||
}
|
||||
|
||||
sess := x.NewSession()
|
||||
defer sess.Close()
|
||||
db := sess.DB()
|
||||
|
||||
sql := "CREATE TABLE `mysql_types` ("
|
||||
sql += "`atinyint` tinyint(1),"
|
||||
@@ -70,14 +72,23 @@ func TestMySQL(t *testing.T) {
|
||||
_, err = sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("TransformToTable should map MySQL column types to Go types", func() {
|
||||
rows, err := db.Query("SELECT * FROM mysql_types")
|
||||
defer rows.Close()
|
||||
Convey("Query with Table format should map MySQL column types to Go types", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": "SELECT * FROM mysql_types",
|
||||
"format": "table",
|
||||
}),
|
||||
RefId: "A",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["A"]
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
queryResult := &tsdb.QueryResult{Meta: simplejson.New()}
|
||||
err = executor.TransformToTable(nil, rows, queryResult)
|
||||
So(err, ShouldBeNil)
|
||||
column := queryResult.Tables[0].Rows[0]
|
||||
So(*column[0].(*int8), ShouldEqual, 1)
|
||||
So(*column[1].(*string), ShouldEqual, "abc")
|
||||
|
||||
99
pkg/tsdb/postgres/macros.go
Normal file
99
pkg/tsdb/postgres/macros.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
)
|
||||
|
||||
//const rsString = `(?:"([^"]*)")`;
|
||||
const rsIdentifier = `([_a-zA-Z0-9]+)`
|
||||
const sExpr = `\$` + rsIdentifier + `\(([^\)]*)\)`
|
||||
|
||||
type PostgresMacroEngine struct {
|
||||
TimeRange *tsdb.TimeRange
|
||||
}
|
||||
|
||||
func NewPostgresMacroEngine() tsdb.SqlMacroEngine {
|
||||
return &PostgresMacroEngine{}
|
||||
}
|
||||
|
||||
func (m *PostgresMacroEngine) Interpolate(timeRange *tsdb.TimeRange, sql string) (string, error) {
|
||||
m.TimeRange = timeRange
|
||||
rExp, _ := regexp.Compile(sExpr)
|
||||
var macroError error
|
||||
|
||||
sql = replaceAllStringSubmatchFunc(rExp, sql, func(groups []string) string {
|
||||
res, err := m.evaluateMacro(groups[1], strings.Split(groups[2], ","))
|
||||
if err != nil && macroError == nil {
|
||||
macroError = err
|
||||
return "macro_error()"
|
||||
}
|
||||
return res
|
||||
})
|
||||
|
||||
if macroError != nil {
|
||||
return "", macroError
|
||||
}
|
||||
|
||||
return sql, nil
|
||||
}
|
||||
|
||||
func replaceAllStringSubmatchFunc(re *regexp.Regexp, str string, repl func([]string) string) string {
|
||||
result := ""
|
||||
lastIndex := 0
|
||||
|
||||
for _, v := range re.FindAllSubmatchIndex([]byte(str), -1) {
|
||||
groups := []string{}
|
||||
for i := 0; i < len(v); i += 2 {
|
||||
groups = append(groups, str[v[i]:v[i+1]])
|
||||
}
|
||||
|
||||
result += str[lastIndex:v[0]] + repl(groups)
|
||||
lastIndex = v[1]
|
||||
}
|
||||
|
||||
return result + str[lastIndex:]
|
||||
}
|
||||
|
||||
func (m *PostgresMacroEngine) evaluateMacro(name string, args []string) (string, error) {
|
||||
switch name {
|
||||
case "__time":
|
||||
if len(args) == 0 {
|
||||
return "", fmt.Errorf("missing time column argument for macro %v", name)
|
||||
}
|
||||
return fmt.Sprintf("%s AS \"time\"", args[0]), nil
|
||||
case "__timeEpoch":
|
||||
if len(args) == 0 {
|
||||
return "", fmt.Errorf("missing time column argument for macro %v", name)
|
||||
}
|
||||
return fmt.Sprintf("extract(epoch from %s) as \"time\"", args[0]), nil
|
||||
case "__timeFilter":
|
||||
if len(args) == 0 {
|
||||
return "", fmt.Errorf("missing time column argument for macro %v", name)
|
||||
}
|
||||
return fmt.Sprintf("%s >= to_timestamp(%d) AND %s <= to_timestamp(%d)", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
case "__timeFrom":
|
||||
return fmt.Sprintf("to_timestamp(%d)", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
|
||||
case "__timeTo":
|
||||
return fmt.Sprintf("to_timestamp(%d)", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
case "__timeGroup":
|
||||
if len(args) < 2 {
|
||||
return "", fmt.Errorf("macro %v needs time column and interval", name)
|
||||
}
|
||||
return fmt.Sprintf("(extract(epoch from \"%s\")/extract(epoch from %s::interval))::int", args[0], args[1]), nil
|
||||
case "__unixEpochFilter":
|
||||
if len(args) == 0 {
|
||||
return "", fmt.Errorf("missing time column argument for macro %v", name)
|
||||
}
|
||||
return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
case "__unixEpochFrom":
|
||||
return fmt.Sprintf("%d", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
|
||||
case "__unixEpochTo":
|
||||
return fmt.Sprintf("%d", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
default:
|
||||
return "", fmt.Errorf("Unknown macro %v", name)
|
||||
}
|
||||
}
|
||||
80
pkg/tsdb/postgres/macros_test.go
Normal file
80
pkg/tsdb/postgres/macros_test.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestMacroEngine(t *testing.T) {
|
||||
Convey("MacroEngine", t, func() {
|
||||
engine := &PostgresMacroEngine{}
|
||||
timeRange := &tsdb.TimeRange{From: "5m", To: "now"}
|
||||
|
||||
Convey("interpolate __time function", func() {
|
||||
sql, err := engine.Interpolate(nil, "select $__time(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select time_column AS \"time\"")
|
||||
})
|
||||
|
||||
Convey("interpolate __time function wrapped in aggregation", func() {
|
||||
sql, err := engine.Interpolate(nil, "select min($__time(time_column))")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select min(time_column AS \"time\")")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeFilter function", func() {
|
||||
sql, err := engine.Interpolate(timeRange, "WHERE $__timeFilter(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "WHERE time_column >= to_timestamp(18446744066914186738) AND time_column <= to_timestamp(18446744066914187038)")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeFrom function", func() {
|
||||
sql, err := engine.Interpolate(timeRange, "select $__timeFrom(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select to_timestamp(18446744066914186738)")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeGroup function", func() {
|
||||
|
||||
sql, err := engine.Interpolate(timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "GROUP BY (extract(epoch from \"time_column\")/extract(epoch from '5m'::interval))::int")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeTo function", func() {
|
||||
sql, err := engine.Interpolate(timeRange, "select $__timeTo(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select to_timestamp(18446744066914187038)")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochFilter function", func() {
|
||||
sql, err := engine.Interpolate(timeRange, "select $__unixEpochFilter(18446744066914186738)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914186738 >= 18446744066914186738 AND 18446744066914186738 <= 18446744066914187038")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochFrom function", func() {
|
||||
sql, err := engine.Interpolate(timeRange, "select $__unixEpochFrom()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914186738")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochTo function", func() {
|
||||
sql, err := engine.Interpolate(timeRange, "select $__unixEpochTo()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914187038")
|
||||
})
|
||||
|
||||
})
|
||||
}
|
||||
245
pkg/tsdb/postgres/postgres.go
Normal file
245
pkg/tsdb/postgres/postgres.go
Normal file
@@ -0,0 +1,245 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/go-xorm/core"
|
||||
"github.com/grafana/grafana/pkg/components/null"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
)
|
||||
|
||||
type PostgresQueryEndpoint struct {
|
||||
sqlEngine tsdb.SqlEngine
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func init() {
|
||||
tsdb.RegisterTsdbQueryEndpoint("postgres", NewPostgresQueryEndpoint)
|
||||
}
|
||||
|
||||
func NewPostgresQueryEndpoint(datasource *models.DataSource) (tsdb.TsdbQueryEndpoint, error) {
|
||||
endpoint := &PostgresQueryEndpoint{
|
||||
log: log.New("tsdb.postgres"),
|
||||
}
|
||||
|
||||
endpoint.sqlEngine = &tsdb.DefaultSqlEngine{
|
||||
MacroEngine: NewPostgresMacroEngine(),
|
||||
}
|
||||
|
||||
cnnstr := generateConnectionString(datasource)
|
||||
endpoint.log.Debug("getEngine", "connection", cnnstr)
|
||||
|
||||
if err := endpoint.sqlEngine.InitEngine("postgres", datasource, cnnstr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
func generateConnectionString(datasource *models.DataSource) string {
|
||||
password := ""
|
||||
for key, value := range datasource.SecureJsonData.Decrypt() {
|
||||
if key == "password" {
|
||||
password = value
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
sslmode := datasource.JsonData.Get("sslmode").MustString("require")
|
||||
return fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s", datasource.User, password, datasource.Url, datasource.Database, sslmode)
|
||||
}
|
||||
|
||||
func (e *PostgresQueryEndpoint) Query(ctx context.Context, dsInfo *models.DataSource, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) {
|
||||
return e.sqlEngine.Query(ctx, dsInfo, tsdbQuery, e.transformToTimeSeries, e.transformToTable)
|
||||
}
|
||||
|
||||
func (e PostgresQueryEndpoint) transformToTable(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult) error {
|
||||
|
||||
columnNames, err := rows.Columns()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
table := &tsdb.Table{
|
||||
Columns: make([]tsdb.TableColumn, len(columnNames)),
|
||||
Rows: make([]tsdb.RowValues, 0),
|
||||
}
|
||||
|
||||
for i, name := range columnNames {
|
||||
table.Columns[i].Text = name
|
||||
}
|
||||
|
||||
rowLimit := 1000000
|
||||
rowCount := 0
|
||||
|
||||
for ; rows.Next(); rowCount++ {
|
||||
if rowCount > rowLimit {
|
||||
return fmt.Errorf("PostgreSQL query row limit exceeded, limit %d", rowLimit)
|
||||
}
|
||||
|
||||
values, err := e.getTypedRowData(rows)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
table.Rows = append(table.Rows, values)
|
||||
}
|
||||
|
||||
result.Tables = append(result.Tables, table)
|
||||
result.Meta.Set("rowCount", rowCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e PostgresQueryEndpoint) getTypedRowData(rows *core.Rows) (tsdb.RowValues, error) {
|
||||
|
||||
types, err := rows.ColumnTypes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
values := make([]interface{}, len(types))
|
||||
valuePtrs := make([]interface{}, len(types))
|
||||
|
||||
for i := 0; i < len(types); i++ {
|
||||
valuePtrs[i] = &values[i]
|
||||
}
|
||||
|
||||
if err := rows.Scan(valuePtrs...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// convert types not handled by lib/pq
|
||||
// unhandled types are returned as []byte
|
||||
for i := 0; i < len(types); i++ {
|
||||
if value, ok := values[i].([]byte); ok == true {
|
||||
switch types[i].DatabaseTypeName() {
|
||||
case "NUMERIC":
|
||||
if v, err := strconv.ParseFloat(string(value), 64); err == nil {
|
||||
values[i] = v
|
||||
} else {
|
||||
e.log.Debug("Rows", "Error converting numeric to float", value)
|
||||
}
|
||||
case "UNKNOWN", "CIDR", "INET", "MACADDR":
|
||||
// char literals have type UNKNOWN
|
||||
values[i] = string(value)
|
||||
default:
|
||||
e.log.Debug("Rows", "Unknown database type", types[i].DatabaseTypeName(), "value", value)
|
||||
values[i] = string(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func (e PostgresQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult) error {
|
||||
pointsBySeries := make(map[string]*tsdb.TimeSeries)
|
||||
seriesByQueryOrder := list.New()
|
||||
columnNames, err := rows.Columns()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rowLimit := 1000000
|
||||
rowCount := 0
|
||||
timeIndex := -1
|
||||
metricIndex := -1
|
||||
|
||||
// check columns of resultset
|
||||
for i, col := range columnNames {
|
||||
switch col {
|
||||
case "time":
|
||||
timeIndex = i
|
||||
case "metric":
|
||||
metricIndex = i
|
||||
}
|
||||
}
|
||||
|
||||
if timeIndex == -1 {
|
||||
return fmt.Errorf("Found no column named time")
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
var timestamp float64
|
||||
var value null.Float
|
||||
var metric string
|
||||
|
||||
if rowCount > rowLimit {
|
||||
return fmt.Errorf("PostgreSQL query row limit exceeded, limit %d", rowLimit)
|
||||
}
|
||||
|
||||
values, err := e.getTypedRowData(rows)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch columnValue := values[timeIndex].(type) {
|
||||
case int64:
|
||||
timestamp = float64(columnValue * 1000)
|
||||
case float64:
|
||||
timestamp = columnValue * 1000
|
||||
case time.Time:
|
||||
timestamp = float64(columnValue.Unix() * 1000)
|
||||
default:
|
||||
return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp")
|
||||
}
|
||||
|
||||
if metricIndex >= 0 {
|
||||
if columnValue, ok := values[metricIndex].(string); ok == true {
|
||||
metric = columnValue
|
||||
} else {
|
||||
return fmt.Errorf("Column metric must be of type char,varchar or text")
|
||||
}
|
||||
}
|
||||
|
||||
for i, col := range columnNames {
|
||||
if i == timeIndex || i == metricIndex {
|
||||
continue
|
||||
}
|
||||
|
||||
switch columnValue := values[i].(type) {
|
||||
case int64:
|
||||
value = null.FloatFrom(float64(columnValue))
|
||||
case float64:
|
||||
value = null.FloatFrom(columnValue)
|
||||
case nil:
|
||||
value.Valid = false
|
||||
default:
|
||||
return fmt.Errorf("Value column must have numeric datatype, column: %s type: %T value: %v", col, columnValue, columnValue)
|
||||
}
|
||||
if metricIndex == -1 {
|
||||
metric = col
|
||||
}
|
||||
e.appendTimePoint(pointsBySeries, seriesByQueryOrder, metric, timestamp, value)
|
||||
rowCount++
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
for elem := seriesByQueryOrder.Front(); elem != nil; elem = elem.Next() {
|
||||
key := elem.Value.(string)
|
||||
result.Series = append(result.Series, pointsBySeries[key])
|
||||
}
|
||||
|
||||
result.Meta.Set("rowCount", rowCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e PostgresQueryEndpoint) appendTimePoint(pointsBySeries map[string]*tsdb.TimeSeries, seriesByQueryOrder *list.List, metric string, timestamp float64, value null.Float) {
|
||||
if series, exist := pointsBySeries[metric]; exist {
|
||||
series.Points = append(series.Points, tsdb.TimePoint{value, null.FloatFrom(timestamp)})
|
||||
} else {
|
||||
series := &tsdb.TimeSeries{Name: metric}
|
||||
series.Points = append(series.Points, tsdb.TimePoint{value, null.FloatFrom(timestamp)})
|
||||
pointsBySeries[metric] = series
|
||||
seriesByQueryOrder.PushBack(metric)
|
||||
}
|
||||
e.log.Debug("Rows", "metric", metric, "time", timestamp, "value", value)
|
||||
}
|
||||
125
pkg/tsdb/postgres/postgres_test.go
Normal file
125
pkg/tsdb/postgres/postgres_test.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-xorm/xorm"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/sqlutil"
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
_ "github.com/lib/pq"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
// To run this test, remove the Skip from SkipConvey
|
||||
// and set up a PostgreSQL db named grafanatest and a user/password grafanatest/grafanatest
|
||||
// To run this test, remove the Skip from SkipConvey
// and set up a PostgreSQL db named grafanatest and a user/password grafanatest/grafanatest
func TestPostgres(t *testing.T) {
	SkipConvey("PostgreSQL", t, func() {
		x := InitPostgresTestDB(t)

		endpoint := &PostgresQueryEndpoint{
			sqlEngine: &tsdb.DefaultSqlEngine{
				MacroEngine: NewPostgresMacroEngine(),
				XormEngine:  x,
			},
			log: log.New("tsdb.postgres"),
		}

		sess := x.NewSession()
		defer sess.Close()

		// Create one table covering the PostgreSQL column types the endpoint
		// is expected to map to Go types.
		sql := `
			CREATE TABLE postgres_types(
				c00_smallint smallint,
				c01_integer integer,
				c02_bigint bigint,

				c03_real real,
				c04_double double precision,
				c05_decimal decimal(10,2),
				c06_numeric numeric(10,2),

				c07_char char(10),
				c08_varchar varchar(10),
				c09_text text,

				c10_timestamp timestamp without time zone,
				c11_timestamptz timestamp with time zone,
				c12_date date,
				c13_time time without time zone,
				c14_timetz time with time zone,
				c15_interval interval
			);
		`
		_, err := sess.Exec(sql)
		So(err, ShouldBeNil)

		// Insert a single row with a representative value for every column.
		sql = `
			INSERT INTO postgres_types VALUES(
				1,2,3,
				4.5,6.7,1.1,1.2,
				'char10','varchar10','text',

				now(),now(),now(),now(),now(),'15m'::interval
			);
		`
		_, err = sess.Exec(sql)
		So(err, ShouldBeNil)

		Convey("Query with Table format should map PostgreSQL column types to Go types", func() {
			query := &tsdb.TsdbQuery{
				Queries: []*tsdb.Query{
					{
						Model: simplejson.NewFromAny(map[string]interface{}{
							"rawSql": "SELECT * FROM postgres_types",
							"format": "table",
						}),
						RefId: "A",
					},
				},
			}

			resp, err := endpoint.Query(nil, nil, query)
			queryResult := resp.Results["A"]
			So(err, ShouldBeNil)

			// Check the Go type of each cell of the single returned row.
			column := queryResult.Tables[0].Rows[0]
			So(column[0].(int64), ShouldEqual, 1)
			So(column[1].(int64), ShouldEqual, 2)
			So(column[2].(int64), ShouldEqual, 3)
			So(column[3].(float64), ShouldEqual, 4.5)
			So(column[4].(float64), ShouldEqual, 6.7)
			// libpq doesn't properly convert decimal, numeric and char to go types but returns []uint8 instead
			// So(column[5].(float64), ShouldEqual, 1.1)
			// So(column[6].(float64), ShouldEqual, 1.2)
			// So(column[7].(string), ShouldEqual, "char")
			So(column[8].(string), ShouldEqual, "varchar10")
			So(column[9].(string), ShouldEqual, "text")

			So(column[10].(time.Time), ShouldHaveSameTypeAs, time.Now())
			So(column[11].(time.Time), ShouldHaveSameTypeAs, time.Now())
			So(column[12].(time.Time), ShouldHaveSameTypeAs, time.Now())
			So(column[13].(time.Time), ShouldHaveSameTypeAs, time.Now())
			So(column[14].(time.Time), ShouldHaveSameTypeAs, time.Now())

			// libpq doesn't properly convert interval to go types but returns []uint8 instead
			// So(column[15].(time.Time), ShouldHaveSameTypeAs, time.Now())
		})
	})
}
|
||||
|
||||
func InitPostgresTestDB(t *testing.T) *xorm.Engine {
|
||||
x, err := xorm.NewEngine(sqlutil.TestDB_Postgres.DriverName, sqlutil.TestDB_Postgres.ConnStr)
|
||||
|
||||
// x.ShowSQL()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to init postgres db %v", err)
|
||||
}
|
||||
|
||||
sqlutil.CleanDB(x)
|
||||
|
||||
return x
|
||||
}
|
||||
134
pkg/tsdb/sql_engine.go
Normal file
134
pkg/tsdb/sql_engine.go
Normal file
@@ -0,0 +1,134 @@
|
||||
package tsdb

import (
	"context"
	"sync"

	"github.com/go-xorm/core"
	"github.com/go-xorm/xorm"
	"github.com/grafana/grafana/pkg/components/simplejson"
	"github.com/grafana/grafana/pkg/models"
)

// SqlEngine is a wrapper class around xorm for relational database data sources.
type SqlEngine interface {
	// InitEngine creates the db connection (or reuses a cached one) for the
	// given driver and connection string.
	InitEngine(driverName string, dsInfo *models.DataSource, cnnstr string) error
	// Query executes every query in the TsdbQuery and converts the rows using
	// the caller-supplied transform callbacks (one per output format).
	Query(
		ctx context.Context,
		ds *models.DataSource,
		query *TsdbQuery,
		transformToTimeSeries func(query *Query, rows *core.Rows, result *QueryResult) error,
		transformToTable func(query *Query, rows *core.Rows, result *QueryResult) error,
	) (*Response, error)
}

// SqlMacroEngine interpolates macros into sql. It takes in the timeRange to be able to
// generate queries that use from and to.
type SqlMacroEngine interface {
	Interpolate(timeRange *TimeRange, sql string) (string, error)
}

// DefaultSqlEngine is the stock SqlEngine implementation shared by the SQL
// data sources: a macro interpolator plus the underlying xorm engine.
type DefaultSqlEngine struct {
	MacroEngine SqlMacroEngine
	XormEngine  *xorm.Engine
}

// engineCacheType caches xorm engines per datasource id, together with the
// datasource version used to detect stale entries. Guarded by the embedded
// mutex.
type engineCacheType struct {
	cache    map[int64]*xorm.Engine // keyed by datasource id
	versions map[int64]int          // datasource version at cache time
	sync.Mutex
}

// engineCache is the process-wide engine cache shared by all SQL data sources.
var engineCache = engineCacheType{
	cache:    make(map[int64]*xorm.Engine),
	versions: make(map[int64]int),
}
|
||||
|
||||
// InitEngine creates the db connection and inits the xorm engine or loads it from the engine cache
|
||||
func (e *DefaultSqlEngine) InitEngine(driverName string, dsInfo *models.DataSource, cnnstr string) error {
|
||||
engineCache.Lock()
|
||||
defer engineCache.Unlock()
|
||||
|
||||
if engine, present := engineCache.cache[dsInfo.Id]; present {
|
||||
if version, _ := engineCache.versions[dsInfo.Id]; version == dsInfo.Version {
|
||||
e.XormEngine = engine
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
engine, err := xorm.NewEngine(driverName, cnnstr)
|
||||
engine.SetMaxOpenConns(10)
|
||||
engine.SetMaxIdleConns(10)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
engineCache.cache[dsInfo.Id] = engine
|
||||
e.XormEngine = engine
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query is a default implementation of the Query method for an SQL data source.
|
||||
// The caller of this function must implement transformToTimeSeries and transformToTable and
|
||||
// pass them in as parameters.
|
||||
func (e *DefaultSqlEngine) Query(
|
||||
ctx context.Context,
|
||||
dsInfo *models.DataSource,
|
||||
tsdbQuery *TsdbQuery,
|
||||
transformToTimeSeries func(query *Query, rows *core.Rows, result *QueryResult) error,
|
||||
transformToTable func(query *Query, rows *core.Rows, result *QueryResult) error,
|
||||
) (*Response, error) {
|
||||
result := &Response{
|
||||
Results: make(map[string]*QueryResult),
|
||||
}
|
||||
|
||||
session := e.XormEngine.NewSession()
|
||||
defer session.Close()
|
||||
db := session.DB()
|
||||
|
||||
for _, query := range tsdbQuery.Queries {
|
||||
rawSql := query.Model.Get("rawSql").MustString()
|
||||
if rawSql == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
queryResult := &QueryResult{Meta: simplejson.New(), RefId: query.RefId}
|
||||
result.Results[query.RefId] = queryResult
|
||||
|
||||
rawSql, err := e.MacroEngine.Interpolate(tsdbQuery.TimeRange, rawSql)
|
||||
if err != nil {
|
||||
queryResult.Error = err
|
||||
continue
|
||||
}
|
||||
|
||||
queryResult.Meta.Set("sql", rawSql)
|
||||
|
||||
rows, err := db.Query(rawSql)
|
||||
if err != nil {
|
||||
queryResult.Error = err
|
||||
continue
|
||||
}
|
||||
|
||||
defer rows.Close()
|
||||
|
||||
format := query.Model.Get("format").MustString("time_series")
|
||||
|
||||
switch format {
|
||||
case "time_series":
|
||||
err := transformToTimeSeries(query, rows, queryResult)
|
||||
if err != nil {
|
||||
queryResult.Error = err
|
||||
continue
|
||||
}
|
||||
case "table":
|
||||
err := transformToTable(query, rows, queryResult)
|
||||
if err != nil {
|
||||
queryResult.Error = err
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
@@ -77,5 +77,8 @@ export class ColorPicker extends React.Component<IProps, any> {
|
||||
}
|
||||
|
||||
coreModule.directive('colorPicker', function (reactDirective) {
|
||||
return reactDirective(ColorPicker, ['color', 'onChange']);
|
||||
return reactDirective(ColorPicker, [
|
||||
'color',
|
||||
['onChange', { watchDepth: 'reference', wrapApply: true }]
|
||||
]);
|
||||
});
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import React from 'react';
|
||||
import $ from 'jquery';
|
||||
import tinycolor from 'tinycolor2';
|
||||
import coreModule from 'app/core/core_module';
|
||||
import { GfColorPalette } from './ColorPalette';
|
||||
import { GfSpectrumPicker } from './SpectrumPicker';
|
||||
|
||||
// Spectrum picker uses TinyColor and loads it as a global variable, so we can use it here also
|
||||
declare var tinycolor;
|
||||
const DEFAULT_COLOR = '#000000';
|
||||
|
||||
export interface IProps {
|
||||
color: string;
|
||||
@@ -19,8 +19,8 @@ export class ColorPickerPopover extends React.Component<IProps, any> {
|
||||
super(props);
|
||||
this.state = {
|
||||
tab: 'palette',
|
||||
color: this.props.color,
|
||||
colorString: this.props.color
|
||||
color: this.props.color || DEFAULT_COLOR,
|
||||
colorString: this.props.color || DEFAULT_COLOR
|
||||
};
|
||||
}
|
||||
|
||||
@@ -88,7 +88,7 @@ export class ColorPickerPopover extends React.Component<IProps, any> {
|
||||
);
|
||||
const spectrumTab = (
|
||||
<div id="spectrum">
|
||||
<GfSpectrumPicker color={this.props.color} onColorSelect={this.spectrumColorSelected.bind(this)} options={{}} />
|
||||
<GfSpectrumPicker color={this.state.color} onColorSelect={this.spectrumColorSelected.bind(this)} options={{}} />
|
||||
</div>
|
||||
);
|
||||
const currentTab = this.state.tab === 'palette' ? paletteTab : spectrumTab;
|
||||
|
||||
@@ -44,7 +44,7 @@ export class SeriesColorPicker extends React.Component<IProps, any> {
|
||||
return (
|
||||
<div className="graph-legend-popover">
|
||||
{this.props.series && this.renderAxisSelection()}
|
||||
<ColorPickerPopover color="#7EB26D" onColorSelect={this.onColorChange} />
|
||||
<ColorPickerPopover color={this.props.series.color} onColorSelect={this.onColorChange} />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
23
public/app/core/components/colorpicker/spectrum_picker.ts
Normal file
23
public/app/core/components/colorpicker/spectrum_picker.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
/**
|
||||
* Wrapper for the new ngReact <color-picker> directive for backward compatibility.
|
||||
* Allows remaining <spectrum-picker> untouched in outdated plugins.
|
||||
* Technically, it's just a wrapper for react component with two-way data binding support.
|
||||
*/
|
||||
import coreModule from '../../core_module';
|
||||
|
||||
export function spectrumPicker() {
|
||||
return {
|
||||
restrict: 'E',
|
||||
require: 'ngModel',
|
||||
scope: true,
|
||||
replace: true,
|
||||
template: '<color-picker color="ngModel.$viewValue" onChange="onColorChange"></color-picker>',
|
||||
link: function(scope, element, attrs, ngModel) {
|
||||
scope.ngModel = ngModel;
|
||||
scope.onColorChange = (color) => {
|
||||
ngModel.$setViewValue(color);
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
coreModule.directive('spectrumPicker', spectrumPicker);
|
||||
@@ -4,9 +4,6 @@ import coreModule from 'app/core/core_module';
|
||||
|
||||
var template = `
|
||||
<select class="gf-form-input" ng-model="ctrl.model" ng-options="f.value as f.text for f in ctrl.options"></select>
|
||||
<info-popover mode="right-absolute">
|
||||
Not finding dashboard you want? Star it first, then it should appear in this select box.
|
||||
</info-popover>
|
||||
`;
|
||||
|
||||
export class DashboardSelectorCtrl {
|
||||
|
||||
@@ -7,6 +7,7 @@ import $ from 'jquery';
|
||||
import coreModule from 'app/core/core_module';
|
||||
import {profiler} from 'app/core/profiler';
|
||||
import appEvents from 'app/core/app_events';
|
||||
import Drop from 'tether-drop';
|
||||
|
||||
export class GrafanaCtrl {
|
||||
|
||||
@@ -117,6 +118,11 @@ export function grafanaAppDirective(playlistSrv, contextSrv) {
|
||||
if (data.params.kiosk) {
|
||||
appEvents.emit('toggle-kiosk-mode');
|
||||
}
|
||||
|
||||
// close all drops
|
||||
for (let drop of Drop.drops) {
|
||||
drop.destroy();
|
||||
}
|
||||
});
|
||||
|
||||
// handle kiosk mode
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user