Resolve merge conflicts with remote/master.

This commit is contained in:
Alin Sinpalean 2017-09-26 17:55:30 +02:00
commit f25aeadf21
964 changed files with 55310 additions and 215005 deletions

View File

@ -1,3 +0,0 @@
{
"directory": "public/vendor/"
}

.gitignore vendored
View File

@ -25,6 +25,7 @@ public/css/*.min.css
.idea/
*.iml
*.tmp
.DS_Store
.vscode/
/data/*

View File

@ -1,21 +0,0 @@
{
"preset" : "default",
"lineBreak" : {
"before" : {
"VariableDeclarationWithoutInit" : 0,
},
"after": {
"AssignmentOperator": -1,
"ArgumentListArrayExpression": ">=1"
}
},
"whiteSpace" : {
"before" : {
},
"after" : {
}
}
}

View File

@ -7,11 +7,45 @@
- UX changes to nav & side menu
- New dashboard grid layout system
# 4.5.0 (unreleased)
# 4.6.0 (unreleased)
## Enhancements
## New Features
* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
* **Graph**: Add support for local formatting in axes. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
* **Jaeger**: Add support for OpenTracing using Jaeger in Grafana. [#9213](https://github.com/grafana/grafana/pull/9213)
* **Unit types**: New date & time unit types added, useful in singlestat to show dates & times. [#3678](https://github.com/grafana/grafana/issues/3678), [#6710](https://github.com/grafana/grafana/issues/6710), [#2764](https://github.com/grafana/grafana/issues/2764)
* **CLI**: Make it possible to install plugins from any URL [#5873](https://github.com/grafana/grafana/issues/5873)
* **Prometheus**: Add support for instant queries [#5765](https://github.com/grafana/grafana/issues/5765), thx [@mtanda](https://github.com/mtanda)
## Minor
* **SMTP**: Make it possible to set a specific EHLO identity for the SMTP client. [#9319](https://github.com/grafana/grafana/issues/9319)
* **Dataproxy**: Allow Grafana to renegotiate TLS connections [#9250](https://github.com/grafana/grafana/issues/9250)
# 4.5.2 (2017-09-22)
## Fixes
* **Graphite**: Fix for issues with jsonData & graphiteVersion null errors [#9258](https://github.com/grafana/grafana/issues/9258)
* **Graphite**: Fix for Grafana internal metrics to Graphite sending NaN values [#9279](https://github.com/grafana/grafana/issues/9279)
* **HTTP API**: Fix for HEAD method requests [#9307](https://github.com/grafana/grafana/issues/9307)
* **Templating**: Fix for duplicate template variable queries when refresh is set to time range change [#9185](https://github.com/grafana/grafana/issues/9185)
* **Metrics**: Don't write NaN values to Graphite [#9279](https://github.com/grafana/grafana/issues/9279)
# 4.5.1 (2017-09-15)
## Fixes
* **MySQL**: Fixed issue with query editor not showing [#9247](https://github.com/grafana/grafana/issues/9247)
## Breaking changes
* **Metrics**: The metric structure for internal metrics about Grafana published to Graphite has changed. This might break dashboards for internal metrics.
# 4.5.0 (2017-09-14)
## Fixes & Enhancements since beta1
* **Security**: Security fix for API vulnerability (in multiple org setups).
* **Shortcuts**: Adds shortcut for creating new dashboard [#8876](https://github.com/grafana/grafana/pull/8876) thx [@mtanda](https://github.com/mtanda)
* **Graph**: Right Y-Axis label position fixed [#9172](https://github.com/grafana/grafana/pull/9172)
* **General**: Improve rounding of time intervals [#9197](https://github.com/grafana/grafana/pull/9197), thx [@alin-amana](https://github.com/alin-amana)
# 4.5.0-beta1 (2017-09-05)
@ -32,6 +66,7 @@
### Breaking change
* **InfluxDB/Elasticsearch**: The panel & data source option named "Group by time interval" is now named "Min time interval" and now always defines a lower limit for the auto group-by time, without having to use the `>` prefix (that prefix still works). This should in theory have close to zero actual impact on existing dashboards. It does mean that if you used this setting to define a hard group-by time interval of, say, "1d", and you zoomed to a wide enough time range, the interval could now increase above "1d", as the setting is now always treated as a lower limit.
* **Elasticsearch**: Elasticsearch metric queries without a date histogram now return table-formatted data, making the table panel much easier to use for this use case. This should not break/change existing dashboards with stock panels, but external panel plugins can be affected.
## Changes

View File

@ -1,26 +0,0 @@
{
"name": "grafana",
"version": "2.0.2",
"homepage": "https://github.com/grafana/grafana",
"authors": [],
"license": "Apache 2.0",
"ignore": [
"**/.*",
"node_modules",
"bower_components",
"public/vendor/",
"test",
"tests"
],
"dependencies": {
"jquery": "3.1.0",
"lodash": "4.15.0",
"angular": "1.6.1",
"angular-route": "1.6.1",
"angular-mocks": "1.6.1",
"angular-sanitize": "1.6.1",
"angular-native-dragdrop": "1.2.2",
"angular-bindonce": "0.3.3",
"clipboard": "^1.5.16"
}
}

View File

@ -318,6 +318,7 @@ key_file =
skip_verify = false
from_address = admin@grafana.localhost
from_name = Grafana
ehlo_identity =
[emails]
welcome_email_on_sign_up = false
@ -452,9 +453,26 @@ url = https://grafana.com
[grafana_com]
url = https://grafana.com
#################################### Distributed tracing ############
[tracing.jaeger]
# jaeger destination (ex localhost:6831)
address =
# tag that will always be included when creating new spans. ex (tag1:value1,tag2:value2)
always_included_tag =
# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
sampler_type = const
# jaeger samplerconfig param
# for "const" sampler, 0 or 1 for always false/true respectively
# for "probabilistic" sampler, a probability between 0 and 1
# for "rateLimiting" sampler, the number of spans per second
# for "remote" sampler, param is the same as for "probabilistic"
# and indicates the initial sampling rate before the actual one
# is received from the mothership
sampler_param = 1
#################################### External Image Storage ##############
[external_image_storage]
# You can choose between (s3, webdav)
# You can choose between (s3, webdav, gcs)
provider =
[external_image_storage.s3]
@ -467,3 +485,7 @@ url =
username =
password =
public_url =
[external_image_storage.gcs]
key_file =
bucket =

View File

@ -295,6 +295,8 @@
;skip_verify = false
;from_address = admin@grafana.localhost
;from_name = Grafana
# EHLO identity in SMTP dialog (defaults to instance_name)
;ehlo_identity = dashboard.example.com
[emails]
;welcome_email_on_sign_up = false
@ -391,6 +393,23 @@
;address =
;prefix = prod.grafana.%(instance_name)s.
#################################### Distributed tracing ############
[tracing.jaeger]
# Enable by setting the address of the Jaeger agent to send traces to (ex localhost:6831)
;address = localhost:6831
# Tag that will always be included when creating new spans. ex (tag1:value1,tag2:value2)
;always_included_tag = tag1:value1
# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
;sampler_type = const
# jaeger samplerconfig param
# for "const" sampler, 0 or 1 for always false/true respectively
# for "probabilistic" sampler, a probability between 0 and 1
# for "rateLimiting" sampler, the number of spans per second
# for "remote" sampler, param is the same as for "probabilistic"
# and indicates the initial sampling rate before the actual one
# is received from the mothership
;sampler_param = 1
#################################### Grafana.com integration ##########################
# Url used to to import dashboards directly from Grafana.com
[grafana_com]
@ -399,7 +418,7 @@
#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
# you can choose between (s3, webdav)
# you can choose between (s3, webdav, gcs)
;provider =
[external_image_storage.s3]
@ -412,3 +431,7 @@
;public_url =
;username =
;password =
[external_image_storage.gcs]
;key_file =
;bucket =

docker/blocks/jaeger/fig Normal file
View File

@ -0,0 +1,6 @@
jaeger:
image: jaegertracing/all-in-one:latest
ports:
- "localhost:6831:6831/udp"
- "16686:16686"

View File

@ -1,2 +1,3 @@
FROM prom/prometheus
ADD prometheus.yml /etc/prometheus/
ADD alert.rules /etc/prometheus/

View File

@ -0,0 +1,10 @@
# Alert Rules
ALERT AppCrash
IF process_open_fds > 0
FOR 15s
LABELS { severity="critical" }
ANNOTATIONS {
summary = "Number of open fds > 0",
description = "Just testing"
}

View File

@ -18,3 +18,8 @@ fake-prometheus-data:
environment:
FD_DATASOURCE: prom
alertmanager:
image: quay.io/prometheus/alertmanager
net: host
ports:
- "9093:9093"

View File

@ -6,22 +6,30 @@ global:
# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
- "alert.rules"
# - "first.rules"
# - "second.rules"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: 'prometheus'
# Override the global default and scrape targets from this job every 5 seconds.
scrape_interval: 10s
scrape_timeout: 10s
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
alerting:
alertmanagers:
- scheme: http
static_configs:
#- targets: ['localhost:9090', '172.17.0.1:9091', '172.17.0.1:9100', '172.17.0.1:9150']
- targets: ['localhost:9090', '127.0.0.1:9091', '127.0.0.1:9100', '127.0.0.1:9150']
- targets:
- "127.0.0.1:9093"
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
- job_name: 'node_exporter'
static_configs:
- targets: ['127.0.0.1:9100']
- job_name: 'fake-data-gen'
static_configs:
- targets: ['127.0.0.1:9091']
- job_name: 'grafana'
static_configs:
- targets: ['127.0.0.1:3000']

View File

@ -0,0 +1,15 @@
+++
title = "Internal metrics"
description = "Internal metrics exposed by Grafana"
keywords = ["grafana", "metrics", "internal metrics"]
type = "docs"
[menu.docs]
parent = "admin"
weight = 8
+++
# Internal metrics
Grafana collects some metrics about itself internally. Currently Grafana supports pushing metrics to Graphite and exposing them to be scraped by Prometheus.
To enable internal metrics, turn them on under the `[metrics]` section in your [grafana.ini](http://docs.grafana.org/installation/configuration/#enabled-6) config file. If you want to push metrics to Graphite, you also have to configure the [metrics.graphite](http://docs.grafana.org/installation/configuration/#metrics-graphite) section.
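As a reference, here is a minimal `grafana.ini` sketch that enables internal metrics and pushes them to Graphite; the Graphite address and prefix below are illustrative assumptions, not defaults:
```ini
[metrics]
# Enables internal metrics (and the /metrics endpoint for Prometheus scraping)
enabled = true

[metrics.graphite]
# Only needed when pushing to Graphite; assumed local carbon receiver
address = localhost:2003
prefix = prod.grafana.%(instance_name)s.
```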

View File

@ -41,7 +41,7 @@ Proxy access means that the Grafana backend will proxy all requests from the bro
Click the ``Select metric`` link to start navigating the metric space. Once you start you can continue using the mouse
or keyboard arrow keys. You can select a wildcard and still continue.
{{< docs-imagebox img="/img/docs/v45/graphite_query1_still.png" class="docs-image--center"
{{< docs-imagebox img="/img/docs/v45/graphite_query1_still.png"
animated-gif="/img/docs/v45/graphite_query1.gif" >}}
@ -52,7 +52,7 @@ a function is selected it will be added and your focus will be in the text box o
a parameter just click on it and it will turn into a text box. To delete a function click the function name followed
by the x icon.
{{< docs-imagebox img="/img/docs/v45/graphite_query2_still.png" class="docs-image--center"
{{< docs-imagebox img="/img/docs/v45/graphite_query2_still.png"
animated-gif="/img/docs/v45/graphite_query2.gif" >}}
@ -60,7 +60,7 @@ by the x icon.
Some functions like aliasByNode support an optional second argument. To add this parameter specify for example 3,-2 as the first parameter and the function editor will adapt and move the -2 to a second parameter. To remove the second optional parameter just click on it and leave it blank and the editor will remove it.
{{< docs-imagebox img="/img/docs/v45/graphite_query3_still.png" class="docs-image--center"
{{< docs-imagebox img="/img/docs/v45/graphite_query3_still.png"
animated-gif="/img/docs/v45/graphite_query3.gif" >}}
@ -68,6 +68,10 @@ Some functions like aliasByNode support an optional second argument. To add this
You can reference queries by the row “letter” that they're on (similar to Microsoft Excel). If you add a second query to a graph, you can reference the first query simply by typing in #A. This provides an easy and convenient way to build compounded queries.
{{< docs-imagebox img="/img/docs/v45/graphite_nested_queries_still.png"
animated-gif="/img/docs/v45/graphite_nested_queries.gif" >}}
## Point consolidation
All Graphite metrics are consolidated so that Graphite doesn't return more data points than there are pixels in the graph. By default,

View File

@ -41,9 +41,7 @@ mode is also more secure as the username & password will never reach the browser
## Query Editor
{{< docs-imagebox img="/img/docs/v45/influxdb_query_still.png" class="docs-image--center"
animated-gif="/img/docs/v45/influxdb_query.gif" >}}
{{< docs-imagebox img="/img/docs/v45/influxdb_query_still.png" class="docs-image--no-shadow" animated-gif="/img/docs/v45/influxdb_query.gif" >}}
You find the InfluxDB editor in the metrics tab in Graph or Singlestat panel's edit mode. You enter edit mode by clicking the
panel title, then edit. The editor allows you to select metrics and tags.
@ -59,10 +57,8 @@ will automatically adjust the filter tag condition to use the InfluxDB regex mat
### Field & Aggregation functions
In the `SELECT` row you can specify what fields and functions you want to use. If you have a
group by time you need an aggregation function. Some functions like derivative require an aggregation function.
The editor tries simplify and unify this part of the query. For example:
![](/img/docs/influxdb/select_editor.png)
group by time you need an aggregation function. Some functions like derivative require an aggregation function. The editor tries to simplify and unify this part of the query. For example:<br>
![](/img/docs/influxdb/select_editor.png)<br>
The above will generate the following InfluxDB `SELECT` clause:

View File

@ -11,8 +11,7 @@ weight = 7
# Using MySQL in Grafana
> Only available in Grafana v4.3+. This data source is not ready for
> production use, currently in development (alpha state).
> Only available in Grafana v4.3+.
Grafana ships with a built-in MySQL data source plugin that allows you to query and visualize
data from a MySQL compatible database.
@ -58,8 +57,7 @@ If the `Format as` query option is set to `Table` then you can basically do any
Query editor with example query:
![](/img/docs/v43/mysql_table_query.png)
{{< docs-imagebox img="/img/docs/v45/mysql_table_query.png" >}}
The query:

View File

@ -39,7 +39,8 @@ Name | Description
Open a graph in edit mode by clicking the title > Edit (or by pressing the `e` key while hovering over the panel).
![](/img/docs/v43/prometheus_query_editor.png)
{{< docs-imagebox img="/img/docs/v45/prometheus_query_editor_still.png"
animated-gif="/img/docs/v45/prometheus_query_editor.gif" >}}
Name | Description
------- | --------

View File

@ -12,42 +12,23 @@ weight = 4
# Dashboard List Panel
The dashboard list panel allows you to display dynamic links to other dashboards. The list can be configured to use starred dashboards, a search query and/or dashboard tags.
{{< docs-imagebox img="/img/docs/v45/dashboard-list-panels.png" max-width= "800px" >}}
<img class="no-shadow" src="/img/docs/v2/dashboard_list_panels.png">
The dashboard list panel allows you to display dynamic links to other dashboards. The list can be configured to use starred dashboards, recently viewed dashboards, a search query and/or dashboard tags.
> On each dashboard load, the dashlist panel will re-query the dashboard list, always providing the most up to date results.
## Mode: Starred Dashboards
## Dashboard List Options
The `starred` dashboard selection displays starred dashboards, up to the number specified in the `Limit Number to` field, in alphabetical order. On dashboard load, the dashlist panel will re-query the favorites to appear in dashboard list panel, always providing the most up to date results.
{{< docs-imagebox img="/img/docs/v45/dashboard-list-options.png" max-width="600px" class="docs-image--no-shadow">}}
<img class="no-shadow" src="/img/docs/v2/dashboard_list_config_starred.png">
## Mode: Search Dashboards
The panel may be configured to search by either string query or tag(s). On dashboard load, the dashlist panel will re-query the dashboard list, always providing the most up to date results.
To configure dashboard list in this manner, select `search` from the Mode select box. When selected, the Search Options section will appear.
Name | Description
------------ | -------------
Mode | Set search or starred mode
Query | If in search mode specify the search query
Tags | if in search mode specify dashboard tags to search for
Limit number to | Specify the maximum number of dashboards
### Search by string
To search by a string, enter a search query in the `Search Options: Query` field. Queries are case-insensitive, and partial values are accepted.
<img class="no-shadow" src="/img/docs/v2/dashboard_list_config_string.png">
### Search by tag
To search by one or more tags, enter your selection in the `Search Options: Tags:` field. Note that existing tags will not appear as you type, and *are* case sensitive. To see a list of existing tags, you can always return to the dashboard, open the Dashboard Picker at the top and click `tags` link in the search bar.
<img class="no-shadow" src="/img/docs/v2/dashboard_list_config_tags.png">
1. `Starred`: The starred dashboard selection displays starred dashboards in alphabetical order.
2. `Recently Viewed`: The recently viewed dashboard selection displays recently viewed dashboards in alphabetical order.
3. `Search`: The search dashboard selection displays dashboards by search query or tag(s).
4. `Show Headings`: When Show Headings is ticked, the chosen list selection (Starred, Recently Viewed, Search) is shown as a heading.
5. `Max Items`: Max Items sets the maximum number of items shown in the list.
6. `Query`: Here you enter the query you want to search by. Queries are case-insensitive, and partial values are accepted.
7. `Tags`: Here you enter the tag(s) you want to search by. Note that existing tags will not appear as you type, and *are* case sensitive. To see a list of existing tags, you can always return to the dashboard, open the Dashboard Picker at the top and click the `tags` link in the search bar.
> When multiple tags and strings appear, the dashboard list will display those matching ALL conditions.

View File

@ -13,14 +13,15 @@ weight = 1
The main panel in Grafana is simply named Graph. It provides a very rich set of graphing options.
<img src="/img/docs/v1/graph_overview.png" class="no-shadow">
{{< docs-imagebox img="/img/docs/v45/graph_overview.png" class="docs-image--no-shadow" max-width= "900px" >}}
Clicking the title for a panel exposes a menu. The `edit` option opens additional configuration
options for the panel.
1. Clicking the title for a panel exposes a menu. The `edit` option opens additional configuration options for the panel.
2. Click to open color & axis selection.
3. Click to only show this series. Shift/Ctrl + click to hide series.
## General
![](/img/docs/v43/graph_general.png)
{{< docs-imagebox img="/img/docs/v43/graph_general.png" max-width= "900px" >}}
The general tab allows customization of a panel's appearance and menu options.
@ -50,15 +51,11 @@ populate the template variable to a desired value from the link.
The metrics tab defines what series data and sources to render. Each datasource provides different
options.
## Axes & Grid
## Axes
![](/img/docs/v43/graph_axes_grid_options.png)
{{< docs-imagebox img="/img/docs/v43/graph_axes_grid_options.png" max-width= "900px" >}}
The Axes & Grid tab controls the display of axes, grids and legend.
### Axes
The ``Left Y`` and ``Right Y`` can be customized using:
The Axes tab controls the display of axes, grids and legend. The ``Left Y`` and ``Right Y`` can be customized using:
- ``Unit`` - The display unit for the Y value
- ``Grid Max`` - The maximum Y value. (default auto)
@ -105,7 +102,7 @@ It is just the sum of all data points received by Grafana.
## Display styles
![](/img/docs/v43/graph_display_styles.png)
{{< docs-imagebox img="/img/docs/v43/graph_display_styles.png" max-width= "900px" >}}
Display styles control visual properties of the graph.
@ -160,4 +157,6 @@ There is an option under Series overrides to draw lines as dashes. Set Dashes to
## Time Range
![](/img/docs/v2/graph_time_range.png)
The time range tab allows you to override the dashboard time range and specify a panel-specific time, either through a relative "from now" time option or through a timeshift.
{{< docs-imagebox img="/img/docs/v45/graph-time-range.png" max-width= "900px" >}}

View File

@ -12,7 +12,7 @@ weight = 2
# Singlestat Panel
![](/img/docs/v1/singlestat_panel2.png)
{{< docs-imagebox img="/img/docs/v45/singlestat-panel.png" max-width="900px" >}}
The Singlestat Panel allows you to show the one main summary stat of a SINGLE series. It reduces the series into a single number (by looking at the max, min, average, or sum of values in the series). Singlestat also provides thresholds to color the stat or the Panel background. It can also translate the single number into a text value, and show a sparkline summary of the series.
@ -20,11 +20,9 @@ The Singlestat Panel allows you to show the one main summary stat of a SINGLE se
The singlestat panel has a normal query editor to allow you to define your exact metric queries like many other Panels. Through the Options tab, you can access the Singlestat-specific functionality.
<img class="no-shadow" src="/img/docs/v1/Singlestat-BaseSettings.png">
{{< docs-imagebox img="/img/docs/v45/singlestat-value-options.png" class="docs-image--no-shadow" max-width= "900px" >}}
1. `Big Value`: Big Value refers to how we display the main stat for the Singlestat Panel. This is always a single value that is displayed in the Panel in between two strings, `Prefix` and `Suffix`. The single number is calculated by choosing a function (min,max,average,current,total) of your metric query. This functions reduces your query into a single numeric value.
2. `Font Size`: You can use this section to select the font size of the different texts in the Singlestat Panel, i.e. prefix, value and postfix.
3. `Values`: The Value fields let you set the function (min, max, average, current, total, first, delta, range) that your entire query is reduced into a single value with. You can also set the font size of the Value field and font-size (as a %) of the metric query that the Panel is configured with. This reduces the entire query into a single summary value that is displayed.
1. `Stats`: The Stats field lets you set the function (min, max, average, current, total, first, delta, range) that your entire query is reduced into a single value with. This reduces the entire query into a single summary value that is displayed.
* `min` - The smallest value in the series
* `max` - The largest value in the series
* `avg` - The average of all the non-null values in the series
@ -34,47 +32,64 @@ The singlestat panel has a normal query editor to allow you define your exact me
* `delta` - The total incremental increase (of a counter) in the series. An attempt is made to account for counter resets, but this will only be accurate for single instance metrics. Used to show total counter increase in time series.
* `diff` - The difference between 'current' (last value) and 'first'.
* `range` - The difference between 'min' and 'max'. Useful to show the range of change for a gauge.
4. `Prefix/Postfix`: The Prefix/Postfix fields let you define a custom label and font-size (as a %) to appear *before/after* the value. The `$__name` variable can be used here to use the series name or alias from the metric query.
5. `Units`: Units are appended to the the Singlestat within the panel, and will respect the color and threshold settings for the value.
6. `Decimals`: The Decimal field allows you to override the automatic decimal precision, and set it explicitly.
2. `Prefix/Postfix`: The Prefix/Postfix fields let you define a custom label to appear *before/after* the value. The `$__name` variable can be used here to use the series name or alias from the metric query.
3. `Units`: Units are appended to the Singlestat within the panel, and will respect the color and threshold settings for the value.
4. `Decimals`: The Decimal field allows you to override the automatic decimal precision, and set it explicitly.
5. `Font Size`: You can use this section to select the font size of the different texts in the Singlestat Panel, i.e. prefix, value and postfix.
### Coloring
The coloring options of the Singlestat Panel config allow you to dynamically change the colors based on the Singlestat value.
<img class="no-shadow" src="/img/docs/v1/Singlestat-Coloring.png">
{{< docs-imagebox img="/img/docs/v45/singlestat-color-options.png" max-width="500px" class="docs-image--right docs-image--no-shadow">}}
1. `Background`: This checkbox applies the configured thresholds and colors to the entirety of the Singlestat Panel background.
2. `Value`: This checkbox applies the configured thresholds and colors to the summary stat.
3. `Thresholds`: Change the background and value colors dynamically within the panel, depending on the Singlestat value. The threshold field accepts **2 comma-separated** values which represent 3 ranges that correspond to the three colors directly to the right. For example: if the thresholds are 70, 90 then the first color represents < 70, the second color represents between 70 and 90 and the third color represents > 90.
4. `Colors`: Select a color and opacity
2. `Thresholds`: Change the background and value colors dynamically within the panel, depending on the Singlestat value. The threshold field accepts **2 comma-separated** values which represent 3 ranges that correspond to the three colors directly to the right. For example: if the thresholds are 70, 90 then the first color represents < 70, the second color represents between 70 and 90 and the third color represents > 90.
3. `Colors`: Select a color and opacity
4. `Value`: This checkbox applies the configured thresholds and colors to the summary stat.
5. `Invert order`: This link toggles the threshold color order.<br>For example: Green, Orange, Red (<img class="no-shadow" src="/img/docs/v1/gyr.png">) will become Red, Orange, Green (<img class="no-shadow" src="/img/docs/v1/ryg.png">).
### Spark Lines
Sparklines are a great way of seeing the historical data related to the summary stat, providing valuable context at a glance. Sparklines act differently than traditional Graph Panels and do not include an x or y axis, coordinates, a legend, or the ability to interact with the graph.
<img class="no-shadow" src="/img/docs/v1/Singlestat-Sparklines.png">
{{< docs-imagebox img="/img/docs/v45/singlestat-spark-options.png" max-width="500px" class="docs-image--right docs-image--no-shadow">}}
1. `Show`: The show checkbox will toggle whether the spark line is shown in the Panel. When unselected, only the Singlestat value will appear.
2. `Background`: Check if you want the sparklines to take up the full panel width, or uncheck if they should be below the main Singlestat value.
2. `Full Height`: Check if you want the sparklines to take up the full panel height, or uncheck if they should be below the main Singlestat value.
3. `Line Color`: This color selection applies to the color of the sparkline itself.
4. `Fill Color`: This color selection applies to the area below the sparkline.
<div class="clearfix"></div>
> ***Pro-tip:*** Reduce the opacity on fill colors for nice looking panels.
### Gauge
Gauges give a clear picture of how high a value is in its context. It's a great way to see if a value is close to the thresholds. The gauge uses the colors set in the color options.
{{< docs-imagebox img="/img/docs/v45/singlestat-gauge-options.png" max-width="500px" class="docs-image--right docs-image--no-shadow">}}
1. `Show`: The show checkbox will toggle whether the gauge is shown in the panel. When unselected, only the Singlestat value will appear.
2. `Min/Max`: This sets the start and end point for the gauge.
3. `Threshold Labels`: Check if you want to show the threshold labels. Thresholds are set in the color options.
4. `Threshold Markers`: Check if you want to have a second meter showing the thresholds.
<div class="clearfix"></div>
### Value to text mapping
{{< docs-imagebox img="/img/docs/v45/singlestat-value-mapping.png" class="docs-image--right docs-image--no-shadow">}}
Value to text mapping allows you to translate the value of the summary stat into explicit text. The text will respect all styling, thresholds and customization defined for the value. This can be useful to translate the number of the main Singlestat value into a context-specific human-readable word or message.
<img class="no-shadow" src="/img/docs/v1/Singlestat-ValueMapping.png">
<div class="clearfix"></div>
## Troubleshooting
### Multiple Series Error
<img class="no-shadow" src="/img/docs/v2/Singlestat-MultiSeriesError.png">
{{< docs-imagebox img="/img/docs/v45/singelstat-multiple-series-error.png" class="docs-image--right docs-image--no-shadow">}}
Grafana 2.5 introduced stricter checking for multiple-series on singlestat panels. In previous versions, the panel logic did not verify that only a single series was used, and instead displayed the first series encountered. Depending on your data source, this could have led to inconsistent data being shown and/or general confusion about which metric was being displayed.

View File

@ -12,7 +12,7 @@ weight = 2
# Table Panel
<img src="/assets/img/features/table-panel.png">
<img class="screenshot" src="/assets/img/features/table-panel.png">
The new table panel is very flexible, supporting multiple modes for time series as well as for
table, annotation and raw JSON data. It also provides date formatting, value formatting and coloring options.
@ -22,55 +22,63 @@ To view table panels in action and test different configurations with sample dat
## Options overview
The table panel has many ways to manipulate your data for optimal presentation.
{{< docs-imagebox img="/img/docs/v45/table_options.png" class="docs-image--no-shadow" max-width= "500px" >}}
<img class="no-shadow" src="/img/docs/v2/table-config2.png">
1. `Data`: Control how your query is transformed into a table.
2. `Table Display`: Table display options.
3. `Column Styles`: Column value formatting and display options.
2. `Paging`: Table display options.
## Data to Table
<img class="no-shadow" src="/img/docs/v2/table-data-options.png">
{{< docs-imagebox img="/img/docs/v45/table_data_options.png" max-width="500px" class="docs-image--right">}}
The data section contains the **To Table Transform (1)**. This is the primary option for how your data/metric
query should be transformed into a table format. The **Columns (2)** option allows you to select what columns
you want in the table. Only applicable for some transforms.
<div class="clearfix"></div>
### Time series to rows
<img src="/img/docs/v2/table_ts_to_rows2.png">
{{< docs-imagebox img="/img/docs/v45/table_ts_to_rows.png" >}}
In the simplest mode you can turn time series into rows. This means you get a `Time`, `Metric` and a `Value` column, where `Metric` is the name of the time series.
### Time series to columns
![](/img/docs/v2/table_ts_to_columns2.png)
{{< docs-imagebox img="/img/docs/v45/table_ts_to_columns.png" >}}
This transform allows you to take multiple time series and group them by time, which results in the primary column being `Time` and a column for each time series.
### Time series aggregations
![](/img/docs/v2/table_ts_to_aggregations2.png)
{{< docs-imagebox img="/img/docs/v45/table_ts_to_aggregations.png" >}}
This table transformation will lay out your table into rows by metric, allowing columns of `Avg`, `Min`, `Max`, `Total`, `Current` and `Count`. More than one column can be added.
### Annotations
![](/img/docs/v2/table_annotations.png)
{{< docs-imagebox img="/img/docs/v45/table_annotations.png" >}}
If you have annotations enabled in the dashboard you can have the table show them. If you configure this
mode then any queries you have in the metrics tab will be ignored.
### JSON Data
![](/img/docs/v2/table_json_data.png)
{{< docs-imagebox img="/img/docs/v45/table_json_data.png" max-width="500px" >}}
If you have an Elasticsearch **Raw Document** query or an Elasticsearch query without a `date histogram` use this
transform mode and pick the columns using the **Columns** section.
![](/img/docs/v2/elastic_raw_doc.png)
{{< docs-imagebox img="/img/docs/v45/elastic_raw_doc.png" >}}
## Table Display
<img class="no-shadow" src="/img/docs/v2/table-display.png">
{{< docs-imagebox img="/img/docs/v45/table_paging.png" class="docs-image--no-shadow docs-image--right" max-width="350px" >}}
1. `Pagination (Page Size)`: The `Pagination` (page size) setting is the threshold at which the table rows will be broken into pages. For example, if your table had 95 records with a pagination value of 10, your table would be split across 10 pages.
2. `Scroll`: The `scroll bar` checkbox toggles the ability to scroll within the panel; when unchecked, the panel height will grow to display all rows.
@ -81,13 +89,11 @@ transform mode and pick the columns using the **Columns** section.
The column styles allow you to control how dates and numbers are formatted.
<img class="no-shadow" src="/img/docs/v2/Column-Options.png">
{{< docs-imagebox img="/img/docs/v45/table_column_styles.png" class="docs-image--no-shadow" >}}
1. `Name or regex`: The Name or Regex field controls what columns the rule should be applied to. The regex or name filter will be matched against the column name, not against column values.
2. `Type`: The three supported types of types are `Number`, `String` and `Date`.
3. `Title`: Title for the column, when using a Regex the title can include replacement strings like `$1`.
4. `Format`: Specify date format. Only available when `Type` is set to `Date`.
5. `Coloring` and `Thresholds`: Specify color mode and thresholds limits.
6. `Unit` and `Decimals`: Specify unit and decimal precision for numbers.
7. `Add column style rule`: Add new column rule.
2. `Column Header`: Title for the column, when using a Regex the title can include replacement strings like `$1`.
3. `Add column style rule`: Add new column rule.
4. `Thresholds` and `Coloring`: Specify color mode and thresholds limits.
5. `Type`: The three supported column types are `Number`, `String` and `Date`. `Unit` and `Decimals`: Specify unit and decimal precision for numbers. `Format`: Specify the date format for dates.

View File

@ -8,7 +8,7 @@ weight = 7
# Keyboard shortcuts
{{< docs-imagebox img="/img/docs/v4/shortcuts.png" max-width="20rem" >}}
{{< docs-imagebox img="/img/docs/v4/shortcuts.png" max-width="20rem" class="docs-image--right" >}}
Grafana v4 introduces a number of really powerful keyboard shortcuts. You can now focus a panel
by hovering over it with your mouse. With a panel focused you can simply hit `e` to toggle panel

View File

@ -16,16 +16,13 @@ weight = -4
### New prometheus query editor
The new query editor has full syntax highlighting. As well as auto complete for metrics, functions, and range vectors.
The new query editor has full syntax highlighting, as well as auto complete for metrics, functions, and range vectors. There are also integrated function docs right in the query editor!
![](/img/docs/v45/new_prom_editor_1.png)
There is also integrated function docs right from the query editor!
![](/img/docs/v45/new_prom_editor_2.png)
{{< docs-imagebox img="/img/docs/v45/prometheus_query_editor_still.png" class="docs-image--block" animated-gif="/img/docs/v45/prometheus_query_editor.gif" >}}
### Elasticsearch: Add ad-hoc filters from the table panel
![](/img/docs/v45/elastic_ad_hoc_filters.png)
{{< docs-imagebox img="/img/docs/v45/elastic_ad_hoc_filters.png" class="docs-image--block" >}}
### Table cell links!
Create column styles that turn cells into links that use the value in the cell (or other row values) to generate a URL to another dashboard or system:

View File

@ -161,6 +161,7 @@ Only works with Basic Authentication (username and password). See [introduction]
"enabled":"false",
"from_address":"admin@grafana.localhost",
"from_name":"Grafana",
"ehlo_identity":"dashboard.example.com",
"host":"localhost:25",
"key_file":"",
"password":"************",

View File

@ -308,15 +308,15 @@ options are `Editor` and `Admin`.
## [auth.github]
You need to create a GitHub application (you find this under the GitHub
profile page). When you create the application you will need to specify
You need to create a GitHub OAuth application (you find this under the GitHub
settings page). When you create the application you will need to specify
a callback URL. Specify this as callback:
http://<my_grafana_server_name_or_ip>:<grafana_server_port>/login/github
This callback URL must match the full HTTP address that you use in your
browser to access Grafana, but with the prefix path of `/login/github`.
When the GitHub application is created you will get a Client ID and a
When the GitHub OAuth application is created you will get a Client ID and a
Client Secret. Specify these in the Grafana configuration file. For
example:
@ -593,6 +593,9 @@ Address used when sending out emails, defaults to `admin@grafana.localhost`
### from_name
Name to be used when sending out emails, defaults to `Grafana`
### ehlo_identity
Name to be used as the client identity for EHLO in the SMTP dialog; defaults to instance_name.
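A minimal sketch of the corresponding `[smtp]` settings, reusing the example identity from the sample config shipped with this change:
```ini
[smtp]
enabled = true
# Overrides the instance_name default used for the EHLO handshake
ehlo_identity = dashboard.example.com
```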
## [log]
### mode
@ -645,7 +648,7 @@ Time to live for snapshots.
These options control how images should be made public so they can be shared on services like Slack.
### provider
You can choose between (s3, webdav). If left empty Grafana will ignore the upload action.
You can choose between (s3, webdav, gcs). If left empty Grafana will ignore the upload action.
## [external_image_storage.s3]
@ -677,6 +680,17 @@ basic auth username
### password
basic auth password
## [external_image_storage.gcs]
### key_file
Path to a JSON key file associated with a Google service account, used to authenticate and authorize.
Service Account keys can be created and downloaded from https://console.developers.google.com/permissions/serviceaccounts.
The Service Account should have the "Storage Object Writer" role.
### bucket
Bucket name on Google Cloud Storage.
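Putting the two options together, a sketch of a working GCS setup; the key file path and bucket name below are placeholders, not defaults:
```ini
[external_image_storage]
provider = gcs

[external_image_storage.gcs]
# Placeholder path to a service account key with the "Storage Object Writer" role
key_file = /etc/grafana/gcs-service-account.json
# Placeholder bucket name
bucket = my-grafana-images
```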
## [alerting]
### enabled

View File

@ -15,27 +15,31 @@ weight = 1
Description | Download
------------ | -------------
Stable for Debian-based Linux | [grafana_4.4.3_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb)
Beta for Debian-based Linux | [grafana_4.5.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb)
Stable for Debian-based Linux | [grafana_4.5.2_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.2_amd64.deb)
<!-- Beta for Debian-based Linux | [grafana_4.5.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb) -->
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
## Install Stable
```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.2_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_4.4.3_amd64.deb
sudo dpkg -i grafana_4.5.2_amd64.deb
```
<!--
## Install Latest Beta
```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.2-beta1_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_4.5.0-beta1_amd64.deb
sudo dpkg -i grafana_4.5.2-beta1_amd64.deb
```
-->
## APT Repository

View File

@ -43,3 +43,35 @@ To upgrade grafana if you've installed from HEAD:
```
brew reinstall --HEAD grafana/grafana/grafana
```
### Starting Grafana
To start Grafana using Homebrew services, first make sure homebrew/services is installed.
```
brew tap homebrew/services
```
Then start Grafana using:
```
brew services start grafana
```
### Configuration
The configuration file should be located at `/usr/local/etc/grafana/grafana.ini`.
### Logs
The log file should be located at `/usr/local/var/log/grafana/grafana.log`.
### Plugins
If you want to manually install a plugin place it here: `/usr/local/var/lib/grafana/plugins`.
### Database
The default SQLite database is located at `/usr/local/var/lib/grafana`.
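For example, to confirm the service is running and writing logs, you can follow the log file at the path above:
```
tail -f /usr/local/var/log/grafana/grafana.log
```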

View File

@ -15,8 +15,9 @@ weight = 2
Description | Download
------------ | -------------
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.4.3 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm)
Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-beta1.x86_64.rpm)
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.2 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.2-1.x86_64.rpm)
<!-- Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-beta1.x86_64.rpm) -->
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@ -25,19 +26,19 @@ installation.
You can install Grafana using Yum directly.
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.2-1.x86_64.rpm
Or install manually using `rpm`.
#### On CentOS / Fedora / Redhat:
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.2-1.x86_64.rpm
$ sudo yum install initscripts fontconfig
$ sudo rpm -Uvh grafana-4.4.3-1.x86_64.rpm
$ sudo rpm -Uvh grafana-4.5.2-1.x86_64.rpm
#### On OpenSuse:
$ sudo rpm -i --nodeps grafana-4.4.3-1.x86_64.rpm
$ sudo rpm -i --nodeps grafana-4.5.2-1.x86_64.rpm
## Install via YUM Repository
@ -53,8 +54,7 @@ Add the following to a new file at `/etc/yum.repos.d/grafana.repo`
sslverify=1
sslcacert=/etc/pki/tls/certs/ca-bundle.crt
There is also a testing repository if you want beta or release
candidates.
There is also a testing repository if you want beta or release candidates.
baseurl=https://packagecloud.io/grafana/testing/el/6/$basearch

View File

@ -94,10 +94,10 @@ to the same location (and overwrite the existing files). This might overwrite yo
recommend you place your config changes in a file named `<grafana_install_dir>/conf/custom.ini`
as this will make upgrades easier without risking losing your config changes.
## Upgrading form 1.x
## Upgrading from 1.x
[Migrating from 1.x to 2.x]({{< relref "installation/migrating_to2.md" >}})
## Upgrading form 2.x
## Upgrading from 2.x
We are not aware of any issues upgrading directly from 2.x to 4.x but to on the safe side go via 3.x.
We are not aware of any issues upgrading directly from 2.x to 4.x but to be on the safe side go via 3.x => 4.x.

View File

@ -13,7 +13,7 @@ weight = 3
Description | Download
------------ | -------------
Latest stable package for Windows | [grafana.4.4.3.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3.windows-x64.zip)
Latest stable package for Windows | [grafana.4.5.2.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.2.windows-x64.zip)
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@ -27,7 +27,7 @@ this folder to anywhere you want Grafana to run from. Go into the
The default Grafana port is `3000`; this port requires extra permissions
on Windows. Edit `custom.ini` and uncomment the `http_port`
configuration option and change it to something like `8080` or similar.
configuration option (`;` is the comment character in ini files) and change it to something like `8080` or similar.
That port should not require extra Windows privileges.
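A sketch of the resulting `custom.ini` entry, assuming the stock layout where `http_port` sits under the `[server]` section:
```ini
[server]
; uncommented and changed from the privileged default of 3000
http_port = 8080
```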
Start Grafana by executing `grafana-server.exe`, preferably from the

View File

@ -72,6 +72,11 @@ The Download URL from Grafana.com API is in this form:
`https://grafana.com/api/plugins/<plugin id>/versions/<version number>/download`
You can specify a local URL by using the `--pluginUrl` option.
```
grafana-cli --pluginUrl https://nexus.company.com/grafana/plugins/<plugin-id>-<plugin-version>.zip plugins install <plugin-id>
```
To manually install a Plugin via the Grafana.com API:
1. Find the plugin you want to download; the plugin id can be found on the Installation tab on the plugin's page on Grafana.com. In this example, the plugin id is `jdbranham-diagram-panel`:

View File

@ -1,34 +0,0 @@
+++
title = "Keyboard shortcuts"
keywords = ["grafana", "dashboard", "documentation", "shortcuts"]
type = "docs"
[menu.docs]
parent = "dashboard_features"
weight = 8
+++
# Keyboard Shortcuts
No mouse? No problem. Grafana has extensive keyboard shortcuts to allow you to navigate throughout the interface. This comes in especially handy when dealing with dealing with single-purpose machines powering on-wall displays that may not have a mouse available.
## Dashboard Keyboard Shortcuts
Press `Shift`+`?` to open the keyboard shortcut dialog from anywhere within the dashboard views.
<img class="no-shadow" src="/img/docs/v2/Grafana-Keyboard-Shortcuts.gif" style="width:80%;">
|Shortcut|Action|
|---|---|
|`Esc`|Exit fullscreen edit/view mode, close search or any editor view|
|`F`|Open dashboard search view (also contains import/playlist controls)|
|`R`|Refresh (Fetches new data and rerenders panels)|
|`CTRL`+`S`|Save dashboard|
|`CTRL`+`H`|Hide row controls|
|`CTRL`+`Z`|Zoom out|
|`CTRL`+`O`|Enable/Disable shared graph crosshair|
**Note**: Grafana keyboard shortcuts are the same across operating system.
Have a suggestion for a new keyboard shortcut? Let us know.

View File

@ -16,7 +16,7 @@ Since Grafana automatically scales Dashboards to any resolution they're perfect
## Creating a Playlist
{{< docs-imagebox img="/img/docs/v3/playlist.png" max-width="25rem" >}}
{{< docs-imagebox img="/img/docs/v3/playlist.png" max-width="25rem" class="docs-image--right">}}
The Playlist feature can be accessed from Grafana's sidemenu, in the Dashboard submenu.
@ -43,3 +43,25 @@ Playlists can also be manually controlled utilizing the Playlist controls at the
Click the stop button to stop the Playlist, and exit to the current Dashboard.
Click the next button to advance to the next Dashboard in the Playlist.
Click the back button to rewind to the previous Dashboard in the Playlist.
## TV or Kiosk Mode
In TV mode the top navbar, row & panel controls will all fade to transparent.
This happens automatically after one minute of user inactivity but can also be toggled manually
with the `d v` sequence shortcut. Any mouse movement or keyboard action will
restore navbar & controls.
Another feature is the kiosk mode - in kiosk mode the navbar is completely hidden/removed from view. This can be enabled with the `d k`
shortcut.
To put a playlist into kiosk mode, use the `d k` shortcut after the playlist has started. The same shortcut will toggle the playlist out of kiosk mode.
### Linking to the Playlist in Kiosk Mode
If you want to create a link to the playlist with kiosk mode enabled:
1. Copy the Start URL (by right-clicking on the Play button and choosing Copy link address).
2. Add the `?kiosk` parameter to the url.
For example, to open the first playlist on the Grafana Play site in kiosk mode: [http://play.grafana.org/playlists/play/1?kiosk](http://play.grafana.org/playlists/play/1?kiosk)

View File

@ -74,7 +74,8 @@ If you do not get an image when opening this link verify that the required font
### Grafana API Key
<img src="/img/docs/v2/orgdropdown_api_keys.png" style="width: 150px" class="right"></img>
{{< docs-imagebox img="/img/docs/v2/orgdropdown_api_keys.png" max-width="150px" class="docs-image--right">}}
You need to set the environment variable `HUBOT_GRAFANA_API_KEY` to a Grafana API Key.
You can add these from the API Keys page which you find in the Organization dropdown.

View File

@ -1,4 +1,4 @@
{
"stable": "4.4.1",
"testing": "4.4.1"
"stable": "4.5.2",
"testing": "4.5.2"
}

View File

@ -4,12 +4,14 @@
"company": "Grafana Labs"
},
"name": "grafana",
"version": "4.5.0-beta1",
"version": "4.6.0-pre1",
"repository": {
"type": "git",
"url": "http://github.com/grafana/grafana.git"
},
"devDependencies": {
"@types/react": "^16.0.5",
"@types/react-dom": "^15.5.4",
"autoprefixer": "^6.4.0",
"es6-promise": "^3.0.2",
"es6-shim": "^0.35.1",
@ -48,7 +50,7 @@
"mocha": "3.2.0",
"phantomjs-prebuilt": "^2.1.14",
"reflect-metadata": "0.1.8",
"rxjs": "^5.0.0-rc.5",
"rxjs": "^5.4.3",
"sass-lint": "^1.10.2",
"systemjs": "0.19.41",
"zone.js": "^0.7.2"
@ -60,22 +62,35 @@
},
"license": "Apache-2.0",
"dependencies": {
"@types/enzyme": "^2.8.8",
"ace-builds": "^1.2.8",
"angular": "^1.6.6",
"angular-bindonce": "^0.3.1",
"angular-mocks": "^1.6.6",
"angular-native-dragdrop": "^1.2.2",
"angular-route": "^1.6.6",
"angular-sanitize": "^1.6.6",
"clipboard": "^1.7.1",
"eventemitter3": "^2.0.2",
"gaze": "^1.1.2",
"grunt-jscs": "3.0.1",
"grunt-sass-lint": "^0.2.2",
"grunt-sync": "^0.6.2",
"jquery": "^3.2.1",
"karma-sinon": "^1.0.5",
"lodash": "^4.17.2",
"lodash": "^4.17.4",
"mousetrap": "^1.6.0",
"ngreact": "^0.4.1",
"react": "^15.6.1",
"react-dom": "^15.6.1",
"react-test-renderer": "^15.6.1",
"remarkable": "^1.7.1",
"sinon": "1.17.6",
"systemjs-builder": "^0.15.34",
"tether": "^1.4.0",
"tether-drop": "https://github.com/torkelo/drop",
"tslint": "^5.1.0",
"typescript": "^2.2.2",
"tslint": "^5.7.0",
"typescript": "^2.5.2",
"virtual-scroll": "^1.1.1"
}
}

View File

@ -1,5 +1,5 @@
#! /usr/bin/env bash
version=4.4.2
version=4.5.2
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb
@ -8,15 +8,15 @@ package_cloud push grafana/stable/debian/wheezy grafana_${version}_amd64.deb
package_cloud push grafana/stable/debian/stretch grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/jessie grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/wheezy grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/stretch grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/wheezy grafana_${version}_amd64.deb --verbose
package_cloud push grafana/testing/debian/stretch grafana_${version}_amd64.deb --verbose
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-${version}-1.x86_64.rpm
package_cloud push grafana/testing/el/6 grafana-${version}-1.x86_64.rpm
package_cloud push grafana/testing/el/7 grafana-${version}-1.x86_64.rpm
package_cloud push grafana/testing/el/6 grafana-${version}-1.x86_64.rpm --verbose
package_cloud push grafana/testing/el/7 grafana-${version}-1.x86_64.rpm --verbose
package_cloud push grafana/stable/el/7 grafana-${version}-1.x86_64.rpm
package_cloud push grafana/stable/el/6 grafana-${version}-1.x86_64.rpm
package_cloud push grafana/stable/el/7 grafana-${version}-1.x86_64.rpm --verbose
package_cloud push grafana/stable/el/6 grafana-${version}-1.x86_64.rpm --verbose
rm grafana*.{deb,rpm}

View File

@ -35,7 +35,7 @@ func AdminCreateUser(c *middleware.Context, form dtos.AdminCreateUserForm) {
return
}
metrics.M_Api_Admin_User_Create.Inc(1)
metrics.M_Api_Admin_User_Create.Inc()
user := cmd.Result

View File

@ -10,7 +10,7 @@ import (
// Register adds http routes
func (hs *HttpServer) registerRoutes() {
r := hs.macaron
macaronR := hs.macaron
reqSignedIn := middleware.Auth(&middleware.AuthOptions{ReqSignedIn: true})
reqGrafanaAdmin := middleware.Auth(&middleware.AuthOptions{ReqSignedIn: true, ReqGrafanaAdmin: true})
reqEditorRole := middleware.RoleAuth(m.ROLE_EDITOR, m.ROLE_ADMIN)
@ -19,7 +19,9 @@ func (hs *HttpServer) registerRoutes() {
bind := binding.Bind
// automatically set HEAD for every GET
r.SetAutoHead(true)
macaronR.SetAutoHead(true)
r := newRouteRegister(middleware.RequestMetrics, middleware.RequestTracing)
// not logged in views
r.Get("/", reqSignedIn, Index)
@ -98,198 +100,195 @@ func (hs *HttpServer) registerRoutes() {
r.Get("/api/login/ping", quota("session"), LoginApiPing)
// authed api
r.Group("/api", func() {
r.Group("/api", func(apiRoute RouteRegister) {
// user (signed in)
r.Group("/user", func() {
r.Get("/", wrap(GetSignedInUser))
r.Put("/", bind(m.UpdateUserCommand{}), wrap(UpdateSignedInUser))
r.Post("/using/:id", wrap(UserSetUsingOrg))
r.Get("/orgs", wrap(GetSignedInUserOrgList))
apiRoute.Group("/user", func(userRoute RouteRegister) {
userRoute.Get("/", wrap(GetSignedInUser))
userRoute.Put("/", bind(m.UpdateUserCommand{}), wrap(UpdateSignedInUser))
userRoute.Post("/using/:id", wrap(UserSetUsingOrg))
userRoute.Get("/orgs", wrap(GetSignedInUserOrgList))
r.Post("/stars/dashboard/:id", wrap(StarDashboard))
r.Delete("/stars/dashboard/:id", wrap(UnstarDashboard))
userRoute.Post("/stars/dashboard/:id", wrap(StarDashboard))
userRoute.Delete("/stars/dashboard/:id", wrap(UnstarDashboard))
r.Put("/password", bind(m.ChangeUserPasswordCommand{}), wrap(ChangeUserPassword))
r.Get("/quotas", wrap(GetUserQuotas))
r.Put("/helpflags/:id", wrap(SetHelpFlag))
userRoute.Put("/password", bind(m.ChangeUserPasswordCommand{}), wrap(ChangeUserPassword))
userRoute.Get("/quotas", wrap(GetUserQuotas))
userRoute.Put("/helpflags/:id", wrap(SetHelpFlag))
// For dev purpose
r.Get("/helpflags/clear", wrap(ClearHelpFlags))
userRoute.Get("/helpflags/clear", wrap(ClearHelpFlags))
r.Get("/preferences", wrap(GetUserPreferences))
r.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateUserPreferences))
userRoute.Get("/preferences", wrap(GetUserPreferences))
userRoute.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateUserPreferences))
})
// users (admin permission required)
r.Group("/users", func() {
r.Get("/", wrap(SearchUsers))
r.Get("/search", wrap(SearchUsersWithPaging))
r.Get("/:id", wrap(GetUserById))
r.Get("/:id/orgs", wrap(GetUserOrgList))
apiRoute.Group("/users", func(usersRoute RouteRegister) {
usersRoute.Get("/", wrap(SearchUsers))
usersRoute.Get("/search", wrap(SearchUsersWithPaging))
usersRoute.Get("/:id", wrap(GetUserById))
usersRoute.Get("/:id/orgs", wrap(GetUserOrgList))
// query parameters /users/lookup?loginOrEmail=admin@example.com
r.Get("/lookup", wrap(GetUserByLoginOrEmail))
r.Put("/:id", bind(m.UpdateUserCommand{}), wrap(UpdateUser))
r.Post("/:id/using/:orgId", wrap(UpdateUserActiveOrg))
usersRoute.Get("/lookup", wrap(GetUserByLoginOrEmail))
usersRoute.Put("/:id", bind(m.UpdateUserCommand{}), wrap(UpdateUser))
usersRoute.Post("/:id/using/:orgId", wrap(UpdateUserActiveOrg))
}, reqGrafanaAdmin)
// org information available to all users.
r.Group("/org", func() {
r.Get("/", wrap(GetOrgCurrent))
r.Get("/quotas", wrap(GetOrgQuotas))
apiRoute.Group("/org", func(orgRoute RouteRegister) {
orgRoute.Get("/", wrap(GetOrgCurrent))
orgRoute.Get("/quotas", wrap(GetOrgQuotas))
})
// current org
r.Group("/org", func() {
r.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrgCurrent))
r.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddressCurrent))
r.Post("/users", quota("user"), bind(m.AddOrgUserCommand{}), wrap(AddOrgUserToCurrentOrg))
r.Get("/users", wrap(GetOrgUsersForCurrentOrg))
r.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUserForCurrentOrg))
r.Delete("/users/:userId", wrap(RemoveOrgUserForCurrentOrg))
apiRoute.Group("/org", func(orgRoute RouteRegister) {
orgRoute.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrgCurrent))
orgRoute.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddressCurrent))
orgRoute.Post("/users", quota("user"), bind(m.AddOrgUserCommand{}), wrap(AddOrgUserToCurrentOrg))
orgRoute.Get("/users", wrap(GetOrgUsersForCurrentOrg))
orgRoute.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUserForCurrentOrg))
orgRoute.Delete("/users/:userId", wrap(RemoveOrgUserForCurrentOrg))
// invites
r.Get("/invites", wrap(GetPendingOrgInvites))
r.Post("/invites", quota("user"), bind(dtos.AddInviteForm{}), wrap(AddOrgInvite))
r.Patch("/invites/:code/revoke", wrap(RevokeInvite))
orgRoute.Get("/invites", wrap(GetPendingOrgInvites))
orgRoute.Post("/invites", quota("user"), bind(dtos.AddInviteForm{}), wrap(AddOrgInvite))
orgRoute.Patch("/invites/:code/revoke", wrap(RevokeInvite))
// prefs
r.Get("/preferences", wrap(GetOrgPreferences))
r.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateOrgPreferences))
orgRoute.Get("/preferences", wrap(GetOrgPreferences))
orgRoute.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateOrgPreferences))
}, reqOrgAdmin)
// create new org
r.Post("/orgs", quota("org"), bind(m.CreateOrgCommand{}), wrap(CreateOrg))
apiRoute.Post("/orgs", quota("org"), bind(m.CreateOrgCommand{}), wrap(CreateOrg))
// search all orgs
r.Get("/orgs", reqGrafanaAdmin, wrap(SearchOrgs))
apiRoute.Get("/orgs", reqGrafanaAdmin, wrap(SearchOrgs))
// orgs (admin routes)
r.Group("/orgs/:orgId", func() {
r.Get("/", wrap(GetOrgById))
r.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrg))
r.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddress))
r.Delete("/", wrap(DeleteOrgById))
r.Get("/users", wrap(GetOrgUsers))
r.Post("/users", bind(m.AddOrgUserCommand{}), wrap(AddOrgUser))
r.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUser))
r.Delete("/users/:userId", wrap(RemoveOrgUser))
r.Get("/quotas", wrap(GetOrgQuotas))
r.Put("/quotas/:target", bind(m.UpdateOrgQuotaCmd{}), wrap(UpdateOrgQuota))
apiRoute.Group("/orgs/:orgId", func(orgsRoute RouteRegister) {
orgsRoute.Get("/", wrap(GetOrgById))
orgsRoute.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrg))
orgsRoute.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddress))
orgsRoute.Delete("/", wrap(DeleteOrgById))
orgsRoute.Get("/users", wrap(GetOrgUsers))
orgsRoute.Post("/users", bind(m.AddOrgUserCommand{}), wrap(AddOrgUser))
orgsRoute.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUser))
orgsRoute.Delete("/users/:userId", wrap(RemoveOrgUser))
orgsRoute.Get("/quotas", wrap(GetOrgQuotas))
orgsRoute.Put("/quotas/:target", bind(m.UpdateOrgQuotaCmd{}), wrap(UpdateOrgQuota))
}, reqGrafanaAdmin)
// orgs (admin routes)
r.Group("/orgs/name/:name", func() {
r.Get("/", wrap(GetOrgByName))
apiRoute.Group("/orgs/name/:name", func(orgsRoute RouteRegister) {
orgsRoute.Get("/", wrap(GetOrgByName))
}, reqGrafanaAdmin)
// auth api keys
r.Group("/auth/keys", func() {
r.Get("/", wrap(GetApiKeys))
r.Post("/", quota("api_key"), bind(m.AddApiKeyCommand{}), wrap(AddApiKey))
r.Delete("/:id", wrap(DeleteApiKey))
apiRoute.Group("/auth/keys", func(keysRoute RouteRegister) {
keysRoute.Get("/", wrap(GetApiKeys))
keysRoute.Post("/", quota("api_key"), bind(m.AddApiKeyCommand{}), wrap(AddApiKey))
keysRoute.Delete("/:id", wrap(DeleteApiKey))
}, reqOrgAdmin)
// Preferences
r.Group("/preferences", func() {
r.Post("/set-home-dash", bind(m.SavePreferencesCommand{}), wrap(SetHomeDashboard))
apiRoute.Group("/preferences", func(prefRoute RouteRegister) {
prefRoute.Post("/set-home-dash", bind(m.SavePreferencesCommand{}), wrap(SetHomeDashboard))
})
// Data sources
r.Group("/datasources", func() {
r.Get("/", wrap(GetDataSources))
r.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), AddDataSource)
r.Put("/:id", bind(m.UpdateDataSourceCommand{}), wrap(UpdateDataSource))
r.Delete("/:id", DeleteDataSourceById)
r.Delete("/name/:name", DeleteDataSourceByName)
r.Get("/:id", wrap(GetDataSourceById))
r.Get("/name/:name", wrap(GetDataSourceByName))
apiRoute.Group("/datasources", func(datasourceRoute RouteRegister) {
datasourceRoute.Get("/", wrap(GetDataSources))
datasourceRoute.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), AddDataSource)
datasourceRoute.Put("/:id", bind(m.UpdateDataSourceCommand{}), wrap(UpdateDataSource))
datasourceRoute.Delete("/:id", DeleteDataSourceById)
datasourceRoute.Delete("/name/:name", DeleteDataSourceByName)
datasourceRoute.Get("/:id", wrap(GetDataSourceById))
datasourceRoute.Get("/name/:name", wrap(GetDataSourceByName))
}, reqOrgAdmin)
r.Get("/datasources/id/:name", wrap(GetDataSourceIdByName), reqSignedIn)
apiRoute.Get("/datasources/id/:name", wrap(GetDataSourceIdByName), reqSignedIn)
r.Get("/plugins", wrap(GetPluginList))
r.Get("/plugins/:pluginId/settings", wrap(GetPluginSettingById))
r.Get("/plugins/:pluginId/markdown/:name", wrap(GetPluginMarkdown))
apiRoute.Get("/plugins", wrap(GetPluginList))
apiRoute.Get("/plugins/:pluginId/settings", wrap(GetPluginSettingById))
apiRoute.Get("/plugins/:pluginId/markdown/:name", wrap(GetPluginMarkdown))
r.Group("/plugins", func() {
r.Get("/:pluginId/dashboards/", wrap(GetPluginDashboards))
r.Post("/:pluginId/settings", bind(m.UpdatePluginSettingCmd{}), wrap(UpdatePluginSetting))
apiRoute.Group("/plugins", func(pluginRoute RouteRegister) {
pluginRoute.Get("/:pluginId/dashboards/", wrap(GetPluginDashboards))
pluginRoute.Post("/:pluginId/settings", bind(m.UpdatePluginSettingCmd{}), wrap(UpdatePluginSetting))
}, reqOrgAdmin)
r.Get("/frontend/settings/", GetFrontendSettings)
r.Any("/datasources/proxy/:id/*", reqSignedIn, hs.ProxyDataSourceRequest)
r.Any("/datasources/proxy/:id", reqSignedIn, hs.ProxyDataSourceRequest)
apiRoute.Get("/frontend/settings/", GetFrontendSettings)
apiRoute.Any("/datasources/proxy/:id/*", reqSignedIn, hs.ProxyDataSourceRequest)
apiRoute.Any("/datasources/proxy/:id", reqSignedIn, hs.ProxyDataSourceRequest)
// Dashboard
r.Group("/dashboards", func() {
r.Get("/db/:slug", GetDashboard)
r.Delete("/db/:slug", reqEditorRole, DeleteDashboard)
apiRoute.Group("/dashboards", func(dashboardRoute RouteRegister) {
dashboardRoute.Get("/db/:slug", GetDashboard)
dashboardRoute.Delete("/db/:slug", reqEditorRole, DeleteDashboard)
r.Get("/id/:dashboardId/versions", wrap(GetDashboardVersions))
r.Get("/id/:dashboardId/versions/:id", wrap(GetDashboardVersion))
r.Post("/id/:dashboardId/restore", reqEditorRole, bind(dtos.RestoreDashboardVersionCommand{}), wrap(RestoreDashboardVersion))
dashboardRoute.Get("/id/:dashboardId/versions", wrap(GetDashboardVersions))
dashboardRoute.Get("/id/:dashboardId/versions/:id", wrap(GetDashboardVersion))
dashboardRoute.Post("/id/:dashboardId/restore", reqEditorRole, bind(dtos.RestoreDashboardVersionCommand{}), wrap(RestoreDashboardVersion))
r.Post("/calculate-diff", bind(dtos.CalculateDiffOptions{}), wrap(CalculateDashboardDiff))
dashboardRoute.Post("/calculate-diff", bind(dtos.CalculateDiffOptions{}), wrap(CalculateDashboardDiff))
r.Post("/db", reqEditorRole, bind(m.SaveDashboardCommand{}), wrap(PostDashboard))
r.Get("/file/:file", GetDashboardFromJsonFile)
r.Get("/home", wrap(GetHomeDashboard))
r.Get("/tags", GetDashboardTags)
r.Post("/import", bind(dtos.ImportDashboardCommand{}), wrap(ImportDashboard))
dashboardRoute.Post("/db", reqEditorRole, bind(m.SaveDashboardCommand{}), wrap(PostDashboard))
dashboardRoute.Get("/file/:file", GetDashboardFromJsonFile)
dashboardRoute.Get("/home", wrap(GetHomeDashboard))
dashboardRoute.Get("/tags", GetDashboardTags)
dashboardRoute.Post("/import", bind(dtos.ImportDashboardCommand{}), wrap(ImportDashboard))
})
// Dashboard snapshots
r.Group("/dashboard/snapshots", func() {
r.Get("/", wrap(SearchDashboardSnapshots))
apiRoute.Group("/dashboard/snapshots", func(dashboardRoute RouteRegister) {
dashboardRoute.Get("/", wrap(SearchDashboardSnapshots))
})
// Playlist
r.Group("/playlists", func() {
r.Get("/", wrap(SearchPlaylists))
r.Get("/:id", ValidateOrgPlaylist, wrap(GetPlaylist))
r.Get("/:id/items", ValidateOrgPlaylist, wrap(GetPlaylistItems))
r.Get("/:id/dashboards", ValidateOrgPlaylist, wrap(GetPlaylistDashboards))
r.Delete("/:id", reqEditorRole, ValidateOrgPlaylist, wrap(DeletePlaylist))
r.Put("/:id", reqEditorRole, bind(m.UpdatePlaylistCommand{}), ValidateOrgPlaylist, wrap(UpdatePlaylist))
r.Post("/", reqEditorRole, bind(m.CreatePlaylistCommand{}), wrap(CreatePlaylist))
apiRoute.Group("/playlists", func(playlistRoute RouteRegister) {
playlistRoute.Get("/", wrap(SearchPlaylists))
playlistRoute.Get("/:id", ValidateOrgPlaylist, wrap(GetPlaylist))
playlistRoute.Get("/:id/items", ValidateOrgPlaylist, wrap(GetPlaylistItems))
playlistRoute.Get("/:id/dashboards", ValidateOrgPlaylist, wrap(GetPlaylistDashboards))
playlistRoute.Delete("/:id", reqEditorRole, ValidateOrgPlaylist, wrap(DeletePlaylist))
playlistRoute.Put("/:id", reqEditorRole, bind(m.UpdatePlaylistCommand{}), ValidateOrgPlaylist, wrap(UpdatePlaylist))
playlistRoute.Post("/", reqEditorRole, bind(m.CreatePlaylistCommand{}), wrap(CreatePlaylist))
})
// Search
r.Get("/search/", Search)
apiRoute.Get("/search/", Search)
// metrics
r.Post("/tsdb/query", bind(dtos.MetricRequest{}), wrap(QueryMetrics))
r.Get("/tsdb/testdata/scenarios", wrap(GetTestDataScenarios))
r.Get("/tsdb/testdata/gensql", reqGrafanaAdmin, wrap(GenerateSqlTestData))
r.Get("/tsdb/testdata/random-walk", wrap(GetTestDataRandomWalk))
apiRoute.Post("/tsdb/query", bind(dtos.MetricRequest{}), wrap(QueryMetrics))
apiRoute.Get("/tsdb/testdata/scenarios", wrap(GetTestDataScenarios))
apiRoute.Get("/tsdb/testdata/gensql", reqGrafanaAdmin, wrap(GenerateSqlTestData))
apiRoute.Get("/tsdb/testdata/random-walk", wrap(GetTestDataRandomWalk))
// metrics
r.Get("/metrics", wrap(GetInternalMetrics))
r.Group("/alerts", func() {
r.Post("/test", bind(dtos.AlertTestCommand{}), wrap(AlertTest))
r.Post("/:alertId/pause", bind(dtos.PauseAlertCommand{}), wrap(PauseAlert), reqEditorRole)
r.Get("/:alertId", ValidateOrgAlert, wrap(GetAlert))
r.Get("/", wrap(GetAlerts))
r.Get("/states-for-dashboard", wrap(GetAlertStatesForDashboard))
apiRoute.Group("/alerts", func(alertsRoute RouteRegister) {
alertsRoute.Post("/test", bind(dtos.AlertTestCommand{}), wrap(AlertTest))
alertsRoute.Post("/:alertId/pause", bind(dtos.PauseAlertCommand{}), wrap(PauseAlert), reqEditorRole)
alertsRoute.Get("/:alertId", ValidateOrgAlert, wrap(GetAlert))
alertsRoute.Get("/", wrap(GetAlerts))
alertsRoute.Get("/states-for-dashboard", wrap(GetAlertStatesForDashboard))
})
r.Get("/alert-notifications", wrap(GetAlertNotifications))
r.Get("/alert-notifiers", wrap(GetAlertNotifiers))
apiRoute.Get("/alert-notifications", wrap(GetAlertNotifications))
apiRoute.Get("/alert-notifiers", wrap(GetAlertNotifiers))
r.Group("/alert-notifications", func() {
r.Post("/test", bind(dtos.NotificationTestCommand{}), wrap(NotificationTest))
r.Post("/", bind(m.CreateAlertNotificationCommand{}), wrap(CreateAlertNotification))
r.Put("/:notificationId", bind(m.UpdateAlertNotificationCommand{}), wrap(UpdateAlertNotification))
r.Get("/:notificationId", wrap(GetAlertNotificationById))
r.Delete("/:notificationId", wrap(DeleteAlertNotification))
apiRoute.Group("/alert-notifications", func(alertNotifications RouteRegister) {
alertNotifications.Post("/test", bind(dtos.NotificationTestCommand{}), wrap(NotificationTest))
alertNotifications.Post("/", bind(m.CreateAlertNotificationCommand{}), wrap(CreateAlertNotification))
alertNotifications.Put("/:notificationId", bind(m.UpdateAlertNotificationCommand{}), wrap(UpdateAlertNotification))
alertNotifications.Get("/:notificationId", wrap(GetAlertNotificationById))
alertNotifications.Delete("/:notificationId", wrap(DeleteAlertNotification))
}, reqEditorRole)
r.Get("/annotations", wrap(GetAnnotations))
r.Post("/annotations/mass-delete", reqOrgAdmin, bind(dtos.DeleteAnnotationsCmd{}), wrap(DeleteAnnotations))
apiRoute.Get("/annotations", wrap(GetAnnotations))
apiRoute.Post("/annotations/mass-delete", reqOrgAdmin, bind(dtos.DeleteAnnotationsCmd{}), wrap(DeleteAnnotations))
r.Group("/annotations", func() {
r.Post("/", bind(dtos.PostAnnotationsCmd{}), wrap(PostAnnotation))
apiRoute.Group("/annotations", func(annotationsRoute RouteRegister) {
annotationsRoute.Post("/", bind(dtos.PostAnnotationsCmd{}), wrap(PostAnnotation))
}, reqEditorRole)
// error test
@@ -298,16 +297,16 @@ func (hs *HttpServer) registerRoutes() {
}, reqSignedIn)
// admin api
r.Group("/api/admin", func() {
r.Get("/settings", AdminGetSettings)
r.Post("/users", bind(dtos.AdminCreateUserForm{}), AdminCreateUser)
r.Put("/users/:id/password", bind(dtos.AdminUpdateUserPasswordForm{}), AdminUpdateUserPassword)
r.Put("/users/:id/permissions", bind(dtos.AdminUpdateUserPermissionsForm{}), AdminUpdateUserPermissions)
r.Delete("/users/:id", AdminDeleteUser)
r.Get("/users/:id/quotas", wrap(GetUserQuotas))
r.Put("/users/:id/quotas/:target", bind(m.UpdateUserQuotaCmd{}), wrap(UpdateUserQuota))
r.Get("/stats", AdminGetStats)
r.Post("/pause-all-alerts", bind(dtos.PauseAllAlertsCommand{}), wrap(PauseAllAlerts))
r.Group("/api/admin", func(adminRoute RouteRegister) {
adminRoute.Get("/settings", AdminGetSettings)
adminRoute.Post("/users", bind(dtos.AdminCreateUserForm{}), AdminCreateUser)
adminRoute.Put("/users/:id/password", bind(dtos.AdminUpdateUserPasswordForm{}), AdminUpdateUserPassword)
adminRoute.Put("/users/:id/permissions", bind(dtos.AdminUpdateUserPermissionsForm{}), AdminUpdateUserPermissions)
adminRoute.Delete("/users/:id", AdminDeleteUser)
adminRoute.Get("/users/:id/quotas", wrap(GetUserQuotas))
adminRoute.Put("/users/:id/quotas/:target", bind(m.UpdateUserQuotaCmd{}), wrap(UpdateUserQuota))
adminRoute.Get("/stats", AdminGetStats)
adminRoute.Post("/pause-all-alerts", bind(dtos.PauseAllAlertsCommand{}), wrap(PauseAllAlerts))
}, reqGrafanaAdmin)
// rendering
@@ -326,7 +325,9 @@ func (hs *HttpServer) registerRoutes() {
// streams
//r.Post("/api/streams/push", reqSignedIn, bind(dtos.StreamMessage{}), liveConn.PushToStream)
InitAppPluginRoutes(r)
r.Register(macaronR)
r.NotFound(NotFoundHandler)
InitAppPluginRoutes(macaronR)
macaronR.NotFound(NotFoundHandler)
}
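The function above now builds every route through the RouteRegister abstraction (added in pkg/api/route_register.go further down) and replays them onto macaron in a single Register call. A minimal sketch of the pattern, with an illustrative handler:

m := macaron.New()
rr := newRouteRegister()
rr.Group("/api", func(api RouteRegister) {
	// any plain macaron handler works here
	api.Get("/ping", func(ctx *macaron.Context) { ctx.PlainText(200, []byte("pong")) })
})
rr.Register(m) // *macaron.Macaron embeds *macaron.Router, so it satisfies the local Router interface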
View File

@@ -17,8 +17,11 @@ import (
)
var pluginProxyTransport = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
Renegotiation: tls.RenegotiateFreelyAsClient,
},
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
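Renegotiation: tls.RenegotiateFreelyAsClient lets the plugin proxy accept server-initiated TLS renegotiation, which some middleboxes and IIS-style servers request mid-connection. For reference, a minimal sketch of the same setting on a standalone client:

client := &http.Client{
	Transport: &http.Transport{
		TLSClientConfig: &tls.Config{
			// accept renegotiation requests from the server
			Renegotiation: tls.RenegotiateFreelyAsClient,
		},
	},
}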
View File

@@ -266,7 +266,7 @@ func handleGetMetricStatistics(req *cwRequest, c *middleware.Context) {
c.JsonApiErr(500, "Unable to call AWS API", err)
return
}
metrics.M_Aws_CloudWatch_GetMetricStatistics.Inc(1)
metrics.M_Aws_CloudWatch_GetMetricStatistics.Inc()
c.JSON(200, resp)
}
@@ -302,7 +302,7 @@ func handleListMetrics(req *cwRequest, c *middleware.Context) {
var resp cloudwatch.ListMetricsOutput
err = svc.ListMetricsPages(params,
func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool {
metrics.M_Aws_CloudWatch_ListMetrics.Inc(1)
metrics.M_Aws_CloudWatch_ListMetrics.Inc()
metrics, _ := awsutil.ValuesAtPath(page, "Metrics")
for _, metric := range metrics {
resp.Metrics = append(resp.Metrics, metric.(*cloudwatch.Metric))
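The Inc(1) → Inc() changes here and in the handlers below come from replacing the internal go-metrics counters with Prometheus client counters, whose Inc takes no argument. A sketch of the new style, with an illustrative metric name:

var listMetricsCalls = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "aws_cloudwatch_list_metrics_total", // illustrative name
	Help: "Number of ListMetrics calls made to CloudWatch.",
})

func onListMetrics() {
	listMetricsCalls.Inc() // no argument; use Add(n) for larger increments
}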
View File

@@ -126,7 +126,7 @@ func init() {
"AWS/NATGateway": {"NatGatewayId"},
"AWS/OpsWorks": {"StackId", "LayerId", "InstanceId"},
"AWS/Redshift": {"NodeID", "ClusterIdentifier"},
"AWS/RDS": {"DBInstanceIdentifier", "DBClusterIdentifier", "DatabaseClass", "EngineName", "Role"},
"AWS/RDS": {"DBInstanceIdentifier", "DBClusterIdentifier", "DbClusterIdentifier", "DatabaseClass", "EngineName", "Role"},
"AWS/Route53": {"HealthCheckId", "Region"},
"AWS/S3": {"BucketName", "StorageType", "FilterId"},
"AWS/SES": {},
@@ -275,7 +275,7 @@ func getAllMetrics(cwData *datasourceInfo) (cloudwatch.ListMetricsOutput, error)
var resp cloudwatch.ListMetricsOutput
err = svc.ListMetricsPages(params,
func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool {
metrics.M_Aws_CloudWatch_ListMetrics.Inc(1)
metrics.M_Aws_CloudWatch_ListMetrics.Inc()
metrics, _ := awsutil.ValuesAtPath(page, "Metrics")
for _, metric := range metrics {
resp.Metrics = append(resp.Metrics, metric.(*cloudwatch.Metric))
View File

@@ -34,13 +34,13 @@ func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapsho
cmd.OrgId = -1
cmd.UserId = -1
metrics.M_Api_Dashboard_Snapshot_External.Inc(1)
metrics.M_Api_Dashboard_Snapshot_External.Inc()
} else {
cmd.Key = util.GetRandomString(32)
cmd.DeleteKey = util.GetRandomString(32)
cmd.OrgId = c.OrgId
cmd.UserId = c.UserId
metrics.M_Api_Dashboard_Snapshot_Create.Inc(1)
metrics.M_Api_Dashboard_Snapshot_Create.Inc()
}
if err := bus.Dispatch(&cmd); err != nil {
@@ -84,7 +84,7 @@ func GetDashboardSnapshot(c *middleware.Context) {
},
}
metrics.M_Api_Dashboard_Snapshot_Get.Inc(1)
metrics.M_Api_Dashboard_Snapshot_Get.Inc()
c.Resp.Header().Set("Cache-Control", "public, max-age=3600")
c.JSON(200, dto)
View File

@@ -62,6 +62,8 @@ func getFrontendSettingsMap(c *middleware.Context) (map[string]interface{}, erro
if ds.JsonData != nil {
dsMap["jsonData"] = ds.JsonData
} else {
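// send an empty object instead of null so the frontend does not trip on missing jsonData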
dsMap["jsonData"] = make(map[string]string)
}
if ds.Access == m.DS_ACCESS_DIRECT {
View File

@@ -11,6 +11,8 @@ import (
"path"
"time"
"github.com/prometheus/client_golang/prometheus/promhttp"
gocache "github.com/patrickmn/go-cache"
macaron "gopkg.in/macaron.v1"
@@ -165,9 +167,9 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron {
}))
m.Use(hs.healthHandler)
m.Use(hs.metricsEndpoint)
m.Use(middleware.GetContextHandler())
m.Use(middleware.Sessioner(&setting.SessionOptions))
m.Use(middleware.RequestMetrics())
m.Use(middleware.OrgRedirect())
// needs to be after context handler
@@ -180,6 +182,14 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron {
return m
}
func (hs *HttpServer) metricsEndpoint(ctx *macaron.Context) {
if ctx.Req.Method != "GET" || ctx.Req.URL.Path != "/metrics" {
return
}
promhttp.Handler().ServeHTTP(ctx.Resp, ctx.Req.Request)
}
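The middleware above backs the new /metrics scrape endpoint. A quick smoke test from Go (default Grafana port assumed; any HTTP client works):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:3000/metrics") // default port assumed
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%s", body) // Prometheus text exposition format
}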
func (hs *HttpServer) healthHandler(ctx *macaron.Context) {
if ctx.Req.Method != "GET" || ctx.Req.URL.Path != "/api/health" {
return
View File

@@ -127,7 +127,7 @@ func LoginPost(c *middleware.Context, cmd dtos.LoginCommand) Response {
c.SetCookie("redirect_to", "", -1, setting.AppSubUrl+"/")
}
metrics.M_Api_Login_Post.Inc(1)
metrics.M_Api_Login_Post.Inc()
return Json(200, result)
}
View File

@@ -186,7 +186,7 @@ func OAuthLogin(ctx *middleware.Context) {
// login
loginUserWithUser(userQuery.Result, ctx)
metrics.M_Api_Login_OAuth.Inc(1)
metrics.M_Api_Login_OAuth.Inc()
if redirectTo, _ := url.QueryUnescape(ctx.GetCookie("redirect_to")); len(redirectTo) > 0 {
ctx.SetCookie("redirect_to", "", -1, setting.AppSubUrl+"/")
View File

@@ -2,13 +2,10 @@ package api
import (
"context"
"encoding/json"
"net/http"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/tsdb"
@@ -34,7 +31,7 @@ func QueryMetrics(c *middleware.Context, reqDto dtos.MetricRequest) Response {
return ApiError(500, "failed to fetch data source", err)
}
request := &tsdb.Request{TimeRange: timeRange}
request := &tsdb.TsdbQuery{TimeRange: timeRange}
for _, query := range reqDto.Queries {
request.Queries = append(request.Queries, &tsdb.Query{
@@ -46,7 +43,7 @@
})
}
resp, err := tsdb.HandleRequest(context.Background(), request)
resp, err := tsdb.HandleRequest(context.Background(), dsQuery.Result, request)
if err != nil {
return ApiError(500, "Metric request error", err)
}
@@ -79,58 +76,6 @@ func GetTestDataScenarios(c *middleware.Context) Response {
return Json(200, &result)
}
func GetInternalMetrics(c *middleware.Context) Response {
if metrics.UseNilMetrics {
return Json(200, util.DynMap{"message": "Metrics disabled"})
}
snapshots := metrics.MetricStats.GetSnapshots()
resp := make(map[string]interface{})
for _, m := range snapshots {
metricName := m.Name() + m.StringifyTags()
switch metric := m.(type) {
case metrics.Gauge:
resp[metricName] = map[string]interface{}{
"value": metric.Value(),
}
case metrics.Counter:
resp[metricName] = map[string]interface{}{
"count": metric.Count(),
}
case metrics.Timer:
percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
resp[metricName] = map[string]interface{}{
"count": metric.Count(),
"min": metric.Min(),
"max": metric.Max(),
"mean": metric.Mean(),
"std": metric.StdDev(),
"p25": percentiles[0],
"p75": percentiles[1],
"p90": percentiles[2],
"p99": percentiles[3],
}
}
}
var b []byte
var err error
if b, err = json.MarshalIndent(resp, "", " "); err != nil {
return ApiError(500, "body json marshal", err)
}
return &NormalResponse{
body: b,
status: 200,
header: http.Header{
"Content-Type": []string{"application/json"},
},
}
}
// Generates an index out of range error
func GenerateError(c *middleware.Context) Response {
var array []string
@@ -153,18 +98,19 @@ func GetTestDataRandomWalk(c *middleware.Context) Response {
intervalMs := c.QueryInt64("intervalMs")
timeRange := tsdb.NewTimeRange(from, to)
request := &tsdb.Request{TimeRange: timeRange}
request := &tsdb.TsdbQuery{TimeRange: timeRange}
dsInfo := &models.DataSource{Type: "grafana-testdata-datasource"}
request.Queries = append(request.Queries, &tsdb.Query{
RefId: "A",
IntervalMs: intervalMs,
Model: simplejson.NewFromAny(&util.DynMap{
"scenario": "random_walk",
}),
DataSource: &models.DataSource{Type: "grafana-testdata-datasource"},
DataSource: dsInfo,
})
resp, err := tsdb.HandleRequest(context.Background(), request)
resp, err := tsdb.HandleRequest(context.Background(), dsInfo, request)
if err != nil {
return ApiError(500, "Metric request error", err)
}
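Both call sites above show the new calling convention: tsdb.Request became tsdb.TsdbQuery and the datasource is now passed to HandleRequest explicitly. Condensed, inside this package it reads:

dsInfo := &models.DataSource{Type: "grafana-testdata-datasource"}
request := &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange(from, to)}
request.Queries = append(request.Queries, &tsdb.Query{RefId: "A", DataSource: dsInfo})
resp, err := tsdb.HandleRequest(context.Background(), dsInfo, request)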
View File

@@ -89,7 +89,7 @@ func CreateOrg(c *middleware.Context, cmd m.CreateOrgCommand) Response {
return ApiError(500, "Failed to create organization", err)
}
metrics.M_Api_Org_Create.Inc(1)
metrics.M_Api_Org_Create.Inc()
return Json(200, &util.DynMap{
"orgId": cmd.Result.Id,
View File

@@ -187,8 +187,8 @@ func CompleteInvite(c *middleware.Context, completeInvite dtos.CompleteInviteFor
loginUserWithUser(user, c)
metrics.M_Api_User_SignUpCompleted.Inc(1)
metrics.M_Api_User_SignUpInvite.Inc(1)
metrics.M_Api_User_SignUpCompleted.Inc()
metrics.M_Api_User_SignUpInvite.Inc()
return ApiSuccess("User created and logged in")
}
View File

@@ -15,6 +15,8 @@ import (
"text/template"
"time"
"github.com/opentracing/opentracing-go"
"github.com/grafana/grafana/pkg/api/cloudwatch"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/middleware"
@@ -85,6 +87,20 @@ func (proxy *DataSourceProxy) HandleRequest() {
proxy.logRequest()
span, ctx := opentracing.StartSpanFromContext(proxy.ctx.Req.Context(), "datasource reverse proxy")
proxy.ctx.Req.Request = proxy.ctx.Req.WithContext(ctx)
defer span.Finish()
span.SetTag("datasource_id", proxy.ds.Id)
span.SetTag("datasource_type", proxy.ds.Type)
span.SetTag("user_id", proxy.ctx.SignedInUser.UserId)
span.SetTag("org_id", proxy.ctx.SignedInUser.OrgId)
opentracing.GlobalTracer().Inject(
span.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(proxy.ctx.Req.Request.Header))
reverseProxy.ServeHTTP(proxy.ctx.Resp, proxy.ctx.Req.Request)
proxy.ctx.Resp.Header().Del("Set-Cookie")
}
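Injecting the span context into the outgoing request headers is what lets the next hop continue the same trace; without the Inject call, the datasource and user tags set above would only ever be visible on the Grafana side of the proxy.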
123
pkg/api/route_register.go Normal file
View File

@@ -0,0 +1,123 @@
package api
import (
"net/http"
macaron "gopkg.in/macaron.v1"
)
type Router interface {
Handle(method, pattern string, handlers []macaron.Handler) *macaron.Route
Get(pattern string, handlers ...macaron.Handler) *macaron.Route
}
type RouteRegister interface {
Get(string, ...macaron.Handler)
Post(string, ...macaron.Handler)
Delete(string, ...macaron.Handler)
Put(string, ...macaron.Handler)
Patch(string, ...macaron.Handler)
Any(string, ...macaron.Handler)
Group(string, func(RouteRegister), ...macaron.Handler)
Register(Router) *macaron.Router
}
type RegisterNamedMiddleware func(name string) macaron.Handler
func newRouteRegister(namedMiddleware ...RegisterNamedMiddleware) RouteRegister {
return &routeRegister{
prefix: "",
routes: []route{},
subfixHandlers: []macaron.Handler{},
namedMiddleware: namedMiddleware,
}
}
type route struct {
method string
pattern string
handlers []macaron.Handler
}
type routeRegister struct {
prefix string
subfixHandlers []macaron.Handler
namedMiddleware []RegisterNamedMiddleware
routes []route
groups []*routeRegister
}
func (rr *routeRegister) Group(pattern string, fn func(rr RouteRegister), handlers ...macaron.Handler) {
group := &routeRegister{
prefix: rr.prefix + pattern,
subfixHandlers: append(rr.subfixHandlers, handlers...),
routes: []route{},
namedMiddleware: rr.namedMiddleware,
}
fn(group)
rr.groups = append(rr.groups, group)
}
func (rr *routeRegister) Register(router Router) *macaron.Router {
for _, r := range rr.routes {
// GET requests have to be added to macaron routing using Get()
// Otherwise HEAD requests will not be allowed.
// https://github.com/go-macaron/macaron/blob/a325110f8b392bce3e5cdeb8c44bf98078ada3be/router.go#L198
if r.method == http.MethodGet {
router.Get(r.pattern, r.handlers...)
} else {
router.Handle(r.method, r.pattern, r.handlers)
}
}
for _, g := range rr.groups {
g.Register(router)
}
return &macaron.Router{}
}
func (rr *routeRegister) route(pattern, method string, handlers ...macaron.Handler) {
// prepend named middleware (e.g. per-route tracing) ahead of group and route handlers
h := make([]macaron.Handler, 0)
for _, fn := range rr.namedMiddleware {
h = append(h, fn(pattern))
}
h = append(h, rr.subfixHandlers...)
h = append(h, handlers...)
rr.routes = append(rr.routes, route{
method: method,
pattern: rr.prefix + pattern,
handlers: h,
})
}
func (rr *routeRegister) Get(pattern string, handlers ...macaron.Handler) {
rr.route(pattern, http.MethodGet, handlers...)
}
func (rr *routeRegister) Post(pattern string, handlers ...macaron.Handler) {
rr.route(pattern, http.MethodPost, handlers...)
}
func (rr *routeRegister) Delete(pattern string, handlers ...macaron.Handler) {
rr.route(pattern, http.MethodDelete, handlers...)
}
func (rr *routeRegister) Put(pattern string, handlers ...macaron.Handler) {
rr.route(pattern, http.MethodPut, handlers...)
}
func (rr *routeRegister) Patch(pattern string, handlers ...macaron.Handler) {
rr.route(pattern, http.MethodPatch, handlers...)
}
func (rr *routeRegister) Any(pattern string, handlers ...macaron.Handler) {
rr.route(pattern, "*", handlers...)
}
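A usage sketch of the registry, reusing the emptyHandler helper from the tests below (m is anything satisfying Router, e.g. a *macaron.Macaron):

rr := newRouteRegister(func(pattern string) macaron.Handler {
	return emptyHandler(pattern) // named middleware receives the route pattern, e.g. for per-route tracing
})
rr.Group("/user", func(user RouteRegister) {
	user.Post("", emptyHandler("create"))
	user.Group("/admin", func(admin RouteRegister) {
		admin.Delete("", emptyHandler("remove"))
	}, emptyHandler("adminOnly")) // group handlers are prepended to every route in the group
})
rr.Register(m)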
View File

@@ -0,0 +1,196 @@
package api
import (
"net/http"
"strconv"
"testing"
macaron "gopkg.in/macaron.v1"
)
type fakeRouter struct {
route []route
}
func (fr *fakeRouter) Handle(method, pattern string, handlers []macaron.Handler) *macaron.Route {
fr.route = append(fr.route, route{
pattern: pattern,
method: method,
handlers: handlers,
})
return &macaron.Route{}
}
func (fr *fakeRouter) Get(pattern string, handlers ...macaron.Handler) *macaron.Route {
fr.route = append(fr.route, route{
pattern: pattern,
method: http.MethodGet,
handlers: handlers,
})
return &macaron.Route{}
}
func emptyHandlers(n int) []macaron.Handler {
res := []macaron.Handler{}
for i := 1; n >= i; i++ {
res = append(res, emptyHandler(strconv.Itoa(i)))
}
return res
}
func emptyHandler(name string) macaron.Handler {
return struct{ name string }{name: name}
}
func TestRouteSimpleRegister(t *testing.T) {
testTable := []route{
{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(2)},
{method: "GET", pattern: "/down", handlers: emptyHandlers(3)},
}
// Setup
rr := newRouteRegister(func(name string) macaron.Handler {
return emptyHandler(name)
})
rr.Delete("/admin", emptyHandler("1"))
rr.Get("/down", emptyHandler("1"), emptyHandler("2"))
fr := &fakeRouter{}
rr.Register(fr)
// Validation
if len(fr.route) != len(testTable) {
t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
}
for i := range testTable {
if testTable[i].method != fr.route[i].method {
t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
}
if testTable[i].pattern != fr.route[i].pattern {
t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
}
if len(testTable[i].handlers) != len(fr.route[i].handlers) {
t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
len(testTable[i].handlers),
len(fr.route[i].handlers),
testTable[i],
fr.route[i])
}
}
}
func TestRouteGroupedRegister(t *testing.T) {
testTable := []route{
{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(1)},
{method: "GET", pattern: "/down", handlers: emptyHandlers(2)},
{method: "POST", pattern: "/user", handlers: emptyHandlers(1)},
{method: "PUT", pattern: "/user/friends", handlers: emptyHandlers(1)},
{method: "DELETE", pattern: "/user/admin", handlers: emptyHandlers(2)},
{method: "GET", pattern: "/user/admin/all", handlers: emptyHandlers(4)},
}
// Setup
rr := newRouteRegister()
rr.Delete("/admin", emptyHandler("1"))
rr.Get("/down", emptyHandler("1"), emptyHandler("2"))
rr.Group("/user", func(user RouteRegister) {
user.Post("", emptyHandler("1"))
user.Put("/friends", emptyHandler("2"))
user.Group("/admin", func(admin RouteRegister) {
admin.Delete("", emptyHandler("3"))
admin.Get("/all", emptyHandler("3"), emptyHandler("4"), emptyHandler("5"))
}, emptyHandler("3"))
})
fr := &fakeRouter{}
rr.Register(fr)
// Validation
if len(fr.route) != len(testTable) {
t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
}
for i := range testTable {
if testTable[i].method != fr.route[i].method {
t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
}
if testTable[i].pattern != fr.route[i].pattern {
t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
}
if len(testTable[i].handlers) != len(fr.route[i].handlers) {
t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
len(testTable[i].handlers),
len(fr.route[i].handlers),
testTable[i],
fr.route[i])
}
}
}
func TestNamedMiddlewareRouteRegister(t *testing.T) {
testTable := []route{
{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(2)},
{method: "GET", pattern: "/down", handlers: emptyHandlers(3)},
{method: "POST", pattern: "/user", handlers: emptyHandlers(2)},
{method: "PUT", pattern: "/user/friends", handlers: emptyHandlers(2)},
{method: "DELETE", pattern: "/user/admin", handlers: emptyHandlers(3)},
{method: "GET", pattern: "/user/admin/all", handlers: emptyHandlers(5)},
}
// Setup
rr := newRouteRegister(func(name string) macaron.Handler {
return emptyHandler(name)
})
rr.Delete("/admin", emptyHandler("1"))
rr.Get("/down", emptyHandler("1"), emptyHandler("2"))
rr.Group("/user", func(user RouteRegister) {
user.Post("", emptyHandler("1"))
user.Put("/friends", emptyHandler("2"))
user.Group("/admin", func(admin RouteRegister) {
admin.Delete("", emptyHandler("3"))
admin.Get("/all", emptyHandler("3"), emptyHandler("4"), emptyHandler("5"))
}, emptyHandler("3"))
})
fr := &fakeRouter{}
rr.Register(fr)
// Validation
if len(fr.route) != len(testTable) {
t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
}
for i := range testTable {
if testTable[i].method != fr.route[i].method {
t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
}
if testTable[i].pattern != fr.route[i].pattern {
t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
}
if len(testTable[i].handlers) != len(fr.route[i].handlers) {
t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
len(testTable[i].handlers),
len(fr.route[i].handlers),
testTable[i],
fr.route[i])
}
}
}
View File

@@ -47,7 +47,7 @@ func SignUp(c *middleware.Context, form dtos.SignUpForm) Response {
Code: cmd.Code,
})
metrics.M_Api_User_SignUpStarted.Inc(1)
metrics.M_Api_User_SignUpStarted.Inc()
return Json(200, util.DynMap{"status": "SignUpCreated"})
}
@@ -111,7 +111,7 @@ func SignUpStep2(c *middleware.Context, form dtos.SignUpStep2Form) Response {
}
loginUserWithUser(user, c)
metrics.M_Api_User_SignUpCompleted.Inc(1)
metrics.M_Api_User_SignUpCompleted.Inc()
return Json(200, apiResponse)
}
View File

@@ -19,6 +19,7 @@ type CommandLine interface {
PluginDirectory() string
RepoDirectory() string
PluginURL() string
}
type contextCommandLine struct {
@@ -44,3 +45,7 @@ func (c *contextCommandLine) PluginDirectory() string {
func (c *contextCommandLine) RepoDirectory() string {
return c.GlobalString("repo")
}
func (c *contextCommandLine) PluginURL() string {
return c.GlobalString("pluginUrl")
}
View File

@@ -101,3 +101,7 @@ func (fcli *FakeCommandLine) RepoDirectory() string {
func (fcli *FakeCommandLine) PluginDirectory() string {
return fcli.GlobalString("pluginsDir")
}
func (fcli *FakeCommandLine) PluginURL() string {
return fcli.GlobalString("pluginUrl")
}
View File

@@ -58,37 +58,39 @@ func installCommand(c CommandLine) error {
}
func InstallPlugin(pluginName, version string, c CommandLine) error {
plugin, err := s.GetPlugin(pluginName, c.RepoDirectory())
pluginFolder := c.PluginDirectory()
if err != nil {
return err
downloadURL := c.PluginURL()
if downloadURL == "" {
plugin, err := s.GetPlugin(pluginName, c.RepoDirectory())
if err != nil {
return err
}
v, err := SelectVersion(plugin, version)
if err != nil {
return err
}
if version == "" {
version = v.Version
}
downloadURL = fmt.Sprintf("%s/%s/versions/%s/download",
c.GlobalString("repo"),
pluginName,
version)
}
v, err := SelectVersion(plugin, version)
if err != nil {
return err
}
if version == "" {
version = v.Version
}
downloadURL := fmt.Sprintf("%s/%s/versions/%s/download",
c.GlobalString("repo"),
pluginName,
version)
logger.Infof("installing %v @ %v\n", plugin.Id, version)
logger.Infof("installing %v @ %v\n", pluginName, version)
logger.Infof("from url: %v\n", downloadURL)
logger.Infof("into: %v\n", pluginFolder)
logger.Info("\n")
err = downloadFile(plugin.Id, pluginFolder, downloadURL)
err := downloadFile(pluginName, pluginFolder, downloadURL)
if err != nil {
return err
}
logger.Infof("%s Installed %s successfully \n", color.GreenString("✔"), plugin.Id)
logger.Infof("%s Installed %s successfully \n", color.GreenString("✔"), pluginName)
res, _ := s.ReadPlugin(pluginFolder, pluginName)
for _, v := range res.Dependencies.Plugins {
View File

@@ -38,6 +38,12 @@ func main() {
Value: "https://grafana.com/api/plugins",
EnvVar: "GF_PLUGIN_REPO",
},
cli.StringFlag{
Name: "pluginUrl",
Usage: "Full url to the plugin zip file instead of downloading the plugin from grafana.com/api",
Value: "",
EnvVar: "GF_PLUGIN_URL",
},
cli.BoolFlag{
Name: "debug, d",
Usage: "enable debug logging",
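The pluginUrl flag above feeds the new branch in InstallPlugin: an invocation along the lines of grafana-cli --pluginUrl https://example.com/my-plugin.zip plugins install my-plugin (URL and plugin id illustrative) should bypass the grafana.com repo lookup entirely.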
View File

@@ -14,6 +14,7 @@ import (
"net/http"
_ "net/http/pprof"
"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/setting"
@@ -22,14 +23,14 @@ import (
_ "github.com/grafana/grafana/pkg/services/alerting/notifiers"
_ "github.com/grafana/grafana/pkg/tsdb/graphite"
_ "github.com/grafana/grafana/pkg/tsdb/influxdb"
_ "github.com/grafana/grafana/pkg/tsdb/mqe"
_ "github.com/grafana/grafana/pkg/tsdb/mysql"
_ "github.com/grafana/grafana/pkg/tsdb/opentsdb"
_ "github.com/grafana/grafana/pkg/tsdb/prometheus"
_ "github.com/grafana/grafana/pkg/tsdb/testdata"
)
var version = "4.1.0"
var version = "4.6.0"
var commit = "NA"
var buildstamp string
var build_date string
@@ -80,6 +81,8 @@ func main() {
setting.BuildCommit = commit
setting.BuildStamp = buildstampInt64
metrics.M_Grafana_Version.WithLabelValues(version).Set(1)
server := NewGrafanaServer()
server.Start()
}
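The M_Grafana_Version line above uses the common Prometheus build-info idiom: a labeled gauge pinned to 1, with the version carried in the label. A sketch of how such a metric is declared (names illustrative):

var buildInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Name: "grafana_info", // illustrative name
	Help: "Build info; value is always 1, the version travels in the label.",
}, []string{"version"})

// later: buildInfo.WithLabelValues(version).Set(1)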
View File

@@ -24,6 +24,7 @@ import (
"github.com/grafana/grafana/pkg/services/search"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/social"
"github.com/grafana/grafana/pkg/tracing"
)
func NewGrafanaServer() models.GrafanaServer {
@@ -54,13 +55,21 @@ func (g *GrafanaServerImpl) Start() {
g.writePIDFile()
initSql()
metrics.Init()
metrics.Init(setting.Cfg)
search.Init()
login.Init()
social.NewOAuthService()
eventpublisher.Init()
plugins.Init()
closer, err := tracing.Init(setting.Cfg)
if err != nil {
g.log.Error("Tracing settings are not valid", "error", err)
g.Shutdown(1, "Startup failed")
return
}
defer closer.Close()
// init alerting
if setting.AlertingEnabled && setting.ExecuteAlerts {
engine := alerting.NewEngine()
@@ -71,8 +80,8 @@ func (g *GrafanaServerImpl) Start() {
cleanUpService := cleanup.NewCleanUpService()
g.childRoutines.Go(func() error { return cleanUpService.Run(g.context) })
if err := notifications.Init(); err != nil {
g.log.Error("Notification service failed to initialize", "erro", err)
if err = notifications.Init(); err != nil {
g.log.Error("Notification service failed to initialize", "error", err)
g.Shutdown(1, "Startup failed")
return
}
View File

@@ -0,0 +1,88 @@
package imguploader
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/util"
"golang.org/x/oauth2/google"
)
const (
tokenUrl string = "https://www.googleapis.com/auth/devstorage.read_write"
uploadUrl string = "https://www.googleapis.com/upload/storage/v1/b/%s/o?uploadType=media&name=%s&predefinedAcl=publicRead"
)
type GCSUploader struct {
keyFile string
bucket string
log log.Logger
}
func NewGCSUploader(keyFile, bucket string) *GCSUploader {
return &GCSUploader{
keyFile: keyFile,
bucket: bucket,
log: log.New("gcsuploader"),
}
}
func (u *GCSUploader) Upload(ctx context.Context, imageDiskPath string) (string, error) {
key := util.GetRandomString(20) + ".png"
u.log.Debug("Opening key file ", u.keyFile)
data, err := ioutil.ReadFile(u.keyFile)
if err != nil {
return "", err
}
u.log.Debug("Creating JWT conf")
conf, err := google.JWTConfigFromJSON(data, tokenUrl)
if err != nil {
return "", err
}
u.log.Debug("Creating HTTP client")
client := conf.Client(ctx)
err = u.uploadFile(client, imageDiskPath, key)
if err != nil {
return "", err
}
return fmt.Sprintf("https://storage.googleapis.com/%s/%s", u.bucket, key), nil
}
func (u *GCSUploader) uploadFile(client *http.Client, imageDiskPath, key string) error {
u.log.Debug("Opening image file ", imageDiskPath)
fileReader, err := os.Open(imageDiskPath)
if err != nil {
return err
}
reqUrl := fmt.Sprintf(uploadUrl, u.bucket, key)
u.log.Debug("Request URL: ", reqUrl)
req, err := http.NewRequest("POST", reqUrl, fileReader)
if err != nil {
return err
}
req.Header.Add("Content-Type", "image/png")
u.log.Debug("Sending POST request to GCS")
resp, err := client.Do(req)
if err != nil {
return err
}
if resp.StatusCode != 200 {
return fmt.Errorf("GCS response status code %d", resp.StatusCode)
}
return nil
}
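A hedged usage sketch of the new uploader (key file and bucket illustrative):

uploader := NewGCSUploader("/etc/secrets/gcs-key.json", "grafana-images")
url, err := uploader.Upload(context.Background(), "public/img/logo_transparent_400x.png")
if err == nil {
	// url has the form https://storage.googleapis.com/grafana-images/<20 random chars>.png
}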
View File

@@ -0,0 +1,24 @@
package imguploader
import (
"context"
"testing"
"github.com/grafana/grafana/pkg/setting"
. "github.com/smartystreets/goconvey/convey"
)
func TestUploadToGCS(t *testing.T) {
SkipConvey("[Integration test] for external_image_store.gcs", t, func() {
setting.NewConfigContext(&setting.CommandLineArgs{
HomePath: "../../../",
})
gcsUploader, _ := NewImageUploader()
path, err := gcsUploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
So(err, ShouldBeNil)
So(path, ShouldNotEqual, "")
})
}
View File

@@ -1,6 +1,7 @@
package imguploader
import (
"context"
"fmt"
"regexp"
@@ -8,13 +9,13 @@ )
)
type ImageUploader interface {
Upload(path string) (string, error)
Upload(ctx context.Context, path string) (string, error)
}
type NopImageUploader struct {
}
func (NopImageUploader) Upload(path string) (string, error) {
func (NopImageUploader) Upload(ctx context.Context, path string) (string, error) {
return "", nil
}
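Every uploader now receives a context; a stub conforming to the widened interface looks like this (type and scheme illustrative):

type nullUploader struct{}

func (nullUploader) Upload(ctx context.Context, path string) (string, error) {
	// honor cancellation before doing any work
	if err := ctx.Err(); err != nil {
		return "", err
	}
	return "file://" + path, nil
}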
@@ -52,6 +53,16 @@ func NewImageUploader() (ImageUploader, error) {
password := webdavSec.Key("password").String()
return NewWebdavImageUploader(url, username, password, public_url)
case "gcs":
gcssec, err := setting.Cfg.GetSection("external_image_storage.gcs")
if err != nil {
return nil, err
}
keyFile := gcssec.Key("key_file").MustString("")
bucketName := gcssec.Key("bucket").MustString("")
return NewGCSUploader(keyFile, bucketName), nil
}
return NopImageUploader{}, nil
View File

@@ -96,5 +96,28 @@ func TestImageUploaderFactory(t *testing.T) {
So(original.username, ShouldEqual, "username")
So(original.password, ShouldEqual, "password")
})
Convey("GCS uploader", func() {
var err error
setting.NewConfigContext(&setting.CommandLineArgs{
HomePath: "../../../",
})
setting.ImageUploadProvider = "gcs"
gcpSec, err := setting.Cfg.GetSection("external_image_storage.gcs")
gcpSec.NewKey("key_file", "/etc/secrets/project-79a52befa3f6.json")
gcpSec.NewKey("bucket", "project-grafana-east")
uploader, err := NewImageUploader()
So(err, ShouldBeNil)
original, ok := uploader.(*GCSUploader)
So(ok, ShouldBeTrue)
So(original.keyFile, ShouldEqual, "/etc/secrets/project-79a52befa3f6.json")
So(original.bucket, ShouldEqual, "project-grafana-east")
})
})
}
View File

@@ -1,6 +1,7 @@
package imguploader
import (
"context"
"os"
"time"
@@ -34,7 +35,7 @@ func NewS3Uploader(region, bucket, acl, accessKey, secretKey string) *S3Uploader
}
}
func (u *S3Uploader) Upload(imageDiskPath string) (string, error) {
func (u *S3Uploader) Upload(ctx context.Context, imageDiskPath string) (string, error) {
sess, err := session.NewSession()
if err != nil {
return "", err
View File

@@ -1,6 +1,7 @@
package imguploader
import (
"context"
"testing"
"github.com/grafana/grafana/pkg/setting"
@@ -15,7 +16,7 @@ func TestUploadToS3(t *testing.T) {
s3Uploader, _ := NewImageUploader()
path, err := s3Uploader.Upload("../../../public/img/logo_transparent_400x.png")
path, err := s3Uploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
So(err, ShouldBeNil)
So(path, ShouldNotEqual, "")
View File

@@ -2,6 +2,7 @@ package imguploader
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net"
@@ -33,7 +34,7 @@ var netClient = &http.Client{
Transport: netTransport,
}
func (u *WebdavUploader) Upload(pa string) (string, error) {
func (u *WebdavUploader) Upload(ctx context.Context, pa string) (string, error) {
url, _ := url.Parse(u.url)
filename := util.GetRandomString(20) + ".png"
url.Path = path.Join(url.Path, filename)
View File

@@ -1,6 +1,7 @@
package imguploader
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
@@ -11,7 +12,7 @@ func TestUploadToWebdav(t *testing.T) {
// Can be tested with this docker container: https://hub.docker.com/r/morrisjobke/webdav/
SkipConvey("[Integration test] for external_image_store.webdav", t, func() {
webdavUploader, _ := NewWebdavImageUploader("http://localhost:8888/webdav/", "test", "test", "")
path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png")
path, err := webdavUploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
So(err, ShouldBeNil)
So(path, ShouldStartWith, "http://localhost:8888/webdav/")
@@ -19,7 +20,7 @@
SkipConvey("[Integration test] for external_image_store.webdav with public url", t, func() {
webdavUploader, _ := NewWebdavImageUploader("http://localhost:8888/webdav/", "test", "test", "http://publicurl:8888/webdav")
path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png")
path, err := webdavUploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
So(err, ShouldBeNil)
So(path, ShouldStartWith, "http://publicurl:8888/webdav/")
View File

@@ -1,122 +0,0 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.
package metrics
import (
"math"
"sync"
"sync/atomic"
)
// EWMAs continuously calculate an exponentially-weighted moving average
// based on an outside source of clock ticks.
type EWMA interface {
Rate() float64
Snapshot() EWMA
Tick()
Update(int64)
}
// NewEWMA constructs a new EWMA with the given alpha.
func NewEWMA(alpha float64) EWMA {
if UseNilMetrics {
return NilEWMA{}
}
return &StandardEWMA{alpha: alpha}
}
// NewEWMA1 constructs a new EWMA for a one-minute moving average.
func NewEWMA1() EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/1))
}
// NewEWMA5 constructs a new EWMA for a five-minute moving average.
func NewEWMA5() EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/5))
}
// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
func NewEWMA15() EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/15))
}
// EWMASnapshot is a read-only copy of another EWMA.
type EWMASnapshot float64
// Rate returns the rate of events per second at the time the snapshot was
// taken.
func (a EWMASnapshot) Rate() float64 { return float64(a) }
// Snapshot returns the snapshot.
func (a EWMASnapshot) Snapshot() EWMA { return a }
// Tick panics.
func (EWMASnapshot) Tick() {
panic("Tick called on an EWMASnapshot")
}
// Update panics.
func (EWMASnapshot) Update(int64) {
panic("Update called on an EWMASnapshot")
}
// NilEWMA is a no-op EWMA.
type NilEWMA struct{}
// Rate is a no-op.
func (NilEWMA) Rate() float64 { return 0.0 }
// Snapshot is a no-op.
func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
// Tick is a no-op.
func (NilEWMA) Tick() {}
// Update is a no-op.
func (NilEWMA) Update(n int64) {}
// StandardEWMA is the standard implementation of an EWMA and tracks the number
// of uncounted events and processes them on each tick. It uses the
// sync/atomic package to manage uncounted events.
type StandardEWMA struct {
uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
alpha float64
rate float64
init bool
mutex sync.Mutex
}
// Rate returns the moving average rate of events per second.
func (a *StandardEWMA) Rate() float64 {
a.mutex.Lock()
defer a.mutex.Unlock()
return a.rate * float64(1e9)
}
// Snapshot returns a read-only copy of the EWMA.
func (a *StandardEWMA) Snapshot() EWMA {
return EWMASnapshot(a.Rate())
}
// Tick ticks the clock to update the moving average. It assumes it is called
// every five seconds.
func (a *StandardEWMA) Tick() {
count := atomic.LoadInt64(&a.uncounted)
atomic.AddInt64(&a.uncounted, -count)
instantRate := float64(count) / float64(5e9)
a.mutex.Lock()
defer a.mutex.Unlock()
if a.init {
a.rate += a.alpha * (instantRate - a.rate)
} else {
a.init = true
a.rate = instantRate
}
}
// Update adds n uncounted events.
func (a *StandardEWMA) Update(n int64) {
atomic.AddInt64(&a.uncounted, n)
}
View File

@@ -1,46 +0,0 @@
package metrics
// type comboCounterRef struct {
// *MetricMeta
// usageCounter Counter
// metricCounter Counter
// }
//
// func RegComboCounter(name string, tagStrings ...string) Counter {
// meta := NewMetricMeta(name, tagStrings)
// cr := &comboCounterRef{
// MetricMeta: meta,
// usageCounter: NewCounter(meta),
// metricCounter: NewCounter(meta),
// }
//
// UsageStats.Register(cr.usageCounter)
// MetricStats.Register(cr.metricCounter)
//
// return cr
// }
//
// func (c comboCounterRef) Clear() {
// c.usageCounter.Clear()
// c.metricCounter.Clear()
// }
//
// func (c comboCounterRef) Count() int64 {
// panic("Count called on a combocounter ref")
// }
//
// // Dec panics.
// func (c comboCounterRef) Dec(i int64) {
// c.usageCounter.Dec(i)
// c.metricCounter.Dec(i)
// }
//
// // Inc panics.
// func (c comboCounterRef) Inc(i int64) {
// c.usageCounter.Inc(i)
// c.metricCounter.Inc(i)
// }
//
// func (c comboCounterRef) Snapshot() Metric {
// return c.metricCounter.Snapshot()
// }
View File

@@ -1,61 +0,0 @@
package metrics
import "github.com/grafana/grafana/pkg/log"
type MetricMeta struct {
tags map[string]string
name string
}
func NewMetricMeta(name string, tagStrings []string) *MetricMeta {
if len(tagStrings)%2 != 0 {
log.Fatal(3, "Metrics: tags array is missing value for key, %v", tagStrings)
}
tags := make(map[string]string)
for i := 0; i < len(tagStrings); i += 2 {
tags[tagStrings[i]] = tagStrings[i+1]
}
return &MetricMeta{
tags: tags,
name: name,
}
}
func (m *MetricMeta) Name() string {
return m.name
}
func (m *MetricMeta) GetTagsCopy() map[string]string {
if len(m.tags) == 0 {
return make(map[string]string)
}
copy := make(map[string]string)
for k2, v2 := range m.tags {
copy[k2] = v2
}
return copy
}
func (m *MetricMeta) StringifyTags() string {
if len(m.tags) == 0 {
return ""
}
str := ""
for key, value := range m.tags {
str += "." + key + "_" + value
}
return str
}
type Metric interface {
Name() string
GetTagsCopy() map[string]string
StringifyTags() string
Snapshot() Metric
}
View File

@@ -1,61 +0,0 @@
package metrics
import "sync/atomic"
// Counters hold an int64 value that can be incremented and decremented.
type Counter interface {
Metric
Clear()
Count() int64
Dec(int64)
Inc(int64)
}
// NewCounter constructs a new StandardCounter.
func NewCounter(meta *MetricMeta) Counter {
return &StandardCounter{
MetricMeta: meta,
count: 0,
}
}
func RegCounter(name string, tagStrings ...string) Counter {
cr := NewCounter(NewMetricMeta(name, tagStrings))
MetricStats.Register(cr)
return cr
}
// StandardCounter is the standard implementation of a Counter and uses the
// sync/atomic package to manage a single int64 value.
type StandardCounter struct {
count int64 // Due to a bug in Go, the 64-bit variable needs to come first to be 64-bit aligned. https://golang.org/pkg/sync/atomic/#pkg-note-BUG
*MetricMeta
}
// Clear sets the counter to zero.
func (c *StandardCounter) Clear() {
atomic.StoreInt64(&c.count, 0)
}
// Count returns the current count.
func (c *StandardCounter) Count() int64 {
return atomic.LoadInt64(&c.count)
}
// Dec decrements the counter by the given amount.
func (c *StandardCounter) Dec(i int64) {
atomic.AddInt64(&c.count, -i)
}
// Inc increments the counter by the given amount.
func (c *StandardCounter) Inc(i int64) {
atomic.AddInt64(&c.count, i)
}
func (c *StandardCounter) Snapshot() Metric {
return &StandardCounter{
MetricMeta: c.MetricMeta,
count: c.count,
}
}
View File

@@ -1,11 +0,0 @@
package metrics
import "math"
func calculateDelta(oldValue, newValue int64) int64 {
if oldValue < newValue {
return newValue - oldValue
} else {
return (math.MaxInt64 - oldValue) + (newValue - math.MinInt64) + 1
}
}
View File

@@ -1,83 +0,0 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.
package metrics
import "sync/atomic"
// Gauges hold an int64 value that can be set arbitrarily.
type Gauge interface {
Metric
Update(int64)
Value() int64
}
func NewGauge(meta *MetricMeta) Gauge {
if UseNilMetrics {
return NilGauge{}
}
return &StandardGauge{
MetricMeta: meta,
value: 0,
}
}
func RegGauge(name string, tagStrings ...string) Gauge {
tr := NewGauge(NewMetricMeta(name, tagStrings))
MetricStats.Register(tr)
return tr
}
// GaugeSnapshot is a read-only copy of another Gauge.
type GaugeSnapshot struct {
value int64
*MetricMeta
}
// Snapshot returns the snapshot.
func (g GaugeSnapshot) Snapshot() Metric { return g }
// Update panics.
func (GaugeSnapshot) Update(int64) {
panic("Update called on a GaugeSnapshot")
}
// Value returns the value at the time the snapshot was taken.
func (g GaugeSnapshot) Value() int64 { return g.value }
// NilGauge is a no-op Gauge.
type NilGauge struct{ *MetricMeta }
// Snapshot is a no-op.
func (NilGauge) Snapshot() Metric { return NilGauge{} }
// Update is a no-op.
func (NilGauge) Update(v int64) {}
// Value is a no-op.
func (NilGauge) Value() int64 { return 0 }
// StandardGauge is the standard implementation of a Gauge and uses the
// sync/atomic package to manage a single int64 value.
// atomic needs 64-bit aligned memory, which is ensured for the first word
type StandardGauge struct {
value int64
*MetricMeta
}
// Snapshot returns a read-only copy of the gauge.
func (g *StandardGauge) Snapshot() Metric {
return GaugeSnapshot{MetricMeta: g.MetricMeta, value: g.value}
}
// Update updates the gauge's value.
func (g *StandardGauge) Update(v int64) {
atomic.StoreInt64(&g.value, v)
}
// Value returns the gauge's current value.
func (g *StandardGauge) Value() int64 {
return atomic.LoadInt64(&g.value)
}
View File

@@ -1,107 +0,0 @@
package metrics
import (
"bytes"
"fmt"
"net"
"strings"
"time"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/setting"
)
type GraphitePublisher struct {
address string
protocol string
prefix string
prevCounts map[string]int64
}
func CreateGraphitePublisher() (*GraphitePublisher, error) {
graphiteSection, err := setting.Cfg.GetSection("metrics.graphite")
if err != nil {
return nil, nil
}
address := graphiteSection.Key("address").String()
if address == "" {
return nil, nil
}
publisher := &GraphitePublisher{}
publisher.prevCounts = make(map[string]int64)
publisher.protocol = "tcp"
publisher.prefix = graphiteSection.Key("prefix").MustString("prod.grafana.%(instance_name)s")
publisher.address = address
safeInstanceName := strings.Replace(setting.InstanceName, ".", "_", -1)
prefix := graphiteSection.Key("prefix").Value()
if prefix == "" {
prefix = "prod.grafana.%(instance_name)s."
}
publisher.prefix = strings.Replace(prefix, "%(instance_name)s", safeInstanceName, -1)
return publisher, nil
}
func (this *GraphitePublisher) Publish(metrics []Metric) {
conn, err := net.DialTimeout(this.protocol, this.address, time.Second*5)
if err != nil {
log.Error(3, "Metrics: GraphitePublisher: Failed to connect to %s!", err)
return
}
buf := bytes.NewBufferString("")
now := time.Now().Unix()
for _, m := range metrics {
metricName := this.prefix + m.Name() + m.StringifyTags()
switch metric := m.(type) {
case Counter:
this.addCount(buf, metricName+".count", metric.Count(), now)
case Gauge:
this.addCount(buf, metricName, metric.Value(), now)
case Timer:
percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
this.addCount(buf, metricName+".count", metric.Count(), now)
this.addInt(buf, metricName+".max", metric.Max(), now)
this.addInt(buf, metricName+".min", metric.Min(), now)
this.addFloat(buf, metricName+".mean", metric.Mean(), now)
this.addFloat(buf, metricName+".std", metric.StdDev(), now)
this.addFloat(buf, metricName+".p25", percentiles[0], now)
this.addFloat(buf, metricName+".p75", percentiles[1], now)
this.addFloat(buf, metricName+".p90", percentiles[2], now)
this.addFloat(buf, metricName+".p99", percentiles[3], now)
}
}
log.Trace("Metrics: GraphitePublisher.Publish() \n%s", buf)
_, err = conn.Write(buf.Bytes())
if err != nil {
log.Error(3, "Metrics: GraphitePublisher: Failed to send metrics! %s", err)
}
}
func (this *GraphitePublisher) addInt(buf *bytes.Buffer, metric string, value int64, now int64) {
buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, value, now))
}
func (this *GraphitePublisher) addFloat(buf *bytes.Buffer, metric string, value float64, now int64) {
buf.WriteString(fmt.Sprintf("%s %f %d\n", metric, value, now))
}
func (this *GraphitePublisher) addCount(buf *bytes.Buffer, metric string, value int64, now int64) {
delta := value
if last, ok := this.prevCounts[metric]; ok {
delta = calculateDelta(last, value)
}
this.prevCounts[metric] = value
buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, delta, now))
}
View File

@@ -1,77 +0,0 @@
package metrics
import (
"testing"
"github.com/grafana/grafana/pkg/setting"
. "github.com/smartystreets/goconvey/convey"
)
func TestGraphitePublisher(t *testing.T) {
setting.CustomInitPath = "conf/does_not_exist.ini"
Convey("Test graphite prefix replacement", t, func() {
var err error
err = setting.NewConfigContext(&setting.CommandLineArgs{
HomePath: "../../",
})
So(err, ShouldBeNil)
sec, err := setting.Cfg.NewSection("metrics.graphite")
sec.NewKey("prefix", "prod.grafana.%(instance_name)s.")
sec.NewKey("address", "localhost:2001")
So(err, ShouldBeNil)
setting.InstanceName = "hostname.with.dots.com"
publisher, err := CreateGraphitePublisher()
So(err, ShouldBeNil)
So(publisher, ShouldNotBeNil)
So(publisher.prefix, ShouldEqual, "prod.grafana.hostname_with_dots_com.")
So(publisher.address, ShouldEqual, "localhost:2001")
})
Convey("Test graphite publisher default prefix", t, func() {
var err error
err = setting.NewConfigContext(&setting.CommandLineArgs{
HomePath: "../../",
})
So(err, ShouldBeNil)
sec, err := setting.Cfg.NewSection("metrics.graphite")
sec.NewKey("address", "localhost:2001")
So(err, ShouldBeNil)
setting.InstanceName = "hostname.with.dots.com"
publisher, err := CreateGraphitePublisher()
So(err, ShouldBeNil)
So(publisher, ShouldNotBeNil)
So(publisher.prefix, ShouldEqual, "prod.grafana.hostname_with_dots_com.")
So(publisher.address, ShouldEqual, "localhost:2001")
})
Convey("Test graphite publisher default values", t, func() {
var err error
err = setting.NewConfigContext(&setting.CommandLineArgs{
HomePath: "../../",
})
So(err, ShouldBeNil)
_, err = setting.Cfg.NewSection("metrics.graphite")
publisher, err := CreateGraphitePublisher()
So(err, ShouldBeNil)
So(publisher, ShouldBeNil)
})
}
View File

@@ -0,0 +1,412 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package graphitebridge provides a bridge to push Prometheus metrics to a
// Graphite server.
package graphitebridge
import (
"bufio"
"errors"
"fmt"
"io"
"math"
"net"
"sort"
"strings"
"time"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus"
)
const (
defaultInterval = 15 * time.Second
millisecondsPerSecond = 1000
)
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int
// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
// Ignore errors and try to push as many metrics to Graphite as possible.
ContinueOnError HandlerErrorHandling = iota
// Abort the push to Graphite upon the first error encountered.
AbortOnError
)
var metricCategoryPrefix []string = []string{
"proxy_",
"api_",
"page_",
"alerting_",
"aws_",
"db_",
"stat_",
"go_",
"process_"}
var trimMetricPrefix []string = []string{"grafana_"}
// Config defines the Graphite bridge config.
type Config struct {
// The url to push data to. Required.
URL string
// The prefix for the pushed Graphite metrics. Defaults to empty string.
Prefix string
// The interval to use for pushing data to Graphite. Defaults to 15 seconds.
Interval time.Duration
// The timeout for pushing metrics to Graphite. Defaults to 15 seconds.
Timeout time.Duration
// The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer.
Gatherer prometheus.Gatherer
// The logger that messages are written to. Defaults to no logging.
Logger Logger
// ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling, provided the
// Logger is not nil.
ErrorHandling HandlerErrorHandling
// Graphite does not support ever-increasing counters the same way
// Prometheus does; rollups and ingestion may not cope with them.
// This option lets the caller send deltas instead, by remembering the
// last counter value sent and subtracting it from the collected value
// before sending.
CountersAsDelta bool
}
// Bridge pushes metrics to the configured Graphite server.
type Bridge struct {
url string
prefix string
countersAsDeltas bool
interval time.Duration
timeout time.Duration
errorHandling HandlerErrorHandling
logger Logger
g prometheus.Gatherer
lastValue map[model.Fingerprint]float64
}
// Logger is the minimal interface Bridge needs for logging. Note that
// log.Logger from the standard library implements this interface, and
// it is easy for custom loggers to implement if they don't already.
type Logger interface {
Println(v ...interface{})
}
// NewBridge returns a pointer to a new Bridge struct.
func NewBridge(c *Config) (*Bridge, error) {
b := &Bridge{}
if c.URL == "" {
return nil, errors.New("missing URL")
}
b.url = c.URL
if c.Gatherer == nil {
b.g = prometheus.DefaultGatherer
} else {
b.g = c.Gatherer
}
if c.Logger != nil {
b.logger = c.Logger
}
if c.Prefix != "" {
b.prefix = c.Prefix
}
var z time.Duration
if c.Interval == z {
b.interval = defaultInterval
} else {
b.interval = c.Interval
}
if c.Timeout == z {
b.timeout = defaultInterval
} else {
b.timeout = c.Timeout
}
b.errorHandling = c.ErrorHandling
b.lastValue = map[model.Fingerprint]float64{}
b.countersAsDeltas = c.CountersAsDelta
return b, nil
}
// Run starts the event loop that pushes Prometheus metrics to Graphite at the
// configured interval.
func (b *Bridge) Run(ctx context.Context) {
ticker := time.NewTicker(b.interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := b.Push(); err != nil && b.logger != nil {
b.logger.Println("error pushing to Graphite:", err)
}
case <-ctx.Done():
return
}
}
}
// Push pushes Prometheus metrics to the configured Graphite server.
func (b *Bridge) Push() error {
mfs, err := b.g.Gather()
if err != nil || len(mfs) == 0 {
switch b.errorHandling {
case AbortOnError:
return err
case ContinueOnError:
if b.logger != nil {
b.logger.Println("continue on error:", err)
}
default:
panic("unrecognized error handling value")
}
}
conn, err := net.DialTimeout("tcp", b.url, b.timeout)
if err != nil {
return err
}
defer conn.Close()
return b.writeMetrics(conn, mfs, b.prefix, model.Now())
}
func (b *Bridge) writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error {
for _, mf := range mfs {
vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
Timestamp: now,
}, mf)
if err != nil {
return err
}
buf := bufio.NewWriter(w)
for _, s := range vec {
if math.IsNaN(float64(s.Value)) {
continue
}
if err := writePrefix(buf, prefix); err != nil {
return err
}
if err := writeMetric(buf, s.Metric, mf); err != nil {
return err
}
value := b.replaceCounterWithDelta(mf, s.Metric, s.Value)
if _, err := fmt.Fprintf(buf, " %g %d\n", value, int64(s.Timestamp)/millisecondsPerSecond); err != nil {
return err
}
if err := buf.Flush(); err != nil {
return err
}
}
}
return nil
}
func writeMetric(buf *bufio.Writer, m model.Metric, mf *dto.MetricFamily) error {
metricName, hasName := m[model.MetricNameLabel]
numLabels := len(m) - 1
if !hasName {
numLabels = len(m)
}
for _, v := range trimMetricPrefix {
if strings.HasPrefix(string(metricName), v) {
metricName = model.LabelValue(strings.Replace(string(metricName), v, "", 1))
}
}
for _, v := range metricCategoryPrefix {
if strings.HasPrefix(string(metricName), v) {
group := strings.Replace(v, "_", " ", 1)
metricName = model.LabelValue(strings.Replace(string(metricName), v, group, 1))
}
}
labelStrings := make([]string, 0, numLabels)
for label, value := range m {
if label != model.MetricNameLabel {
labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value)))
}
}
var err error
switch numLabels {
case 0:
if hasName {
if err := writeSanitized(buf, string(metricName)); err != nil {
return err
}
}
default:
sort.Strings(labelStrings)
if err = writeSanitized(buf, string(metricName)); err != nil {
return err
}
for _, s := range labelStrings {
if err = buf.WriteByte('.'); err != nil {
return err
}
if err = writeSanitized(buf, s); err != nil {
return err
}
}
}
if err = addExtensionConventionForRollups(buf, mf, m); err != nil {
return err
}
return nil
}
func addExtensionConventionForRollups(buf *bufio.Writer, mf *dto.MetricFamily, m model.Metric) error {
// Adding `.count` and `.sum` suffixes makes it possible to configure
// different rollup strategies based on metric type.
mfType := mf.GetType()
var err error
if mfType == dto.MetricType_COUNTER {
if _, err = fmt.Fprint(buf, ".count"); err != nil {
return err
}
}
if mfType == dto.MetricType_SUMMARY || mfType == dto.MetricType_HISTOGRAM {
if strings.HasSuffix(string(m[model.MetricNameLabel]), "_count") {
if _, err = fmt.Fprint(buf, ".count"); err != nil {
return err
}
}
}
if mfType == dto.MetricType_HISTOGRAM {
if strings.HasSuffix(string(m[model.MetricNameLabel]), "_sum") {
if _, err = fmt.Fprint(buf, ".sum"); err != nil {
return err
}
}
}
return nil
}
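// For example (matching the tests below): a counter family named
// "grafana_page_response" with a code="200" label is written out as
// "page.response.code.200.count", so Graphite aggregation rules can
// key off the ".count" suffix.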
func writePrefix(buf *bufio.Writer, s string) error {
for _, c := range s {
if _, err := buf.WriteRune(replaceInvalid(c)); err != nil {
return err
}
}
return nil
}
func writeSanitized(buf *bufio.Writer, s string) error {
prevUnderscore := false
for _, c := range s {
c = replaceInvalidRune(c)
if c == '_' {
if prevUnderscore {
continue
}
prevUnderscore = true
} else {
prevUnderscore = false
}
if _, err := buf.WriteRune(c); err != nil {
return err
}
}
return nil
}
func replaceInvalid(c rune) rune {
if c == ' ' || c == '.' {
return '.'
}
return replaceInvalidRune(c)
}
func replaceInvalidRune(c rune) rune {
if c == ' ' {
return '.'
}
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '-' || c == '_' || c == ':' || (c >= '0' && c <= '9')) {
return '_'
}
return c
}
func (b *Bridge) replaceCounterWithDelta(mf *dto.MetricFamily, metric model.Metric, value model.SampleValue) float64 {
if !b.countersAsDeltas {
return float64(value)
}
mfType := mf.GetType()
if mfType == dto.MetricType_COUNTER {
return b.returnDelta(metric, value)
}
if mfType == dto.MetricType_SUMMARY {
if strings.HasSuffix(string(metric[model.MetricNameLabel]), "_count") {
return b.returnDelta(metric, value)
}
}
return float64(value)
}
func (b *Bridge) returnDelta(metric model.Metric, value model.SampleValue) float64 {
key := metric.Fingerprint()
_, exists := b.lastValue[key]
if !exists {
b.lastValue[key] = 0
}
delta := float64(value) - b.lastValue[key]
b.lastValue[key] = float64(value)
return delta
}
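
For readers wanting to wire this bridge up on its own, a minimal sketch follows; the address, prefix and interval are illustrative assumptions, not values from this commit:

package main

import (
	"log"
	"os"
	"time"

	"golang.org/x/net/context"

	"github.com/grafana/grafana/pkg/metrics/graphitebridge"
)

func main() {
	// Push the default Prometheus registry (Gatherer left nil) to a local
	// Carbon listener, sending counters as deltas.
	b, err := graphitebridge.NewBridge(&graphitebridge.Config{
		URL:             "localhost:2003", // assumed Carbon plaintext port
		Prefix:          "prod.grafana.",
		Interval:        10 * time.Second,
		Timeout:         5 * time.Second,
		CountersAsDelta: true,
		ErrorHandling:   graphitebridge.ContinueOnError,
		Logger:          log.New(os.Stdout, "graphitebridge: ", log.Lshortfile),
	})
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	b.Run(ctx) // blocks, pushing every Interval until ctx is cancelled
}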

View File

@ -0,0 +1,568 @@
package graphitebridge
import (
"bufio"
"bytes"
"io"
"net"
"regexp"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
)
func TestCountersAsDelta(t *testing.T) {
b, _ := NewBridge(&Config{
URL: "localhost:12345",
CountersAsDelta: true,
})
ty := dto.MetricType(0)
mf := &dto.MetricFamily{
Type: &ty,
Metric: []*dto.Metric{},
}
m := model.Metric{}
var want float64
var got float64
want = float64(1)
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(1))
if got != want {
t.Fatalf("want %v got %v", want, got)
}
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(2))
if got != want {
t.Fatalf("want %v got %v", want, got)
}
}
func TestCountersAsDeltaDisabled(t *testing.T) {
b, _ := NewBridge(&Config{
URL: "localhost:12345",
CountersAsDelta: false,
})
ty := dto.MetricType(0)
mf := &dto.MetricFamily{
Type: &ty,
Metric: []*dto.Metric{},
}
m := model.Metric{}
var want float64
var got float64
want = float64(1)
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(1))
if got != want {
t.Fatalf("want %v got %v", want, got)
}
want = float64(2)
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(2))
if got != want {
t.Fatalf("want %v got %v", want, got)
}
}
func TestSanitize(t *testing.T) {
testCases := []struct {
in, out string
}{
{in: "hello", out: "hello"},
{in: "hE/l1o", out: "hE_l1o"},
{in: "he,*ll(.o", out: "he_ll_o"},
{in: "hello_there%^&", out: "hello_there_"},
}
var buf bytes.Buffer
w := bufio.NewWriter(&buf)
for i, tc := range testCases {
if err := writeSanitized(w, tc.in); err != nil {
t.Fatalf("write failed: %v", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush failed: %v", err)
}
if want, got := tc.out, buf.String(); want != got {
t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
}
buf.Reset()
}
}
func TestSanitizePrefix(t *testing.T) {
testCases := []struct {
in, out string
}{
{in: "service.prod.", out: "service.prod."},
{in: "service.prod", out: "service.prod"},
}
var buf bytes.Buffer
w := bufio.NewWriter(&buf)
for i, tc := range testCases {
if err := writePrefix(w, tc.in); err != nil {
t.Fatalf("write failed: %v", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush failed: %v", err)
}
if want, got := tc.out, buf.String(); want != got {
t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
}
buf.Reset()
}
}
func TestWriteSummary(t *testing.T) {
sumVec := prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "name",
Help: "docstring",
Namespace: "grafana",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"labelname"},
)
reg := prometheus.NewRegistry()
reg.MustRegister(sumVec)
b, err := NewBridge(&Config{
URL: "localhost:8080",
Gatherer: reg,
CountersAsDelta: true,
})
if err != nil {
t.Fatalf("cannot create bridge. err: %v", err)
}
sumVec.WithLabelValues("val1").Observe(float64(10))
sumVec.WithLabelValues("val1").Observe(float64(20))
sumVec.WithLabelValues("val1").Observe(float64(30))
sumVec.WithLabelValues("val2").Observe(float64(20))
sumVec.WithLabelValues("val2").Observe(float64(30))
sumVec.WithLabelValues("val2").Observe(float64(40))
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
now := model.Time(1477043083)
var buf bytes.Buffer
err = b.writeMetrics(&buf, mfs, "prefix.", now)
if err != nil {
t.Fatalf("error: %v", err)
}
want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043
prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043
prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043
prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
prefix.name_count.constname.constvalue.labelname.val1.count 3 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043
prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
prefix.name_count.constname.constvalue.labelname.val2.count 3 1477043
`
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
}
func TestWriteHistogram(t *testing.T) {
histVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "name",
Help: "docstring",
Namespace: "grafana",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
Buckets: []float64{0.01, 0.02, 0.05, 0.1},
},
[]string{"labelname"},
)
reg := prometheus.NewRegistry()
reg.MustRegister(histVec)
b, err := NewBridge(&Config{
URL: "localhost:8080",
Gatherer: reg,
CountersAsDelta: true,
})
if err != nil {
t.Fatalf("error creating bridge: %v", err)
}
histVec.WithLabelValues("val1").Observe(float64(10))
histVec.WithLabelValues("val1").Observe(float64(20))
histVec.WithLabelValues("val1").Observe(float64(30))
histVec.WithLabelValues("val2").Observe(float64(20))
histVec.WithLabelValues("val2").Observe(float64(30))
histVec.WithLabelValues("val2").Observe(float64(40))
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
now := model.Time(1477043083)
var buf bytes.Buffer
err = b.writeMetrics(&buf, mfs, "prefix.", now)
if err != nil {
t.Fatalf("error: %v", err)
}
want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043
prefix.name_sum.constname.constvalue.labelname.val1.sum 60 1477043
prefix.name_count.constname.constvalue.labelname.val1.count 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043
prefix.name_sum.constname.constvalue.labelname.val2.sum 90 1477043
prefix.name_count.constname.constvalue.labelname.val2.count 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043
`
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
}
func TestCounterVec(t *testing.T) {
cntVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "page_response",
Namespace: "grafana",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
},
[]string{"labelname"},
)
apicntVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "api_response",
Namespace: "grafana",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
},
[]string{"labelname"},
)
reg := prometheus.NewRegistry()
reg.MustRegister(cntVec)
reg.MustRegister(apicntVec)
cntVec.WithLabelValues("val1").Inc()
cntVec.WithLabelValues("val2").Inc()
apicntVec.WithLabelValues("val1").Inc()
apicntVec.WithLabelValues("val2").Inc()
b, err := NewBridge(&Config{
URL: "localhost:8080",
Gatherer: reg,
CountersAsDelta: true,
})
if err != nil {
t.Fatalf("error creating bridge: %v", err)
}
// first collect
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
var buf bytes.Buffer
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083))
if err != nil {
t.Fatalf("error: %v", err)
}
want := `prefix.api.response.constname.constvalue.labelname.val1.count 1 1477043
prefix.api.response.constname.constvalue.labelname.val2.count 1 1477043
prefix.page.response.constname.constvalue.labelname.val1.count 1 1477043
prefix.page.response.constname.constvalue.labelname.val2.count 1 1477043
`
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
//next collect
cntVec.WithLabelValues("val1").Inc()
cntVec.WithLabelValues("val2").Inc()
apicntVec.WithLabelValues("val1").Inc()
apicntVec.WithLabelValues("val2").Inc()
mfs, err = reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
buf = bytes.Buffer{}
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477053083))
if err != nil {
t.Fatalf("error: %v", err)
}
want2 := `prefix.api.response.constname.constvalue.labelname.val1.count 1 1477053
prefix.api.response.constname.constvalue.labelname.val2.count 1 1477053
prefix.page.response.constname.constvalue.labelname.val1.count 1 1477053
prefix.page.response.constname.constvalue.labelname.val2.count 1 1477053
`
if got := buf.String(); want2 != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want2, got)
}
}
func TestCounter(t *testing.T) {
cntVec := prometheus.NewCounter(
prometheus.CounterOpts{
Name: "page_response",
Help: "docstring",
Namespace: "grafana",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
})
reg := prometheus.NewRegistry()
reg.MustRegister(cntVec)
cntVec.Inc()
b, err := NewBridge(&Config{
URL: "localhost:8080",
Gatherer: reg,
CountersAsDelta: true,
})
if err != nil {
t.Fatalf("error creating bridge: %v", err)
}
// first collect
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
var buf bytes.Buffer
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083))
if err != nil {
t.Fatalf("error: %v", err)
}
want := "prefix.page.response.constname.constvalue.count 1 1477043\n"
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
//next collect
cntVec.Inc()
mfs, err = reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
buf = bytes.Buffer{}
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477053083))
if err != nil {
t.Fatalf("error: %v", err)
}
want2 := "prefix.page.response.constname.constvalue.count 1 1477053\n"
if got := buf.String(); want2 != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want2, got)
}
}
func TestTrimGrafanaNamespace(t *testing.T) {
cntVec := prometheus.NewCounter(
prometheus.CounterOpts{
Name: "http_request_total",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
})
reg := prometheus.NewRegistry()
reg.MustRegister(cntVec)
cntVec.Inc()
b, err := NewBridge(&Config{
URL: "localhost:8080",
Gatherer: reg,
CountersAsDelta: true,
})
if err != nil {
t.Fatalf("error creating bridge: %v", err)
}
// first collect
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
var buf bytes.Buffer
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083))
if err != nil {
t.Fatalf("error: %v", err)
}
want := "prefix.http_request_total.constname.constvalue.count 1 1477043\n"
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
}
func TestSkipNanValues(t *testing.T) {
cntVec := prometheus.NewSummary(
prometheus.SummaryOpts{
Name: "http_request_total",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
})
reg := prometheus.NewRegistry()
reg.MustRegister(cntVec)
b, err := NewBridge(&Config{
URL: "localhost:8080",
Gatherer: reg,
CountersAsDelta: true,
})
if err != nil {
t.Fatalf("error creating bridge: %v", err)
}
// first collect
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
var buf bytes.Buffer
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083))
if err != nil {
t.Fatalf("error: %v", err)
}
want := `prefix.http_request_total_sum.constname.constvalue 0 1477043
prefix.http_request_total_count.constname.constvalue.count 0 1477043
`
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
}
func TestPush(t *testing.T) {
reg := prometheus.NewRegistry()
cntVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "name",
Help: "docstring",
Namespace: "grafana",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
},
[]string{"labelname"},
)
cntVec.WithLabelValues("val1").Inc()
cntVec.WithLabelValues("val2").Inc()
reg.MustRegister(cntVec)
host := "localhost"
port := ":56789"
b, err := NewBridge(&Config{
URL: host + port,
Gatherer: reg,
Prefix: "prefix.",
})
if err != nil {
t.Fatalf("error creating bridge: %v", err)
}
nmg, err := newMockGraphite(port)
if err != nil {
t.Fatalf("error creating mock graphite: %v", err)
}
defer nmg.Close()
err = b.Push()
if err != nil {
t.Fatalf("error pushing: %v", err)
}
wants := []string{
"prefix.name.constname.constvalue.labelname.val1.count 1",
"prefix.name.constname.constvalue.labelname.val2.count 1",
}
select {
case got := <-nmg.readc:
for _, want := range wants {
matched, err := regexp.MatchString(want, got)
if err != nil {
t.Fatalf("error pushing: %v", err)
}
if !matched {
t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got)
}
}
return
case err := <-nmg.errc:
t.Fatalf("error reading push: %v", err)
case <-time.After(50 * time.Millisecond):
t.Fatalf("no result from graphite server")
}
}
func newMockGraphite(port string) (*mockGraphite, error) {
readc := make(chan string)
errc := make(chan error)
ln, err := net.Listen("tcp", port)
if err != nil {
return nil, err
}
go func() {
conn, err := ln.Accept()
if err != nil {
errc <- err
}
var b bytes.Buffer
io.Copy(&b, conn)
readc <- b.String()
}()
return &mockGraphite{
readc: readc,
errc: errc,
Listener: ln,
}, nil
}
type mockGraphite struct {
readc chan string
errc chan error
net.Listener
}

View File

@ -1,189 +0,0 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.
package metrics
// Histograms calculate distribution statistics from a series of int64 values.
type Histogram interface {
Metric
Clear()
Count() int64
Max() int64
Mean() float64
Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
StdDev() float64
Sum() int64
Update(int64)
Variance() float64
}
func NewHistogram(meta *MetricMeta, s Sample) Histogram {
return &StandardHistogram{
MetricMeta: meta,
sample: s,
}
}
// HistogramSnapshot is a read-only copy of another Histogram.
type HistogramSnapshot struct {
*MetricMeta
sample *SampleSnapshot
}
// Clear panics.
func (*HistogramSnapshot) Clear() {
panic("Clear called on a HistogramSnapshot")
}
// Count returns the number of samples recorded at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
// Max returns the maximum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
// Mean returns the mean of the values in the sample at the time the snapshot
// was taken.
func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
// Min returns the minimum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
// Percentile returns an arbitrary percentile of values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) Percentile(p float64) float64 {
return h.sample.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the sample
// at the time the snapshot was taken.
func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
return h.sample.Percentiles(ps)
}
// Sample returns the Sample underlying the histogram.
func (h *HistogramSnapshot) Sample() Sample { return h.sample }
// Snapshot returns the snapshot.
func (h *HistogramSnapshot) Snapshot() Metric { return h }
// StdDev returns the standard deviation of the values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
// Sum returns the sum in the sample at the time the snapshot was taken.
func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
// Update panics.
func (*HistogramSnapshot) Update(int64) {
panic("Update called on a HistogramSnapshot")
}
// Variance returns the variance of inputs at the time the snapshot was taken.
func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
// NilHistogram is a no-op Histogram.
type NilHistogram struct {
*MetricMeta
}
// Clear is a no-op.
func (NilHistogram) Clear() {}
// Count is a no-op.
func (NilHistogram) Count() int64 { return 0 }
// Max is a no-op.
func (NilHistogram) Max() int64 { return 0 }
// Mean is a no-op.
func (NilHistogram) Mean() float64 { return 0.0 }
// Min is a no-op.
func (NilHistogram) Min() int64 { return 0 }
// Percentile is a no-op.
func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
// Percentiles is a no-op.
func (NilHistogram) Percentiles(ps []float64) []float64 {
return make([]float64, len(ps))
}
// Sample is a no-op.
func (NilHistogram) Sample() Sample { return NilSample{} }
// Snapshot is a no-op.
func (n NilHistogram) Snapshot() Metric { return n }
// StdDev is a no-op.
func (NilHistogram) StdDev() float64 { return 0.0 }
// Sum is a no-op.
func (NilHistogram) Sum() int64 { return 0 }
// Update is a no-op.
func (NilHistogram) Update(v int64) {}
// Variance is a no-op.
func (NilHistogram) Variance() float64 { return 0.0 }
// StandardHistogram is the standard implementation of a Histogram and uses a
// Sample to bound its memory use.
type StandardHistogram struct {
*MetricMeta
sample Sample
}
// Clear clears the histogram and its sample.
func (h *StandardHistogram) Clear() { h.sample.Clear() }
// Count returns the number of samples recorded since the histogram was last
// cleared.
func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
// Max returns the maximum value in the sample.
func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
// Mean returns the mean of the values in the sample.
func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
// Min returns the minimum value in the sample.
func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
// Percentile returns an arbitrary percentile of the values in the sample.
func (h *StandardHistogram) Percentile(p float64) float64 {
return h.sample.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
return h.sample.Percentiles(ps)
}
// Sample returns the Sample underlying the histogram.
func (h *StandardHistogram) Sample() Sample { return h.sample }
// Snapshot returns a read-only copy of the histogram.
func (h *StandardHistogram) Snapshot() Metric {
return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
}
// StdDev returns the standard deviation of the values in the sample.
func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
// Sum returns the sum in the sample.
func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
// Update samples a new value.
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
// Variance returns the variance of the values in the sample.
func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }

View File

@ -1,90 +0,0 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.
package metrics
import "testing"
func BenchmarkHistogram(b *testing.B) {
h := NewHistogram(nil, NewUniformSample(100))
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Update(int64(i))
}
}
func TestHistogram10000(t *testing.T) {
h := NewHistogram(nil, NewUniformSample(100000))
for i := 1; i <= 10000; i++ {
h.Update(int64(i))
}
testHistogram10000(t, h)
}
func TestHistogramEmpty(t *testing.T) {
h := NewHistogram(nil, NewUniformSample(100))
if count := h.Count(); 0 != count {
t.Errorf("h.Count(): 0 != %v\n", count)
}
if min := h.Min(); 0 != min {
t.Errorf("h.Min(): 0 != %v\n", min)
}
if max := h.Max(); 0 != max {
t.Errorf("h.Max(): 0 != %v\n", max)
}
if mean := h.Mean(); 0.0 != mean {
t.Errorf("h.Mean(): 0.0 != %v\n", mean)
}
if stdDev := h.StdDev(); 0.0 != stdDev {
t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
}
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
if 0.0 != ps[0] {
t.Errorf("median: 0.0 != %v\n", ps[0])
}
if 0.0 != ps[1] {
t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
}
if 0.0 != ps[2] {
t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
}
}
func TestHistogramSnapshot(t *testing.T) {
h := NewHistogram(nil, NewUniformSample(100000))
for i := 1; i <= 10000; i++ {
h.Update(int64(i))
}
snapshot := h.Snapshot().(Histogram)
h.Update(0)
testHistogram10000(t, snapshot)
}
func testHistogram10000(t *testing.T, h Histogram) {
if count := h.Count(); 10000 != count {
t.Errorf("h.Count(): 10000 != %v\n", count)
}
if min := h.Min(); 1 != min {
t.Errorf("h.Min(): 1 != %v\n", min)
}
if max := h.Max(); 10000 != max {
t.Errorf("h.Max(): 10000 != %v\n", max)
}
if mean := h.Mean(); 5000.5 != mean {
t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
}
if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
}
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
if 5000.5 != ps[0] {
t.Errorf("median: 5000.5 != %v\n", ps[0])
}
if 7500.75 != ps[1] {
t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
}
if 9900.99 != ps[2] {
t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
}
}

38
pkg/metrics/init.go Normal file
View File

@ -0,0 +1,38 @@
package metrics
import (
"context"
ini "gopkg.in/ini.v1"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/metrics/graphitebridge"
)
var metricsLogger log.Logger = log.New("metrics")
type logWrapper struct {
logger log.Logger
}
func (lw *logWrapper) Println(v ...interface{}) {
lw.logger.Info("graphite metric bridge", v...)
}
func Init(file *ini.File) {
cfg := ReadSettings(file)
internalInit(cfg)
}
func internalInit(settings *MetricSettings) {
initMetricVars(settings)
if settings.GraphiteBridgeConfig != nil {
bridge, err := graphitebridge.NewBridge(settings.GraphiteBridgeConfig)
if err != nil {
metricsLogger.Error("failed to create graphite bridge", "error", err)
} else {
go bridge.Run(context.Background())
}
}
}
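
A hedged sketch of calling the new entry point (the ini path below is an assumption; any *ini.File works):

package main

import (
	"log"

	ini "gopkg.in/ini.v1"

	"github.com/grafana/grafana/pkg/metrics"
)

func main() {
	cfg, err := ini.Load("conf/defaults.ini") // path is an assumption
	if err != nil {
		log.Fatal(err)
	}
	// Parses the metrics settings, registers the Prometheus collectors
	// and, when a graphite bridge is configured, starts it on a
	// background goroutine.
	metrics.Init(cfg)
}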

View File

@ -1,221 +0,0 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.
package metrics
import (
"sync"
"time"
)
// Meters count events to produce exponentially-weighted moving average
// rates at one, five, and fifteen minutes, plus a mean rate.
type Meter interface {
Metric
Count() int64
Mark(int64)
Rate1() float64
Rate5() float64
Rate15() float64
RateMean() float64
}
// NewMeter constructs a new StandardMeter and launches a goroutine.
func NewMeter(meta *MetricMeta) Meter {
if UseNilMetrics {
return NilMeter{}
}
m := newStandardMeter(meta)
arbiter.Lock()
defer arbiter.Unlock()
arbiter.meters = append(arbiter.meters, m)
if !arbiter.started {
arbiter.started = true
go arbiter.tick()
}
return m
}
type MeterSnapshot struct {
*MetricMeta
count int64
rate1, rate5, rate15, rateMean float64
}
// Count returns the count of events at the time the snapshot was taken.
func (m *MeterSnapshot) Count() int64 { return m.count }
// Mark panics.
func (*MeterSnapshot) Mark(n int64) {
panic("Mark called on a MeterSnapshot")
}
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
// Snapshot returns the snapshot.
func (m *MeterSnapshot) Snapshot() Metric { return m }
// NilMeter is a no-op Meter.
type NilMeter struct{ *MetricMeta }
// Count is a no-op.
func (NilMeter) Count() int64 { return 0 }
// Mark is a no-op.
func (NilMeter) Mark(n int64) {}
// Rate1 is a no-op.
func (NilMeter) Rate1() float64 { return 0.0 }
// Rate5 is a no-op.
func (NilMeter) Rate5() float64 { return 0.0 }
// Rate15 is a no-op.
func (NilMeter) Rate15() float64 { return 0.0 }
// RateMean is a no-op.
func (NilMeter) RateMean() float64 { return 0.0 }
// Snapshot is a no-op.
func (NilMeter) Snapshot() Metric { return NilMeter{} }
// StandardMeter is the standard implementation of a Meter.
type StandardMeter struct {
*MetricMeta
lock sync.RWMutex
snapshot *MeterSnapshot
a1, a5, a15 EWMA
startTime time.Time
}
func newStandardMeter(meta *MetricMeta) *StandardMeter {
return &StandardMeter{
MetricMeta: meta,
snapshot: &MeterSnapshot{MetricMeta: meta},
a1: NewEWMA1(),
a5: NewEWMA5(),
a15: NewEWMA15(),
startTime: time.Now(),
}
}
// Count returns the number of events recorded.
func (m *StandardMeter) Count() int64 {
m.lock.RLock()
count := m.snapshot.count
m.lock.RUnlock()
return count
}
// Mark records the occurrence of n events.
func (m *StandardMeter) Mark(n int64) {
m.lock.Lock()
defer m.lock.Unlock()
m.snapshot.count += n
m.a1.Update(n)
m.a5.Update(n)
m.a15.Update(n)
m.updateSnapshot()
}
// Rate1 returns the one-minute moving average rate of events per second.
func (m *StandardMeter) Rate1() float64 {
m.lock.RLock()
rate1 := m.snapshot.rate1
m.lock.RUnlock()
return rate1
}
// Rate5 returns the five-minute moving average rate of events per second.
func (m *StandardMeter) Rate5() float64 {
m.lock.RLock()
rate5 := m.snapshot.rate5
m.lock.RUnlock()
return rate5
}
// Rate15 returns the fifteen-minute moving average rate of events per second.
func (m *StandardMeter) Rate15() float64 {
m.lock.RLock()
rate15 := m.snapshot.rate15
m.lock.RUnlock()
return rate15
}
// RateMean returns the meter's mean rate of events per second.
func (m *StandardMeter) RateMean() float64 {
m.lock.RLock()
rateMean := m.snapshot.rateMean
m.lock.RUnlock()
return rateMean
}
// Snapshot returns a read-only copy of the meter.
func (m *StandardMeter) Snapshot() Metric {
m.lock.RLock()
snapshot := *m.snapshot
m.lock.RUnlock()
return &snapshot
}
func (m *StandardMeter) updateSnapshot() {
// should run with write lock held on m.lock
snapshot := m.snapshot
snapshot.rate1 = m.a1.Rate()
snapshot.rate5 = m.a5.Rate()
snapshot.rate15 = m.a15.Rate()
snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
}
func (m *StandardMeter) tick() {
m.lock.Lock()
defer m.lock.Unlock()
m.a1.Tick()
m.a5.Tick()
m.a15.Tick()
m.updateSnapshot()
}
type meterArbiter struct {
sync.RWMutex
started bool
meters []*StandardMeter
ticker *time.Ticker
}
var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
// Ticks meters on the scheduled interval
func (ma *meterArbiter) tick() {
for {
select {
case <-ma.ticker.C:
ma.tickMeters()
}
}
}
func (ma *meterArbiter) tickMeters() {
ma.RLock()
defer ma.RUnlock()
for _, meter := range ma.meters {
meter.tick()
}
}

View File

@ -1,146 +1,407 @@
package metrics
var MetricStats Registry
var UseNilMetrics bool
import (
"bytes"
"encoding/json"
"net/http"
"runtime"
"strings"
"time"
func init() {
// init with nil metrics
initMetricVars(&MetricSettings{})
}
var (
M_Instance_Start Counter
M_Page_Status_200 Counter
M_Page_Status_500 Counter
M_Page_Status_404 Counter
M_Page_Status_Unknown Counter
M_Api_Status_200 Counter
M_Api_Status_404 Counter
M_Api_Status_500 Counter
M_Api_Status_Unknown Counter
M_Proxy_Status_200 Counter
M_Proxy_Status_404 Counter
M_Proxy_Status_500 Counter
M_Proxy_Status_Unknown Counter
M_Api_User_SignUpStarted Counter
M_Api_User_SignUpCompleted Counter
M_Api_User_SignUpInvite Counter
M_Api_Dashboard_Save Timer
M_Api_Dashboard_Get Timer
M_Api_Dashboard_Search Timer
M_Api_Admin_User_Create Counter
M_Api_Login_Post Counter
M_Api_Login_OAuth Counter
M_Api_Org_Create Counter
M_Api_Dashboard_Snapshot_Create Counter
M_Api_Dashboard_Snapshot_External Counter
M_Api_Dashboard_Snapshot_Get Counter
M_Models_Dashboard_Insert Counter
M_Alerting_Result_State_Alerting Counter
M_Alerting_Result_State_Ok Counter
M_Alerting_Result_State_Paused Counter
M_Alerting_Result_State_NoData Counter
M_Alerting_Result_State_Pending Counter
M_Alerting_Notification_Sent_Slack Counter
M_Alerting_Notification_Sent_Email Counter
M_Alerting_Notification_Sent_Webhook Counter
M_Alerting_Notification_Sent_DingDing Counter
M_Alerting_Notification_Sent_PagerDuty Counter
M_Alerting_Notification_Sent_LINE Counter
M_Alerting_Notification_Sent_Victorops Counter
M_Alerting_Notification_Sent_OpsGenie Counter
M_Alerting_Notification_Sent_Telegram Counter
M_Alerting_Notification_Sent_Threema Counter
M_Alerting_Notification_Sent_Sensu Counter
M_Alerting_Notification_Sent_Pushover Counter
M_Aws_CloudWatch_GetMetricStatistics Counter
M_Aws_CloudWatch_ListMetrics Counter
M_DB_DataSource_QueryById Counter
// Timers
M_DataSource_ProxyReq_Timer Timer
M_Alerting_Execution_Time Timer
// StatTotals
M_Alerting_Active_Alerts Gauge
M_StatTotal_Dashboards Gauge
M_StatTotal_Users Gauge
M_StatTotal_Orgs Gauge
M_StatTotal_Playlists Gauge
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/setting"
"github.com/prometheus/client_golang/prometheus"
)
func initMetricVars(settings *MetricSettings) {
UseNilMetrics = settings.Enabled == false
MetricStats = NewRegistry()
const exporterName = "grafana"
M_Instance_Start = RegCounter("instance_start")
var (
M_Instance_Start prometheus.Counter
M_Page_Status *prometheus.CounterVec
M_Api_Status *prometheus.CounterVec
M_Proxy_Status *prometheus.CounterVec
M_Http_Request_Total *prometheus.CounterVec
M_Http_Request_Summary *prometheus.SummaryVec
M_Page_Status_200 = RegCounter("page.resp_status", "code", "200")
M_Page_Status_500 = RegCounter("page.resp_status", "code", "500")
M_Page_Status_404 = RegCounter("page.resp_status", "code", "404")
M_Page_Status_Unknown = RegCounter("page.resp_status", "code", "unknown")
M_Api_User_SignUpStarted prometheus.Counter
M_Api_User_SignUpCompleted prometheus.Counter
M_Api_User_SignUpInvite prometheus.Counter
M_Api_Dashboard_Save prometheus.Summary
M_Api_Dashboard_Get prometheus.Summary
M_Api_Dashboard_Search prometheus.Summary
M_Api_Admin_User_Create prometheus.Counter
M_Api_Login_Post prometheus.Counter
M_Api_Login_OAuth prometheus.Counter
M_Api_Org_Create prometheus.Counter
M_Api_Status_200 = RegCounter("api.resp_status", "code", "200")
M_Api_Status_404 = RegCounter("api.resp_status", "code", "404")
M_Api_Status_500 = RegCounter("api.resp_status", "code", "500")
M_Api_Status_Unknown = RegCounter("api.resp_status", "code", "unknown")
M_Proxy_Status_200 = RegCounter("proxy.resp_status", "code", "200")
M_Proxy_Status_404 = RegCounter("proxy.resp_status", "code", "404")
M_Proxy_Status_500 = RegCounter("proxy.resp_status", "code", "500")
M_Proxy_Status_Unknown = RegCounter("proxy.resp_status", "code", "unknown")
M_Api_User_SignUpStarted = RegCounter("api.user.signup_started")
M_Api_User_SignUpCompleted = RegCounter("api.user.signup_completed")
M_Api_User_SignUpInvite = RegCounter("api.user.signup_invite")
M_Api_Dashboard_Save = RegTimer("api.dashboard.save")
M_Api_Dashboard_Get = RegTimer("api.dashboard.get")
M_Api_Dashboard_Search = RegTimer("api.dashboard.search")
M_Api_Admin_User_Create = RegCounter("api.admin.user_create")
M_Api_Login_Post = RegCounter("api.login.post")
M_Api_Login_OAuth = RegCounter("api.login.oauth")
M_Api_Org_Create = RegCounter("api.org.create")
M_Api_Dashboard_Snapshot_Create = RegCounter("api.dashboard_snapshot.create")
M_Api_Dashboard_Snapshot_External = RegCounter("api.dashboard_snapshot.external")
M_Api_Dashboard_Snapshot_Get = RegCounter("api.dashboard_snapshot.get")
M_Models_Dashboard_Insert = RegCounter("models.dashboard.insert")
M_Alerting_Result_State_Alerting = RegCounter("alerting.result", "state", "alerting")
M_Alerting_Result_State_Ok = RegCounter("alerting.result", "state", "ok")
M_Alerting_Result_State_Paused = RegCounter("alerting.result", "state", "paused")
M_Alerting_Result_State_NoData = RegCounter("alerting.result", "state", "no_data")
M_Alerting_Result_State_Pending = RegCounter("alerting.result", "state", "pending")
M_Alerting_Notification_Sent_Slack = RegCounter("alerting.notifications_sent", "type", "slack")
M_Alerting_Notification_Sent_Email = RegCounter("alerting.notifications_sent", "type", "email")
M_Alerting_Notification_Sent_Webhook = RegCounter("alerting.notifications_sent", "type", "webhook")
M_Alerting_Notification_Sent_DingDing = RegCounter("alerting.notifications_sent", "type", "dingding")
M_Alerting_Notification_Sent_PagerDuty = RegCounter("alerting.notifications_sent", "type", "pagerduty")
M_Alerting_Notification_Sent_Victorops = RegCounter("alerting.notifications_sent", "type", "victorops")
M_Alerting_Notification_Sent_OpsGenie = RegCounter("alerting.notifications_sent", "type", "opsgenie")
M_Alerting_Notification_Sent_Telegram = RegCounter("alerting.notifications_sent", "type", "telegram")
M_Alerting_Notification_Sent_Threema = RegCounter("alerting.notifications_sent", "type", "threema")
M_Alerting_Notification_Sent_Sensu = RegCounter("alerting.notifications_sent", "type", "sensu")
M_Alerting_Notification_Sent_LINE = RegCounter("alerting.notifications_sent", "type", "LINE")
M_Alerting_Notification_Sent_Pushover = RegCounter("alerting.notifications_sent", "type", "pushover")
M_Aws_CloudWatch_GetMetricStatistics = RegCounter("aws.cloudwatch.get_metric_statistics")
M_Aws_CloudWatch_ListMetrics = RegCounter("aws.cloudwatch.list_metrics")
M_DB_DataSource_QueryById = RegCounter("db.datasource.query_by_id")
M_Api_Dashboard_Snapshot_Create prometheus.Counter
M_Api_Dashboard_Snapshot_External prometheus.Counter
M_Api_Dashboard_Snapshot_Get prometheus.Counter
M_Api_Dashboard_Insert prometheus.Counter
M_Alerting_Result_State *prometheus.CounterVec
M_Alerting_Notification_Sent *prometheus.CounterVec
M_Aws_CloudWatch_GetMetricStatistics prometheus.Counter
M_Aws_CloudWatch_ListMetrics prometheus.Counter
M_DB_DataSource_QueryById prometheus.Counter
// Timers
M_DataSource_ProxyReq_Timer = RegTimer("api.dataproxy.request.all")
M_Alerting_Execution_Time = RegTimer("alerting.execution_time")
M_DataSource_ProxyReq_Timer prometheus.Summary
M_Alerting_Execution_Time prometheus.Summary
// StatTotals
M_Alerting_Active_Alerts = RegGauge("alerting.active_alerts")
M_StatTotal_Dashboards = RegGauge("stat_totals", "stat", "dashboards")
M_StatTotal_Users = RegGauge("stat_totals", "stat", "users")
M_StatTotal_Orgs = RegGauge("stat_totals", "stat", "orgs")
M_StatTotal_Playlists = RegGauge("stat_totals", "stat", "playlists")
M_Alerting_Active_Alerts prometheus.Gauge
M_StatTotal_Dashboards prometheus.Gauge
M_StatTotal_Users prometheus.Gauge
M_StatTotal_Orgs prometheus.Gauge
M_StatTotal_Playlists prometheus.Gauge
M_Grafana_Version *prometheus.GaugeVec
)
func init() {
M_Instance_Start = prometheus.NewCounter(prometheus.CounterOpts{
Name: "instance_start_total",
Help: "counter for started instances",
Namespace: exporterName,
})
M_Page_Status = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "page_response_status_total",
Help: "page http response status",
Namespace: exporterName,
},
[]string{"code"},
)
M_Api_Status = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "api_response_status_total",
Help: "api http response status",
Namespace: exporterName,
},
[]string{"code"},
)
M_Proxy_Status = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "proxy_response_status_total",
Help: "proxy http response status",
Namespace: exporterName,
},
[]string{"code"},
)
M_Http_Request_Total = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "http_request_total",
Help: "http request counter",
},
[]string{"handler", "statuscode", "method"},
)
M_Http_Request_Summary = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "http_request_duration_milliseconds",
Help: "http request summary",
},
[]string{"handler", "statuscode", "method"},
)
M_Api_User_SignUpStarted = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_user_signup_started_total",
Help: "number of users who started the signup flow",
Namespace: exporterName,
})
M_Api_User_SignUpCompleted = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_user_signup_completed_total",
Help: "number of users who completed the signup flow",
Namespace: exporterName,
})
M_Api_User_SignUpInvite = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_user_signup_invite_total",
Help: "number of users who have been invited",
Namespace: exporterName,
})
M_Api_Dashboard_Save = prometheus.NewSummary(prometheus.SummaryOpts{
Name: "api_dashboard_save_milliseconds",
Help: "summary for dashboard save duration",
Namespace: exporterName,
})
M_Api_Dashboard_Get = prometheus.NewSummary(prometheus.SummaryOpts{
Name: "api_dashboard_get_milliseconds",
Help: "summary for dashboard get duration",
Namespace: exporterName,
})
M_Api_Dashboard_Search = prometheus.NewSummary(prometheus.SummaryOpts{
Name: "api_dashboard_search_milliseconds",
Help: "summary for dashboard search duration",
Namespace: exporterName,
})
M_Api_Admin_User_Create = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_admin_user_created_total",
Help: "api admin user created counter",
Namespace: exporterName,
})
M_Api_Login_Post = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_login_post_total",
Help: "api login post counter",
Namespace: exporterName,
})
M_Api_Login_OAuth = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_login_oauth_total",
Help: "api login oauth counter",
Namespace: exporterName,
})
M_Api_Org_Create = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_org_create_total",
Help: "api org created counter",
Namespace: exporterName,
})
M_Api_Dashboard_Snapshot_Create = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_dashboard_snapshot_create_total",
Help: "dashboard snapshots created",
Namespace: exporterName,
})
M_Api_Dashboard_Snapshot_External = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_dashboard_snapshot_external_total",
Help: "external dashboard snapshots created",
Namespace: exporterName,
})
M_Api_Dashboard_Snapshot_Get = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_dashboard_snapshot_get_total",
Help: "loaded dashboards",
Namespace: exporterName,
})
M_Api_Dashboard_Insert = prometheus.NewCounter(prometheus.CounterOpts{
Name: "api_models_dashboard_insert_total",
Help: "dashboards inserted",
Namespace: exporterName,
})
M_Alerting_Result_State = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "alerting_result_total",
Help: "alert execution result counter",
Namespace: exporterName,
}, []string{"state"})
M_Alerting_Notification_Sent = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "alerting_notification_sent_total",
Help: "counter for how many alert notifications have been sent",
Namespace: exporterName,
}, []string{"type"})
M_Aws_CloudWatch_GetMetricStatistics = prometheus.NewCounter(prometheus.CounterOpts{
Name: "aws_cloudwatch_get_metric_statistics_total",
Help: "counter for getting metric statistics from aws",
Namespace: exporterName,
})
M_Aws_CloudWatch_ListMetrics = prometheus.NewCounter(prometheus.CounterOpts{
Name: "aws_cloudwatch_list_metrics_total",
Help: "counter for getting list of metrics from aws",
Namespace: exporterName,
})
M_DB_DataSource_QueryById = prometheus.NewCounter(prometheus.CounterOpts{
Name: "db_datasource_query_by_id_total",
Help: "counter for getting datasource by id",
Namespace: exporterName,
})
M_DataSource_ProxyReq_Timer = prometheus.NewSummary(prometheus.SummaryOpts{
Name: "api_dataproxy_request_all_milliseconds",
Help: "summary for data proxy request duration",
Namespace: exporterName,
})
M_Alerting_Execution_Time = prometheus.NewSummary(prometheus.SummaryOpts{
Name: "alerting_execution_time_milliseconds",
Help: "summary of alert execution duration",
Namespace: exporterName,
})
M_Alerting_Active_Alerts = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "alerting_active_alerts",
Help: "number of active alerts",
Namespace: exporterName,
})
M_StatTotal_Dashboards = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "stat_totals_dashboard",
Help: "total number of dashboards",
Namespace: exporterName,
})
M_StatTotal_Users = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "stat_total_users",
Help: "total number of users",
Namespace: exporterName,
})
M_StatTotal_Orgs = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "stat_total_orgs",
Help: "total number of orgs",
Namespace: exporterName,
})
M_StatTotal_Playlists = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "stat_total_playlists",
Help: "total number of playlists",
Namespace: exporterName,
})
M_Grafana_Version = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "info",
Help: "Information about the Grafana instance",
Namespace: exporterName,
}, []string{"version"})
}
func initMetricVars(settings *MetricSettings) {
prometheus.MustRegister(
M_Instance_Start,
M_Page_Status,
M_Api_Status,
M_Proxy_Status,
M_Http_Request_Total,
M_Http_Request_Summary,
M_Api_User_SignUpStarted,
M_Api_User_SignUpCompleted,
M_Api_User_SignUpInvite,
M_Api_Dashboard_Save,
M_Api_Dashboard_Get,
M_Api_Dashboard_Search,
M_DataSource_ProxyReq_Timer,
M_Alerting_Execution_Time,
M_Api_Admin_User_Create,
M_Api_Login_Post,
M_Api_Login_OAuth,
M_Api_Org_Create,
M_Api_Dashboard_Snapshot_Create,
M_Api_Dashboard_Snapshot_External,
M_Api_Dashboard_Snapshot_Get,
M_Api_Dashboard_Insert,
M_Alerting_Result_State,
M_Alerting_Notification_Sent,
M_Aws_CloudWatch_GetMetricStatistics,
M_Aws_CloudWatch_ListMetrics,
M_DB_DataSource_QueryById,
M_Alerting_Active_Alerts,
M_StatTotal_Dashboards,
M_StatTotal_Users,
M_StatTotal_Orgs,
M_StatTotal_Playlists,
M_Grafana_Version)
go instrumentationLoop(settings)
}
func instrumentationLoop(settings *MetricSettings) chan struct{} {
M_Instance_Start.Inc()
onceEveryDayTick := time.NewTicker(time.Hour * 24)
secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds))
for {
select {
case <-onceEveryDayTick.C:
sendUsageStats()
case <-secondTicker.C:
updateTotalStats()
}
}
}
var metricPublishCounter int64 = 0
func updateTotalStats() {
metricPublishCounter++
if metricPublishCounter == 1 || metricPublishCounter%10 == 0 {
statsQuery := models.GetSystemStatsQuery{}
if err := bus.Dispatch(&statsQuery); err != nil {
metricsLogger.Error("Failed to get system stats", "error", err)
return
}
M_StatTotal_Dashboards.Set(float64(statsQuery.Result.Dashboards))
M_StatTotal_Users.Set(float64(statsQuery.Result.Users))
M_StatTotal_Playlists.Set(float64(statsQuery.Result.Playlists))
M_StatTotal_Orgs.Set(float64(statsQuery.Result.Orgs))
}
}
func sendUsageStats() {
if !setting.ReportingEnabled {
return
}
metricsLogger.Debug("Sending anonymous usage stats to stats.grafana.org")
version := strings.Replace(setting.BuildVersion, ".", "_", -1)
metrics := map[string]interface{}{}
report := map[string]interface{}{
"version": version,
"metrics": metrics,
"os": runtime.GOOS,
"arch": runtime.GOARCH,
}
statsQuery := models.GetSystemStatsQuery{}
if err := bus.Dispatch(&statsQuery); err != nil {
metricsLogger.Error("Failed to get system stats", "error", err)
return
}
metrics["stats.dashboards.count"] = statsQuery.Result.Dashboards
metrics["stats.users.count"] = statsQuery.Result.Users
metrics["stats.orgs.count"] = statsQuery.Result.Orgs
metrics["stats.playlist.count"] = statsQuery.Result.Playlists
metrics["stats.plugins.apps.count"] = len(plugins.Apps)
metrics["stats.plugins.panels.count"] = len(plugins.Panels)
metrics["stats.plugins.datasources.count"] = len(plugins.DataSources)
metrics["stats.alerts.count"] = statsQuery.Result.Alerts
metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers
metrics["stats.datasources.count"] = statsQuery.Result.Datasources
dsStats := models.GetDataSourceStatsQuery{}
if err := bus.Dispatch(&dsStats); err != nil {
metricsLogger.Error("Failed to get datasource stats", "error", err)
return
}
// send counters for each data source, but ignore custom data sources,
// since sending their names could leak sensitive information
dsOtherCount := 0
for _, dsStat := range dsStats.Result {
if models.IsKnownDataSourcePlugin(dsStat.Type) {
metrics["stats.ds."+dsStat.Type+".count"] = dsStat.Count
} else {
dsOtherCount += dsStat.Count
}
}
metrics["stats.ds.other.count"] = dsOtherCount
out, _ := json.MarshalIndent(report, "", " ")
data := bytes.NewBuffer(out)
client := http.Client{Timeout: time.Duration(5 * time.Second)}
go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data)
}
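
A short illustrative fragment of how instrumented code can record into these new Prometheus-backed variables; the handler, status code and timing are assumptions, not taken from this commit:

package main

import (
	"time"

	"github.com/grafana/grafana/pkg/metrics"
)

func saveDashboard(start time.Time) {
	// CounterVec partitioned by response code, as declared above.
	metrics.M_Page_Status.WithLabelValues("200").Inc()
	// The summary names end in _milliseconds, so observe in ms.
	metrics.M_Api_Dashboard_Save.Observe(float64(time.Since(start)) / float64(time.Millisecond))
}

func main() {
	saveDashboard(time.Now())
}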

View File

@ -1,135 +0,0 @@
package metrics
import (
"bytes"
"encoding/json"
"net/http"
"runtime"
"strings"
"time"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/log"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/setting"
)
var metricsLogger log.Logger = log.New("metrics")
var metricPublishCounter int64 = 0
func Init() {
settings := readSettings()
initMetricVars(settings)
go instrumentationLoop(settings)
}
func instrumentationLoop(settings *MetricSettings) chan struct{} {
M_Instance_Start.Inc(1)
onceEveryDayTick := time.NewTicker(time.Hour * 24)
secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds))
for {
select {
case <-onceEveryDayTick.C:
sendUsageStats()
case <-secondTicker.C:
if settings.Enabled {
sendMetrics(settings)
}
}
}
}
func sendMetrics(settings *MetricSettings) {
if len(settings.Publishers) == 0 {
return
}
updateTotalStats()
metrics := MetricStats.GetSnapshots()
for _, publisher := range settings.Publishers {
publisher.Publish(metrics)
}
}
func updateTotalStats() {
// every interval also publish totals
metricPublishCounter++
if metricPublishCounter%10 == 0 {
// get stats
statsQuery := m.GetSystemStatsQuery{}
if err := bus.Dispatch(&statsQuery); err != nil {
metricsLogger.Error("Failed to get system stats", "error", err)
return
}
M_StatTotal_Dashboards.Update(statsQuery.Result.Dashboards)
M_StatTotal_Users.Update(statsQuery.Result.Users)
M_StatTotal_Playlists.Update(statsQuery.Result.Playlists)
M_StatTotal_Orgs.Update(statsQuery.Result.Orgs)
}
}
func sendUsageStats() {
if !setting.ReportingEnabled {
return
}
metricsLogger.Debug("Sending anonymous usage stats to stats.grafana.org")
version := strings.Replace(setting.BuildVersion, ".", "_", -1)
metrics := map[string]interface{}{}
report := map[string]interface{}{
"version": version,
"metrics": metrics,
"os": runtime.GOOS,
"arch": runtime.GOARCH,
}
statsQuery := m.GetSystemStatsQuery{}
if err := bus.Dispatch(&statsQuery); err != nil {
metricsLogger.Error("Failed to get system stats", "error", err)
return
}
metrics["stats.dashboards.count"] = statsQuery.Result.Dashboards
metrics["stats.users.count"] = statsQuery.Result.Users
metrics["stats.orgs.count"] = statsQuery.Result.Orgs
metrics["stats.playlist.count"] = statsQuery.Result.Playlists
metrics["stats.plugins.apps.count"] = len(plugins.Apps)
metrics["stats.plugins.panels.count"] = len(plugins.Panels)
metrics["stats.plugins.datasources.count"] = len(plugins.DataSources)
metrics["stats.alerts.count"] = statsQuery.Result.Alerts
metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers
metrics["stats.datasources.count"] = statsQuery.Result.Datasources
dsStats := m.GetDataSourceStatsQuery{}
if err := bus.Dispatch(&dsStats); err != nil {
metricsLogger.Error("Failed to get datasource stats", "error", err)
return
}
// send counters for each data source, but ignore custom data sources,
// since sending their names could leak sensitive information
dsOtherCount := 0
for _, dsStat := range dsStats.Result {
if m.IsKnownDataSourcePlugin(dsStat.Type) {
metrics["stats.ds."+dsStat.Type+".count"] = dsStat.Count
} else {
dsOtherCount += dsStat.Count
}
}
metrics["stats.ds.other.count"] = dsOtherCount
out, _ := json.MarshalIndent(report, "", " ")
data := bytes.NewBuffer(out)
client := http.Client{Timeout: time.Duration(5 * time.Second)}
go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data)
}

View File

@ -1,37 +0,0 @@
package metrics
import "sync"
type Registry interface {
GetSnapshots() []Metric
Register(metric Metric)
}
// The standard implementation of a Registry is a mutex-protected map
// of names to metrics.
type StandardRegistry struct {
metrics []Metric
mutex sync.Mutex
}
// Create a new registry.
func NewRegistry() Registry {
return &StandardRegistry{
metrics: make([]Metric, 0),
}
}
func (r *StandardRegistry) Register(metric Metric) {
r.mutex.Lock()
defer r.mutex.Unlock()
r.metrics = append(r.metrics, metric)
}
// GetSnapshots returns a point-in-time copy of every registered metric.
func (r *StandardRegistry) GetSnapshots() []Metric {
metrics := make([]Metric, len(r.metrics))
for i, metric := range r.metrics {
metrics[i] = metric.Snapshot()
}
return metrics
}
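A minimal usage sketch for the registry above; the metric value and the sink are hypothetical names, standing in for any Metric implementation and publisher from this package:
reg := NewRegistry()
reg.Register(someTimer) // someTimer: any value implementing Metric (hypothetical)

// GetSnapshots returns point-in-time copies, so a publisher can read them
// without racing against concurrent updates.
for _, snapshot := range reg.GetSnapshots() {
    publish(snapshot) // publish: hypothetical sink function
}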

View File

@ -1,607 +0,0 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.
package metrics
import (
"math"
"math/rand"
"sort"
"sync"
"time"
)
const rescaleThreshold = time.Hour
// Samples maintain a statistically-significant selection of values from
// a stream.
type Sample interface {
Clear()
Count() int64
Max() int64
Mean() float64
Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
Size() int
Snapshot() Sample
StdDev() float64
Sum() int64
Update(int64)
Values() []int64
Variance() float64
}
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
// Decay Model for Streaming Systems".
//
// <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
type ExpDecaySample struct {
alpha float64
count int64
mutex sync.Mutex
reservoirSize int
t0, t1 time.Time
values *expDecaySampleHeap
}
// NewExpDecaySample constructs a new exponentially-decaying sample with the
// given reservoir size and alpha.
func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
s := &ExpDecaySample{
alpha: alpha,
reservoirSize: reservoirSize,
t0: time.Now(),
values: newExpDecaySampleHeap(reservoirSize),
}
s.t1 = s.t0.Add(rescaleThreshold)
return s
}
// Clear clears all samples.
func (s *ExpDecaySample) Clear() {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count = 0
s.t0 = time.Now()
s.t1 = s.t0.Add(rescaleThreshold)
s.values.Clear()
}
// Count returns the number of samples recorded, which may exceed the
// reservoir size.
func (s *ExpDecaySample) Count() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.count
}
// Max returns the maximum value in the sample, which may not be the maximum
// value ever to be part of the sample.
func (s *ExpDecaySample) Max() int64 {
return SampleMax(s.Values())
}
// Mean returns the mean of the values in the sample.
func (s *ExpDecaySample) Mean() float64 {
return SampleMean(s.Values())
}
// Min returns the minimum value in the sample, which may not be the minimum
// value ever to be part of the sample.
func (s *ExpDecaySample) Min() int64 {
return SampleMin(s.Values())
}
// Percentile returns an arbitrary percentile of values in the sample.
func (s *ExpDecaySample) Percentile(p float64) float64 {
return SamplePercentile(s.Values(), p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the
// sample.
func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
return SamplePercentiles(s.Values(), ps)
}
// Size returns the size of the sample, which is at most the reservoir size.
func (s *ExpDecaySample) Size() int {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.values.Size()
}
// Snapshot returns a read-only copy of the sample.
func (s *ExpDecaySample) Snapshot() Sample {
s.mutex.Lock()
defer s.mutex.Unlock()
vals := s.values.Values()
values := make([]int64, len(vals))
for i, v := range vals {
values[i] = v.v
}
return &SampleSnapshot{
count: s.count,
values: values,
}
}
// StdDev returns the standard deviation of the values in the sample.
func (s *ExpDecaySample) StdDev() float64 {
return SampleStdDev(s.Values())
}
// Sum returns the sum of the values in the sample.
func (s *ExpDecaySample) Sum() int64 {
return SampleSum(s.Values())
}
// Update samples a new value.
func (s *ExpDecaySample) Update(v int64) {
s.update(time.Now(), v)
}
// Values returns a copy of the values in the sample.
func (s *ExpDecaySample) Values() []int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
vals := s.values.Values()
values := make([]int64, len(vals))
for i, v := range vals {
values[i] = v.v
}
return values
}
// Variance returns the variance of the values in the sample.
func (s *ExpDecaySample) Variance() float64 {
return SampleVariance(s.Values())
}
// update samples a new value at a particular timestamp. This is a method all
// its own to facilitate testing.
func (s *ExpDecaySample) update(t time.Time, v int64) {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count++
if s.values.Size() == s.reservoirSize {
s.values.Pop()
}
s.values.Push(expDecaySample{
k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
v: v,
})
if t.After(s.t1) {
values := s.values.Values()
t0 := s.t0
s.values.Clear()
s.t0 = t
s.t1 = s.t0.Add(rescaleThreshold)
for _, v := range values {
v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
s.values.Push(v)
}
}
}
// NilSample is a no-op Sample.
type NilSample struct{}
// Clear is a no-op.
func (NilSample) Clear() {}
// Count is a no-op.
func (NilSample) Count() int64 { return 0 }
// Max is a no-op.
func (NilSample) Max() int64 { return 0 }
// Mean is a no-op.
func (NilSample) Mean() float64 { return 0.0 }
// Min is a no-op.
func (NilSample) Min() int64 { return 0 }
// Percentile is a no-op.
func (NilSample) Percentile(p float64) float64 { return 0.0 }
// Percentiles is a no-op.
func (NilSample) Percentiles(ps []float64) []float64 {
return make([]float64, len(ps))
}
// Size is a no-op.
func (NilSample) Size() int { return 0 }
// Snapshot is a no-op.
func (NilSample) Snapshot() Sample { return NilSample{} }
// StdDev is a no-op.
func (NilSample) StdDev() float64 { return 0.0 }
// Sum is a no-op.
func (NilSample) Sum() int64 { return 0 }
// Update is a no-op.
func (NilSample) Update(v int64) {}
// Values is a no-op.
func (NilSample) Values() []int64 { return []int64{} }
// Variance is a no-op.
func (NilSample) Variance() float64 { return 0.0 }
// SampleMax returns the maximum value of the slice of int64.
func SampleMax(values []int64) int64 {
if 0 == len(values) {
return 0
}
var max int64 = math.MinInt64
for _, v := range values {
if max < v {
max = v
}
}
return max
}
// SampleMean returns the mean value of the slice of int64.
func SampleMean(values []int64) float64 {
if 0 == len(values) {
return 0.0
}
return float64(SampleSum(values)) / float64(len(values))
}
// SampleMin returns the minimum value of the slice of int64.
func SampleMin(values []int64) int64 {
if 0 == len(values) {
return 0
}
var min int64 = math.MaxInt64
for _, v := range values {
if min > v {
min = v
}
}
return min
}
// SamplePercentile returns an arbitrary percentile of the slice of int64.
func SamplePercentile(values int64Slice, p float64) float64 {
return SamplePercentiles(values, []float64{p})[0]
}
// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
// int64.
func SamplePercentiles(values int64Slice, ps []float64) []float64 {
scores := make([]float64, len(ps))
size := len(values)
if size > 0 {
sort.Sort(values)
for i, p := range ps {
pos := p * float64(size+1)
if pos < 1.0 {
scores[i] = float64(values[0])
} else if pos >= float64(size) {
scores[i] = float64(values[size-1])
} else {
lower := float64(values[int(pos)-1])
upper := float64(values[int(pos)])
scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
}
}
}
return scores
}
// SampleSnapshot is a read-only copy of another Sample.
type SampleSnapshot struct {
count int64
values []int64
}
// Clear panics.
func (*SampleSnapshot) Clear() {
panic("Clear called on a SampleSnapshot")
}
// Count returns the count of inputs at the time the snapshot was taken.
func (s *SampleSnapshot) Count() int64 { return s.count }
// Max returns the maximal value at the time the snapshot was taken.
func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
// Mean returns the mean value at the time the snapshot was taken.
func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
// Min returns the minimal value at the time the snapshot was taken.
func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
// Percentile returns an arbitrary percentile of values at the time the
// snapshot was taken.
func (s *SampleSnapshot) Percentile(p float64) float64 {
return SamplePercentile(s.values, p)
}
// Percentiles returns a slice of arbitrary percentiles of values at the time
// the snapshot was taken.
func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
return SamplePercentiles(s.values, ps)
}
// Size returns the size of the sample at the time the snapshot was taken.
func (s *SampleSnapshot) Size() int { return len(s.values) }
// Snapshot returns the snapshot.
func (s *SampleSnapshot) Snapshot() Sample { return s }
// StdDev returns the standard deviation of values at the time the snapshot was
// taken.
func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
// Sum returns the sum of values at the time the snapshot was taken.
func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
// Update panics.
func (*SampleSnapshot) Update(int64) {
panic("Update called on a SampleSnapshot")
}
// Values returns a copy of the values in the sample.
func (s *SampleSnapshot) Values() []int64 {
values := make([]int64, len(s.values))
copy(values, s.values)
return values
}
// Variance returns the variance of values at the time the snapshot was taken.
func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
// SampleStdDev returns the standard deviation of the slice of int64.
func SampleStdDev(values []int64) float64 {
return math.Sqrt(SampleVariance(values))
}
// SampleSum returns the sum of the slice of int64.
func SampleSum(values []int64) int64 {
var sum int64
for _, v := range values {
sum += v
}
return sum
}
// SampleVariance returns the variance of the slice of int64.
func SampleVariance(values []int64) float64 {
if 0 == len(values) {
return 0.0
}
m := SampleMean(values)
var sum float64
for _, v := range values {
d := float64(v) - m
sum += d * d
}
return sum / float64(len(values))
}
// A uniform sample using Vitter's Algorithm R.
//
// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
type UniformSample struct {
count int64
mutex sync.Mutex
reservoirSize int
values []int64
}
// NewUniformSample constructs a new uniform sample with the given reservoir
// size.
func NewUniformSample(reservoirSize int) Sample {
return &UniformSample{
reservoirSize: reservoirSize,
values: make([]int64, 0, reservoirSize),
}
}
// Clear clears all samples.
func (s *UniformSample) Clear() {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count = 0
s.values = make([]int64, 0, s.reservoirSize)
}
// Count returns the number of samples recorded, which may exceed the
// reservoir size.
func (s *UniformSample) Count() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.count
}
// Max returns the maximum value in the sample, which may not be the maximum
// value ever to be part of the sample.
func (s *UniformSample) Max() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleMax(s.values)
}
// Mean returns the mean of the values in the sample.
func (s *UniformSample) Mean() float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleMean(s.values)
}
// Min returns the minimum value in the sample, which may not be the minimum
// value ever to be part of the sample.
func (s *UniformSample) Min() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleMin(s.values)
}
// Percentile returns an arbitrary percentile of values in the sample.
func (s *UniformSample) Percentile(p float64) float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SamplePercentile(s.values, p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the
// sample.
func (s *UniformSample) Percentiles(ps []float64) []float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SamplePercentiles(s.values, ps)
}
// Size returns the size of the sample, which is at most the reservoir size.
func (s *UniformSample) Size() int {
s.mutex.Lock()
defer s.mutex.Unlock()
return len(s.values)
}
// Snapshot returns a read-only copy of the sample.
func (s *UniformSample) Snapshot() Sample {
s.mutex.Lock()
defer s.mutex.Unlock()
values := make([]int64, len(s.values))
copy(values, s.values)
return &SampleSnapshot{
count: s.count,
values: values,
}
}
// StdDev returns the standard deviation of the values in the sample.
func (s *UniformSample) StdDev() float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleStdDev(s.values)
}
// Sum returns the sum of the values in the sample.
func (s *UniformSample) Sum() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleSum(s.values)
}
// Update samples a new value.
func (s *UniformSample) Update(v int64) {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count++
if len(s.values) < s.reservoirSize {
s.values = append(s.values, v)
} else {
r := rand.Int63n(s.count)
if r < int64(len(s.values)) {
s.values[int(r)] = v
}
}
}
// Values returns a copy of the values in the sample.
func (s *UniformSample) Values() []int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
values := make([]int64, len(s.values))
copy(values, s.values)
return values
}
// Variance returns the variance of the values in the sample.
func (s *UniformSample) Variance() float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleVariance(s.values)
}
// expDecaySample represents an individual sample in a heap.
type expDecaySample struct {
k float64
v int64
}
func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
}
// expDecaySampleHeap is a min-heap of expDecaySamples.
// The internal implementation is copied from the standard library's container/heap
type expDecaySampleHeap struct {
s []expDecaySample
}
func (h *expDecaySampleHeap) Clear() {
h.s = h.s[:0]
}
func (h *expDecaySampleHeap) Push(s expDecaySample) {
n := len(h.s)
h.s = h.s[0 : n+1]
h.s[n] = s
h.up(n)
}
func (h *expDecaySampleHeap) Pop() expDecaySample {
n := len(h.s) - 1
h.s[0], h.s[n] = h.s[n], h.s[0]
h.down(0, n)
n = len(h.s)
s := h.s[n-1]
h.s = h.s[0 : n-1]
return s
}
func (h *expDecaySampleHeap) Size() int {
return len(h.s)
}
func (h *expDecaySampleHeap) Values() []expDecaySample {
return h.s
}
func (h *expDecaySampleHeap) up(j int) {
for {
i := (j - 1) / 2 // parent
if i == j || !(h.s[j].k < h.s[i].k) {
break
}
h.s[i], h.s[j] = h.s[j], h.s[i]
j = i
}
}
func (h *expDecaySampleHeap) down(i, n int) {
for {
j1 := 2*i + 1
if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
break
}
j := j1 // left child
if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
j = j2 // = 2*i + 2 // right child
}
if !(h.s[j].k < h.s[i].k) {
break
}
h.s[i], h.s[j] = h.s[j], h.s[i]
i = j
}
}
type int64Slice []int64
func (p int64Slice) Len() int { return len(p) }
func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
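A short usage sketch of the two reservoirs above, assuming latencies in milliseconds are being recorded:
// Exponentially-decaying: recent values dominate the percentiles.
ed := NewExpDecaySample(1028, 0.015) // same reservoir size and alpha as NewTimer uses
// Uniform: every value ever seen has an equal chance of being retained.
un := NewUniformSample(1028)

for i := int64(1); i <= 10000; i++ {
    ed.Update(i)
    un.Update(i)
}

snap := ed.Snapshot() // read-only copy; safe to inspect while writers continue
p50, p99 := snap.Percentile(0.5), snap.Percentile(0.99)
_, _ = p50, p99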

View File

@ -1,367 +0,0 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.
package metrics
import (
"math/rand"
"runtime"
"testing"
"time"
)
// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
s := make([]int64, 1000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
SampleVariance(s)
}
}
func BenchmarkCompute1000000(b *testing.B) {
s := make([]int64, 1000000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
SampleVariance(s)
}
}
func BenchmarkCopy1000(b *testing.B) {
s := make([]int64, 1000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
sCopy := make([]int64, len(s))
copy(sCopy, s)
}
}
func BenchmarkCopy1000000(b *testing.B) {
s := make([]int64, 1000000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
sCopy := make([]int64, len(s))
copy(sCopy, s)
}
}
func BenchmarkExpDecaySample257(b *testing.B) {
benchmarkSample(b, NewExpDecaySample(257, 0.015))
}
func BenchmarkExpDecaySample514(b *testing.B) {
benchmarkSample(b, NewExpDecaySample(514, 0.015))
}
func BenchmarkExpDecaySample1028(b *testing.B) {
benchmarkSample(b, NewExpDecaySample(1028, 0.015))
}
func BenchmarkUniformSample257(b *testing.B) {
benchmarkSample(b, NewUniformSample(257))
}
func BenchmarkUniformSample514(b *testing.B) {
benchmarkSample(b, NewUniformSample(514))
}
func BenchmarkUniformSample1028(b *testing.B) {
benchmarkSample(b, NewUniformSample(1028))
}
func TestExpDecaySample10(t *testing.T) {
rand.Seed(1)
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 10; i++ {
s.Update(int64(i))
}
if size := s.Count(); 10 != size {
t.Errorf("s.Count(): 10 != %v\n", size)
}
if size := s.Size(); 10 != size {
t.Errorf("s.Size(): 10 != %v\n", size)
}
if l := len(s.Values()); 10 != l {
t.Errorf("len(s.Values()): 10 != %v\n", l)
}
for _, v := range s.Values() {
if v > 10 || v < 0 {
t.Errorf("out of range [0, 10): %v\n", v)
}
}
}
func TestExpDecaySample100(t *testing.T) {
rand.Seed(1)
s := NewExpDecaySample(1000, 0.01)
for i := 0; i < 100; i++ {
s.Update(int64(i))
}
if size := s.Count(); 100 != size {
t.Errorf("s.Count(): 100 != %v\n", size)
}
if size := s.Size(); 100 != size {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); 100 != l {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 100 || v < 0 {
t.Errorf("out of range [0, 100): %v\n", v)
}
}
}
func TestExpDecaySample1000(t *testing.T) {
rand.Seed(1)
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 1000; i++ {
s.Update(int64(i))
}
if size := s.Count(); 1000 != size {
t.Errorf("s.Count(): 1000 != %v\n", size)
}
if size := s.Size(); 100 != size {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); 100 != l {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 1000 || v < 0 {
t.Errorf("out of range [0, 1000): %v\n", v)
}
}
}
// This test makes sure that the sample's priority is not amplified by using
// nanosecond duration since start rather than second duration since start.
// The priority becomes +Inf quickly after starting if this is done,
// effectively freezing the set of samples until a rescale step happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
rand.Seed(1)
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 100; i++ {
s.Update(10)
}
time.Sleep(1 * time.Millisecond)
for i := 0; i < 100; i++ {
s.Update(20)
}
v := s.Values()
avg := float64(0)
for i := 0; i < len(v); i++ {
avg += float64(v[i])
}
avg /= float64(len(v))
if avg > 16 || avg < 14 {
t.Errorf("out of range [14, 16]: %v\n", avg)
}
}
func TestExpDecaySampleRescale(t *testing.T) {
s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
s.update(time.Now(), 1)
s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
for _, v := range s.values.Values() {
if v.k == 0.0 {
t.Fatal("v.k == 0.0")
}
}
}
func TestExpDecaySampleSnapshot(t *testing.T) {
now := time.Now()
rand.Seed(1)
s := NewExpDecaySample(100, 0.99)
for i := 1; i <= 10000; i++ {
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
}
snapshot := s.Snapshot()
s.Update(1)
testExpDecaySampleStatistics(t, snapshot)
}
func TestExpDecaySampleStatistics(t *testing.T) {
now := time.Now()
rand.Seed(1)
s := NewExpDecaySample(100, 0.99)
for i := 1; i <= 10000; i++ {
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
}
testExpDecaySampleStatistics(t, s)
}
func TestUniformSample(t *testing.T) {
rand.Seed(1)
s := NewUniformSample(100)
for i := 0; i < 1000; i++ {
s.Update(int64(i))
}
if size := s.Count(); 1000 != size {
t.Errorf("s.Count(): 1000 != %v\n", size)
}
if size := s.Size(); 100 != size {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); 100 != l {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 1000 || v < 0 {
t.Errorf("out of range [0, 100): %v\n", v)
}
}
}
func TestUniformSampleIncludesTail(t *testing.T) {
rand.Seed(1)
s := NewUniformSample(100)
max := 100
for i := 0; i < max; i++ {
s.Update(int64(i))
}
v := s.Values()
sum := 0
exp := (max - 1) * max / 2
for i := 0; i < len(v); i++ {
sum += int(v[i])
}
if exp != sum {
t.Errorf("sum: %v != %v\n", exp, sum)
}
}
func TestUniformSampleSnapshot(t *testing.T) {
s := NewUniformSample(100)
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
}
snapshot := s.Snapshot()
s.Update(1)
testUniformSampleStatistics(t, snapshot)
}
func TestUniformSampleStatistics(t *testing.T) {
rand.Seed(1)
s := NewUniformSample(100)
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
}
testUniformSampleStatistics(t, s)
}
func benchmarkSample(b *testing.B, s Sample) {
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
pauseTotalNs := memStats.PauseTotalNs
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.Update(1)
}
b.StopTimer()
runtime.GC()
runtime.ReadMemStats(&memStats)
b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
func testExpDecaySampleStatistics(t *testing.T, s Sample) {
if count := s.Count(); 10000 != count {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
if min := s.Min(); 107 != min {
t.Errorf("s.Min(): 107 != %v\n", min)
}
if max := s.Max(); 10000 != max {
t.Errorf("s.Max(): 10000 != %v\n", max)
}
if mean := s.Mean(); 4965.98 != mean {
t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
}
if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
}
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
if 4615 != ps[0] {
t.Errorf("median: 4615 != %v\n", ps[0])
}
if 7672 != ps[1] {
t.Errorf("75th percentile: 7672 != %v\n", ps[1])
}
if 9998.99 != ps[2] {
t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
}
}
func testUniformSampleStatistics(t *testing.T, s Sample) {
if count := s.Count(); 10000 != count {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
if min := s.Min(); 37 != min {
t.Errorf("s.Min(): 37 != %v\n", min)
}
if max := s.Max(); 9989 != max {
t.Errorf("s.Max(): 9989 != %v\n", max)
}
if mean := s.Mean(); 4748.14 != mean {
t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
}
if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
}
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
if 4599 != ps[0] {
t.Errorf("median: 4599 != %v\n", ps[0])
}
if 7380.5 != ps[1] {
t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
}
if 9986.429999999998 != ps[2] {
t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
}
}
// TestUniformSampleConcurrentUpdateCount would expose data race problems with
// concurrent Update and Count calls on Sample when the test is run with the
// -race flag.
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
s := NewUniformSample(100)
for i := 0; i < 100; i++ {
s.Update(int64(i))
}
quit := make(chan struct{})
go func() {
t := time.NewTicker(10 * time.Millisecond)
for {
select {
case <-t.C:
s.Update(rand.Int63())
case <-quit:
t.Stop()
return
}
}
}()
for i := 0; i < 1000; i++ {
s.Count()
time.Sleep(5 * time.Millisecond)
}
quit <- struct{}{}
}

View File

@ -1,25 +1,27 @@
package metrics
import "github.com/grafana/grafana/pkg/setting"
import (
"strings"
"time"
type MetricPublisher interface {
Publish(metrics []Metric)
}
"github.com/grafana/grafana/pkg/metrics/graphitebridge"
"github.com/grafana/grafana/pkg/setting"
"github.com/prometheus/client_golang/prometheus"
ini "gopkg.in/ini.v1"
)
type MetricSettings struct {
Enabled bool
IntervalSeconds int64
Publishers []MetricPublisher
Enabled bool
IntervalSeconds int64
GraphiteBridgeConfig *graphitebridge.Config
}
func readSettings() *MetricSettings {
func ReadSettings(file *ini.File) *MetricSettings {
var settings = &MetricSettings{
Enabled: false,
Publishers: make([]MetricPublisher, 0),
Enabled: false,
}
var section, err = setting.Cfg.GetSection("metrics")
var section, err = file.GetSection("metrics")
if err != nil {
metricsLogger.Crit("Unable to find metrics config section", "error", err)
return nil
@ -32,12 +34,46 @@ func readSettings() *MetricSettings {
return settings
}
if graphitePublisher, err := CreateGraphitePublisher(); err != nil {
metricsLogger.Error("Failed to init Graphite metric publisher", "error", err)
} else if graphitePublisher != nil {
metricsLogger.Info("Metrics publisher initialized", "type", "graphite")
settings.Publishers = append(settings.Publishers, graphitePublisher)
cfg, err := parseGraphiteSettings(settings, file)
if err != nil {
metricsLogger.Crit("Unable to parse metrics graphite section", "error", err)
return nil
}
settings.GraphiteBridgeConfig = cfg
return settings
}
func parseGraphiteSettings(settings *MetricSettings, file *ini.File) (*graphitebridge.Config, error) {
graphiteSection, err := file.GetSection("metrics.graphite")
if err != nil {
return nil, nil
}
address := graphiteSection.Key("address").String()
if address == "" {
return nil, nil
}
cfg := &graphitebridge.Config{
URL: address,
Prefix: graphiteSection.Key("prefix").MustString("prod.grafana.%(instance_name)s"),
CountersAsDelta: true,
Gatherer: prometheus.DefaultGatherer,
Interval: time.Duration(settings.IntervalSeconds) * time.Second,
Timeout: 10 * time.Second,
Logger: &logWrapper{logger: metricsLogger},
ErrorHandling: graphitebridge.ContinueOnError,
}
safeInstanceName := strings.Replace(setting.InstanceName, ".", "_", -1)
prefix := graphiteSection.Key("prefix").Value()
if prefix == "" {
prefix = "prod.grafana.%(instance_name)s."
}
cfg.Prefix = strings.Replace(prefix, "%(instance_name)s", safeInstanceName, -1)
return cfg, nil
}
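A sketch of exercising the new ReadSettings against an in-memory file; the [metrics] key names (enabled, interval_seconds) are assumptions inferred from the struct fields above, not confirmed by this diff:
file, err := ini.Load([]byte(`
[metrics]
enabled = true
interval_seconds = 10

[metrics.graphite]
address = localhost:2003
prefix = prod.grafana.%(instance_name)s.
`))
if err != nil {
    panic(err)
}

settings := ReadSettings(file)
// GraphiteBridgeConfig is non-nil only when [metrics.graphite] sets an address.
_ = settings.GraphiteBridgeConfig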

View File

@ -1,310 +0,0 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.
package metrics
import (
"sync"
"time"
)
// Timers capture the duration and rate of events.
type Timer interface {
Metric
Count() int64
Max() int64
Mean() float64
Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
Rate1() float64
Rate5() float64
Rate15() float64
RateMean() float64
StdDev() float64
Sum() int64
Time(func())
Update(time.Duration)
UpdateSince(time.Time)
Variance() float64
}
// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
func NewCustomTimer(meta *MetricMeta, h Histogram, m Meter) Timer {
if UseNilMetrics {
return NilTimer{}
}
return &StandardTimer{
MetricMeta: meta,
histogram: h,
meter: m,
}
}
// NewTimer constructs a new StandardTimer using an exponentially-decaying
// sample with the same reservoir size and alpha as UNIX load averages.
func NewTimer(meta *MetricMeta) Timer {
if UseNilMetrics {
return NilTimer{}
}
return &StandardTimer{
MetricMeta: meta,
histogram: NewHistogram(meta, NewExpDecaySample(1028, 0.015)),
meter: NewMeter(meta),
}
}
func RegTimer(name string, tagStrings ...string) Timer {
tr := NewTimer(NewMetricMeta(name, tagStrings))
MetricStats.Register(tr)
return tr
}
// NilTimer is a no-op Timer.
type NilTimer struct {
*MetricMeta
h Histogram
m Meter
}
// Count is a no-op.
func (NilTimer) Count() int64 { return 0 }
// Max is a no-op.
func (NilTimer) Max() int64 { return 0 }
// Mean is a no-op.
func (NilTimer) Mean() float64 { return 0.0 }
// Min is a no-op.
func (NilTimer) Min() int64 { return 0 }
// Percentile is a no-op.
func (NilTimer) Percentile(p float64) float64 { return 0.0 }
// Percentiles is a no-op.
func (NilTimer) Percentiles(ps []float64) []float64 {
return make([]float64, len(ps))
}
// Rate1 is a no-op.
func (NilTimer) Rate1() float64 { return 0.0 }
// Rate5 is a no-op.
func (NilTimer) Rate5() float64 { return 0.0 }
// Rate15 is a no-op.
func (NilTimer) Rate15() float64 { return 0.0 }
// RateMean is a no-op.
func (NilTimer) RateMean() float64 { return 0.0 }
// Snapshot is a no-op.
func (n NilTimer) Snapshot() Metric { return n }
// StdDev is a no-op.
func (NilTimer) StdDev() float64 { return 0.0 }
// Sum is a no-op.
func (NilTimer) Sum() int64 { return 0 }
// Time is a no-op.
func (NilTimer) Time(func()) {}
// Update is a no-op.
func (NilTimer) Update(time.Duration) {}
// UpdateSince is a no-op.
func (NilTimer) UpdateSince(time.Time) {}
// Variance is a no-op.
func (NilTimer) Variance() float64 { return 0.0 }
// StandardTimer is the standard implementation of a Timer and uses a Histogram
// and Meter.
type StandardTimer struct {
*MetricMeta
histogram Histogram
meter Meter
mutex sync.Mutex
}
// Count returns the number of events recorded.
func (t *StandardTimer) Count() int64 {
return t.histogram.Count()
}
// Max returns the maximum value in the sample.
func (t *StandardTimer) Max() int64 {
return t.histogram.Max()
}
// Mean returns the mean of the values in the sample.
func (t *StandardTimer) Mean() float64 {
return t.histogram.Mean()
}
// Min returns the minimum value in the sample.
func (t *StandardTimer) Min() int64 {
return t.histogram.Min()
}
// Percentile returns an arbitrary percentile of the values in the sample.
func (t *StandardTimer) Percentile(p float64) float64 {
return t.histogram.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (t *StandardTimer) Percentiles(ps []float64) []float64 {
return t.histogram.Percentiles(ps)
}
// Rate1 returns the one-minute moving average rate of events per second.
func (t *StandardTimer) Rate1() float64 {
return t.meter.Rate1()
}
// Rate5 returns the five-minute moving average rate of events per second.
func (t *StandardTimer) Rate5() float64 {
return t.meter.Rate5()
}
// Rate15 returns the fifteen-minute moving average rate of events per second.
func (t *StandardTimer) Rate15() float64 {
return t.meter.Rate15()
}
// RateMean returns the meter's mean rate of events per second.
func (t *StandardTimer) RateMean() float64 {
return t.meter.RateMean()
}
// Snapshot returns a read-only copy of the timer.
func (t *StandardTimer) Snapshot() Metric {
t.mutex.Lock()
defer t.mutex.Unlock()
return &TimerSnapshot{
MetricMeta: t.MetricMeta,
histogram: t.histogram.Snapshot().(*HistogramSnapshot),
meter: t.meter.Snapshot().(*MeterSnapshot),
}
}
// StdDev returns the standard deviation of the values in the sample.
func (t *StandardTimer) StdDev() float64 {
return t.histogram.StdDev()
}
// Sum returns the sum in the sample.
func (t *StandardTimer) Sum() int64 {
return t.histogram.Sum()
}
// Record the duration of the execution of the given function.
func (t *StandardTimer) Time(f func()) {
ts := time.Now()
f()
t.Update(time.Since(ts))
}
// Record the duration of an event.
func (t *StandardTimer) Update(d time.Duration) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.histogram.Update(int64(d))
t.meter.Mark(1)
}
// Record the duration of an event that started at a time and ends now.
// Note: unlike Update, which records raw Duration ticks (nanoseconds),
// this records milliseconds.
func (t *StandardTimer) UpdateSince(ts time.Time) {
t.mutex.Lock()
defer t.mutex.Unlock()
sinceMs := time.Since(ts) / time.Millisecond
t.histogram.Update(int64(sinceMs))
t.meter.Mark(1)
}
// Variance returns the variance of the values in the sample.
func (t *StandardTimer) Variance() float64 {
return t.histogram.Variance()
}
// TimerSnapshot is a read-only copy of another Timer.
type TimerSnapshot struct {
*MetricMeta
histogram *HistogramSnapshot
meter *MeterSnapshot
}
// Count returns the number of events recorded at the time the snapshot was
// taken.
func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
// Max returns the maximum value at the time the snapshot was taken.
func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
// Mean returns the mean value at the time the snapshot was taken.
func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
// Min returns the minimum value at the time the snapshot was taken.
func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
// Percentile returns an arbitrary percentile of sampled values at the time the
// snapshot was taken.
func (t *TimerSnapshot) Percentile(p float64) float64 {
return t.histogram.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of sampled values at
// the time the snapshot was taken.
func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
return t.histogram.Percentiles(ps)
}
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
// Snapshot returns the snapshot.
func (t *TimerSnapshot) Snapshot() Metric { return t }
// StdDev returns the standard deviation of the values at the time the snapshot
// was taken.
func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
// Sum returns the sum at the time the snapshot was taken.
func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
// Time panics.
func (*TimerSnapshot) Time(func()) {
panic("Time called on a TimerSnapshot")
}
// Update panics.
func (*TimerSnapshot) Update(time.Duration) {
panic("Update called on a TimerSnapshot")
}
// UpdateSince panics.
func (*TimerSnapshot) UpdateSince(time.Time) {
panic("UpdateSince called on a TimerSnapshot")
}
// Variance returns the variance of the values at the time the snapshot was
// taken.
func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }

View File

@ -19,8 +19,8 @@ import (
"net/http"
"time"
"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/setting"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/macaron.v1"
)
@ -35,8 +35,8 @@ func Logger() macaron.Handler {
timeTakenMs := time.Since(start) / time.Millisecond
if timer, ok := c.Data["perfmon.timer"]; ok {
timerTyped := timer.(metrics.Timer)
timerTyped.Update(timeTakenMs)
timerTyped := timer.(prometheus.Summary)
timerTyped.Observe(float64(timeTakenMs))
}
status := rw.Status()
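For context on the change above, a prometheus.Summary of the kind now expected under "perfmon.timer" is declared roughly like this (the metric name is invented, not the one Grafana registers):
var pageLoadSummary = prometheus.NewSummary(prometheus.SummaryOpts{
    Name: "page_load_duration_milliseconds", // invented name for illustration
    Help: "Observed page render time in milliseconds.",
})

func init() {
    prometheus.MustRegister(pageLoadSummary)
}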

View File

@ -10,10 +10,10 @@ import (
"github.com/grafana/grafana/pkg/components/apikeygen"
"github.com/grafana/grafana/pkg/log"
l "github.com/grafana/grafana/pkg/login"
"github.com/grafana/grafana/pkg/metrics"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
"github.com/prometheus/client_golang/prometheus"
)
type Context struct {
@ -251,7 +251,7 @@ func (ctx *Context) HasHelpFlag(flag m.HelpFlags1) bool {
return ctx.HelpFlags1.HasFlag(flag)
}
func (ctx *Context) TimeRequest(timer metrics.Timer) {
func (ctx *Context) TimeRequest(timer prometheus.Summary) {
ctx.Data["perfmon.timer"] = timer
}
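Endpoints opt in per request by handing such a Summary to TimeRequest; a minimal sketch with an invented handler name:
func GetFrontendSettings(c *Context) {
    c.TimeRequest(frontendSettingsSummary) // frontendSettingsSummary: a prometheus.Summary
    // ... handler work; the Logger middleware observes the elapsed ms on the way out
}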

View File

@ -2,19 +2,28 @@ package middleware
import (
"net/http"
"strconv"
"strings"
"time"
"github.com/grafana/grafana/pkg/metrics"
"gopkg.in/macaron.v1"
)
func RequestMetrics() macaron.Handler {
func RequestMetrics(handler string) macaron.Handler {
return func(res http.ResponseWriter, req *http.Request, c *macaron.Context) {
rw := res.(macaron.ResponseWriter)
now := time.Now()
c.Next()
status := rw.Status()
code := sanitizeCode(status)
method := sanitizeMethod(req.Method)
metrics.M_Http_Request_Total.WithLabelValues(handler, code, method).Inc()
duration := time.Since(now).Nanoseconds() / int64(time.Millisecond)
metrics.M_Http_Request_Summary.WithLabelValues(handler, code, method).Observe(float64(duration))
if strings.HasPrefix(req.RequestURI, "/api/datasources/proxy") {
countProxyRequests(status)
} else if strings.HasPrefix(req.RequestURI, "/api/") {
@ -28,38 +37,165 @@ func RequestMetrics() macaron.Handler {
func countApiRequests(status int) {
switch status {
case 200:
metrics.M_Api_Status_200.Inc(1)
metrics.M_Api_Status.WithLabelValues("200").Inc()
case 404:
metrics.M_Api_Status_404.Inc(1)
metrics.M_Api_Status.WithLabelValues("404").Inc()
case 500:
metrics.M_Api_Status_500.Inc(1)
metrics.M_Api_Status.WithLabelValues("500").Inc()
default:
metrics.M_Api_Status_Unknown.Inc(1)
metrics.M_Api_Status.WithLabelValues("unknown").Inc()
}
}
func countPageRequests(status int) {
switch status {
case 200:
metrics.M_Page_Status_200.Inc(1)
metrics.M_Page_Status.WithLabelValues("200").Inc()
case 404:
metrics.M_Page_Status_404.Inc(1)
metrics.M_Page_Status.WithLabelValues("404").Inc()
case 500:
metrics.M_Page_Status_500.Inc(1)
metrics.M_Page_Status.WithLabelValues("500").Inc()
default:
metrics.M_Page_Status_Unknown.Inc(1)
metrics.M_Page_Status.WithLabelValues("unknown").Inc()
}
}
func countProxyRequests(status int) {
switch status {
case 200:
metrics.M_Proxy_Status_200.Inc(1)
metrics.M_Proxy_Status.WithLabelValues("200").Inc()
case 404:
metrics.M_Proxy_Status_404.Inc(1)
metrics.M_Proxy_Status.WithLabelValues("400").Inc()
case 500:
metrics.M_Proxy_Status_500.Inc(1)
metrics.M_Proxy_Status.WithLabelValues("500").Inc()
default:
metrics.M_Proxy_Status_Unknown.Inc(1)
metrics.M_Proxy_Status.WithLabelValues("unknown").Inc()
}
}
func sanitizeMethod(m string) string {
switch m {
case "GET", "get":
return "get"
case "PUT", "put":
return "put"
case "HEAD", "head":
return "head"
case "POST", "post":
return "post"
case "DELETE", "delete":
return "delete"
case "CONNECT", "connect":
return "connect"
case "OPTIONS", "options":
return "options"
case "NOTIFY", "notify":
return "notify"
default:
return strings.ToLower(m)
}
}
// If the wrapped http.Handler has not set a status code, i.e. the value is
// currently 0, sanitizeCode will return 200, for consistency with the
// behavior of the stdlib.
func sanitizeCode(s int) string {
switch s {
case 100:
return "100"
case 101:
return "101"
case 200, 0:
return "200"
case 201:
return "201"
case 202:
return "202"
case 203:
return "203"
case 204:
return "204"
case 205:
return "205"
case 206:
return "206"
case 300:
return "300"
case 301:
return "301"
case 302:
return "302"
case 304:
return "304"
case 305:
return "305"
case 307:
return "307"
case 400:
return "400"
case 401:
return "401"
case 402:
return "402"
case 403:
return "403"
case 404:
return "404"
case 405:
return "405"
case 406:
return "406"
case 407:
return "407"
case 408:
return "408"
case 409:
return "409"
case 410:
return "410"
case 411:
return "411"
case 412:
return "412"
case 413:
return "413"
case 414:
return "414"
case 415:
return "415"
case 416:
return "416"
case 417:
return "417"
case 418:
return "418"
case 500:
return "500"
case 501:
return "501"
case 502:
return "502"
case 503:
return "503"
case 504:
return "504"
case 505:
return "505"
case 428:
return "428"
case 429:
return "429"
case 431:
return "431"
case 511:
return "511"
default:
return strconv.Itoa(s)
}
}
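A wiring sketch (assumed setup, not part of this diff): registering the middleware with a handler name gives every series a stable handler label.
m := macaron.New()
m.Use(RequestMetrics("web")) // each response updates the request counter and summary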

View File

@ -0,0 +1,36 @@
package middleware
import (
"fmt"
"net/http"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"gopkg.in/macaron.v1"
)
func RequestTracing(handler string) macaron.Handler {
return func(res http.ResponseWriter, req *http.Request, c *macaron.Context) {
rw := res.(macaron.ResponseWriter)
tracer := opentracing.GlobalTracer()
// An Extract error just means the request carried no trace context; the
// nil wireContext then makes StartSpan below begin a fresh trace.
wireContext, _ := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header))
span := tracer.StartSpan(fmt.Sprintf("HTTP %s", handler), ext.RPCServerOption(wireContext))
defer span.Finish()
ctx := opentracing.ContextWithSpan(req.Context(), span)
c.Req.Request = req.WithContext(ctx)
c.Next()
status := rw.Status()
ext.HTTPStatusCode.Set(span, uint16(status))
ext.HTTPUrl.Set(span, req.RequestURI)
ext.HTTPMethod.Set(span, req.Method)
if status >= 400 {
ext.Error.Set(span, true)
}
}
}
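On the client side, a caller propagates its span through the same HTTP-header format the middleware extracts; a minimal sketch using the opentracing-go API (URL invented):
span := opentracing.GlobalTracer().StartSpan("calling grafana")
defer span.Finish()

req, _ := http.NewRequest("GET", "http://grafana:3000/api/health", nil)
_ = opentracing.GlobalTracer().Inject(
    span.Context(),
    opentracing.HTTPHeaders,
    opentracing.HTTPHeadersCarrier(req.Header),
)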

View File

@ -54,19 +54,31 @@ type DataSource struct {
}
var knownDatasourcePlugins map[string]bool = map[string]bool{
DS_ES: true,
DS_GRAPHITE: true,
DS_INFLUXDB: true,
DS_INFLUXDB_08: true,
DS_KAIROSDB: true,
DS_CLOUDWATCH: true,
DS_PROMETHEUS: true,
DS_OPENTSDB: true,
"opennms": true,
"druid": true,
"dalmatinerdb": true,
"gnocci": true,
"zabbix": true,
DS_ES: true,
DS_GRAPHITE: true,
DS_INFLUXDB: true,
DS_INFLUXDB_08: true,
DS_KAIROSDB: true,
DS_CLOUDWATCH: true,
DS_PROMETHEUS: true,
DS_OPENTSDB: true,
"opennms": true,
"druid": true,
"dalmatinerdb": true,
"gnocci": true,
"zabbix": true,
"newrelic-app": true,
"grafana-datadog-datasource": true,
"grafana-simple-json": true,
"grafana-splunk-datasource": true,
"udoprog-heroic-datasource": true,
"grafana-openfalcon-datasource": true,
"opennms-datasource": true,
"rackerlabs-blueflood-datasource": true,
"crate-datasource": true,
"ayoungprogrammer-finance-datasource": true,
"monasca-datasource": true,
"vertamedia-clickhouse-datasource": true,
}
func IsKnownDataSourcePlugin(dsType string) bool {

View File

@ -48,6 +48,7 @@ func (ds *DataSource) GetHttpTransport() (*http.Transport, error) {
transport := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
Renegotiation: tls.RenegotiateFreelyAsClient,
},
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{

View File

@ -112,7 +112,7 @@ func (c *QueryCondition) executeQuery(context *alerting.EvalContext, timeRange *
req := c.getRequestForAlertRule(getDsInfo.Result, timeRange)
result := make(tsdb.TimeSeriesSlice, 0)
resp, err := c.HandleRequest(context.Ctx, req)
resp, err := c.HandleRequest(context.Ctx, getDsInfo.Result, req)
if err != nil {
if err == gocontext.DeadlineExceeded {
return nil, fmt.Errorf("Alert execution exceeded the timeout")
@ -139,8 +139,8 @@ func (c *QueryCondition) executeQuery(context *alerting.EvalContext, timeRange *
return result, nil
}
func (c *QueryCondition) getRequestForAlertRule(datasource *m.DataSource, timeRange *tsdb.TimeRange) *tsdb.Request {
req := &tsdb.Request{
func (c *QueryCondition) getRequestForAlertRule(datasource *m.DataSource, timeRange *tsdb.TimeRange) *tsdb.TsdbQuery {
req := &tsdb.TsdbQuery{
TimeRange: timeRange,
Queries: []*tsdb.Query{
{

View File

@ -168,7 +168,7 @@ func (ctx *queryConditionTestContext) exec() (*alerting.ConditionResult, error)
ctx.condition = condition
condition.HandleRequest = func(context context.Context, req *tsdb.Request) (*tsdb.Response, error) {
condition.HandleRequest = func(context context.Context, dsInfo *m.DataSource, req *tsdb.TsdbQuery) (*tsdb.Response, error) {
return &tsdb.Response{
Results: map[string]*tsdb.QueryResult{
"A": {Series: ctx.series},

View File

@ -2,8 +2,13 @@ package alerting
import (
"context"
"fmt"
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
tlog "github.com/opentracing/opentracing-go/log"
"github.com/benbjohnson/clock"
"github.com/grafana/grafana/pkg/log"
"golang.org/x/sync/errgroup"
@ -99,22 +104,44 @@ func (e *Engine) processJob(grafanaCtx context.Context, job *Job) error {
}()
alertCtx, cancelFn := context.WithTimeout(context.Background(), alertTimeout)
span := opentracing.StartSpan("alert execution")
alertCtx = opentracing.ContextWithSpan(alertCtx, span)
job.Running = true
evalContext := NewEvalContext(alertCtx, job.Rule)
evalContext.Ctx = alertCtx
done := make(chan struct{})
go func() {
defer func() {
if err := recover(); err != nil {
e.log.Error("Alert Panic", "error", err, "stack", log.Stack(1))
ext.Error.Set(span, true)
span.LogFields(
tlog.Error(fmt.Errorf("%v", err)),
tlog.String("message", "failed to execute alert rule. panic was recovered."),
)
span.Finish()
close(done)
}
}()
e.evalHandler.Eval(evalContext)
e.resultHandler.Handle(evalContext)
span.SetTag("alertId", evalContext.Rule.Id)
span.SetTag("dashboardId", evalContext.Rule.DashboardId)
span.SetTag("firing", evalContext.Firing)
span.SetTag("nodatapoints", evalContext.NoDataFound)
if evalContext.Error != nil {
ext.Error.Set(span, true)
span.LogFields(
tlog.Error(evalContext.Error),
tlog.String("message", "alerting execution failed"),
)
}
span.Finish()
close(done)
}()

View File

@ -63,8 +63,8 @@ func (e *DefaultEvalHandler) Eval(context *EvalContext) {
context.EndTime = time.Now()
context.Rule.State = e.getNewState(context)
elapsedTime := context.EndTime.Sub(context.StartTime) / time.Millisecond
metrics.M_Alerting_Execution_Time.Update(elapsedTime)
elapsedTime := context.EndTime.Sub(context.StartTime).Nanoseconds() / int64(time.Millisecond)
metrics.M_Alerting_Execution_Time.Observe(float64(elapsedTime))
}
// This should be move into evalContext once its been refactored.

View File

@ -89,6 +89,11 @@ func (e *DashAlertExtractor) GetAlerts() ([]*m.Alert, error) {
continue
}
panelId, err := panel.Get("id").Int64()
if err != nil {
return nil, fmt.Errorf("panel id is required. err %v", err)
}
// backward compatibility check, can be removed later
enabled, hasEnabled := jsonAlert.CheckGet("enabled")
if hasEnabled && enabled.MustBool() == false {
@ -103,7 +108,7 @@ func (e *DashAlertExtractor) GetAlerts() ([]*m.Alert, error) {
alert := &m.Alert{
DashboardId: e.Dash.Id,
OrgId: e.OrgId,
PanelId: panel.Get("id").MustInt64(),
PanelId: panelId,
Id: jsonAlert.Get("id").MustInt64(),
Name: jsonAlert.Get("name").MustString(),
Handler: jsonAlert.Get("handler").MustInt64(),
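Why the panel-id change above matters: simplejson's MustInt64 swallows the failure and returns 0, so a panel without an id used to yield an alert silently bound to panel 0, while Get("id").Int64() surfaces the error. A sketch of the difference:
panelJson := simplejson.New() // a panel object with no "id" key, for illustration
id := panelJson.Get("id").MustInt64() // id == 0; the failure is swallowed
_, err := panelJson.Get("id").Int64() // err != nil; the caller can reject the panel
_, _ = id, err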

Some files were not shown because too many files have changed in this diff.