mirror of
https://github.com/grafana/grafana.git
synced 2025-02-25 18:55:37 -06:00
Merge branch 'master' into develop
This commit is contained in:
commit
41e01184aa
21
CHANGELOG.md
21
CHANGELOG.md
@ -7,11 +7,28 @@
|
||||
- UX changes to nav & side menu
|
||||
- New dashboard grid layout system
|
||||
|
||||
# 4.5.0 (unreleased)
|
||||
# 4.6.0 (unreleased)
|
||||
|
||||
## Enhancements
|
||||
## New Features
|
||||
* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
|
||||
* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
|
||||
* **Graph**: Add support for local formatting in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
|
||||
|
||||
## Breaking changes
|
||||
* **Metrics**: The metric structure for internal metrics about Grafana published to graphite has changed. This might break dashboards for internal metrics.
|
||||
|
||||
# 4.5.1 (2017-09-15)
|
||||
|
||||
## Fixes
|
||||
* **MySQL**: Fixed issue with query editor not showing [#9247](https://github.com/grafana/grafana/issues/9247)
|
||||
|
||||
# 4.5.0 (2017-09-14)
|
||||
|
||||
## Fixes & Enhancements since beta1
|
||||
* **Security**: Security fix for api vulnerability (in multiple org setups).
|
||||
* **Shortcuts**: Adds shortcut for creating new dashboard [#8876](https://github.com/grafana/grafana/pull/8876) thx [@mtanda](https://github.com/mtanda)
|
||||
* **Graph**: Right Y-Axis label position fixed [#9172](https://github.com/grafana/grafana/pull/9172)
|
||||
* **General**: Improve rounding of time intervals [#9197](https://github.com/grafana/grafana/pull/9197), thx [@alin-amana](https://github.com/alin-amana)
|
||||
|
||||
# 4.5.0-beta1 (2017-09-05)
|
||||
|
||||
|
@ -13,8 +13,6 @@
|
||||
"tests"
|
||||
],
|
||||
"dependencies": {
|
||||
"jquery": "3.1.0",
|
||||
"lodash": "4.15.0",
|
||||
"angular": "1.6.1",
|
||||
"angular-route": "1.6.1",
|
||||
"angular-mocks": "1.6.1",
|
||||
|
@ -454,7 +454,7 @@ url = https://grafana.com
|
||||
|
||||
#################################### External Image Storage ##############
|
||||
[external_image_storage]
|
||||
# You can choose between (s3, webdav)
|
||||
# You can choose between (s3, webdav, gcs)
|
||||
provider =
|
||||
|
||||
[external_image_storage.s3]
|
||||
@ -467,3 +467,7 @@ url =
|
||||
username =
|
||||
password =
|
||||
public_url =
|
||||
|
||||
[external_image_storage.gcs]
|
||||
key_file =
|
||||
bucket =
|
||||
|
@ -399,7 +399,7 @@
|
||||
#################################### External image storage ##########################
|
||||
[external_image_storage]
|
||||
# Used for uploading images to public servers so they can be included in slack/email messages.
|
||||
# you can choose between (s3, webdav)
|
||||
# you can choose between (s3, webdav, gcs)
|
||||
;provider =
|
||||
|
||||
[external_image_storage.s3]
|
||||
@ -412,3 +412,7 @@
|
||||
;public_url =
|
||||
;username =
|
||||
;password =
|
||||
|
||||
[external_image_storage.gcs]
|
||||
;key_file =
|
||||
;bucket =
|
||||
|
@ -1,5 +1,5 @@
|
||||
postgrestest:
|
||||
image: postgres:latest
|
||||
image: postgres:9.4.14
|
||||
environment:
|
||||
POSTGRES_USER: grafana
|
||||
POSTGRES_PASSWORD: password
|
||||
|
15
docs/sources/administration/metrics.md
Normal file
15
docs/sources/administration/metrics.md
Normal file
@ -0,0 +1,15 @@
|
||||
+++
|
||||
title = "Internal metrics"
|
||||
description = "Internal metrics exposed by Grafana"
|
||||
keywords = ["grafana", "metrics", "internal metrics"]
|
||||
type = "docs"
|
||||
[menu.docs]
|
||||
parent = "admin"
|
||||
weight = 8
|
||||
+++
|
||||
|
||||
# Internal metrics
|
||||
|
||||
Grafana collects some metrics about itself internally. Currently Grafana supports pushing metrics to graphite and exposing them to be scraped by Prometheus.
|
||||
|
||||
To enable internal metrics you have to enable it under the [metrics] section in your [grafana.ini](http://docs.grafana.org/installation/configuration/#enabled-6) config file. If you want to push metrics to graphite you also have to configure the [metrics.graphite](http://docs.grafana.org/installation/configuration/#metrics-graphite) section.
|
@ -41,7 +41,7 @@ Proxy access means that the Grafana backend will proxy all requests from the bro
|
||||
Click the ``Select metric`` link to start navigating the metric space. Once you start you can continue using the mouse
|
||||
or keyboard arrow keys. You can select a wildcard and still continue.
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_query1_still.png" class="docs-image--center"
|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_query1_still.png"
|
||||
animated-gif="/img/docs/v45/graphite_query1.gif" >}}
|
||||
|
||||
|
||||
@ -52,7 +52,7 @@ a function is selected it will be added and your focus will be in the text box o
|
||||
a parameter just click on it and it will turn into a text box. To delete a function click the function name followed
|
||||
by the x icon.
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_query2_still.png" class="docs-image--center"
|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_query2_still.png"
|
||||
animated-gif="/img/docs/v45/graphite_query2.gif" >}}
|
||||
|
||||
|
||||
@ -60,7 +60,7 @@ by the x icon.
|
||||
|
||||
Some functions like aliasByNode support an optional second argument. To add this parameter specify for example 3,-2 as the first parameter and the function editor will adapt and move the -2 to a second parameter. To remove the second optional parameter just click on it and leave it blank and the editor will remove it.
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_query3_still.png" class="docs-image--center"
|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_query3_still.png"
|
||||
animated-gif="/img/docs/v45/graphite_query3.gif" >}}
|
||||
|
||||
|
||||
@ -68,6 +68,10 @@ Some functions like aliasByNode support an optional second argument. To add this
|
||||
|
||||
You can reference queries by the row “letter” that they’re on (similar to Microsoft Excel). If you add a second query to a graph, you can reference the first query simply by typing in #A. This provides an easy and convenient way to build compounded queries.
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_nested_queries_still.png"
|
||||
animated-gif="/img/docs/v45/graphite_nested_queries.gif" >}}
|
||||
|
||||
|
||||
## Point consolidation
|
||||
|
||||
All Graphite metrics are consolidated so that Graphite doesn't return more data points than there are pixels in the graph. By default,
|
||||
|
@ -41,9 +41,7 @@ mode is also more secure as the username & password will never reach the browser
|
||||
|
||||
## Query Editor
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/influxdb_query_still.png" class="docs-image--center"
|
||||
animated-gif="/img/docs/v45/influxdb_query.gif" >}}
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/influxdb_query_still.png" class="docs-image--no-shadow" animated-gif="/img/docs/v45/influxdb_query.gif" >}}
|
||||
|
||||
You find the InfluxDB editor in the metrics tab in Graph or Singlestat panel's edit mode. You enter edit mode by clicking the
|
||||
panel title, then edit. The editor allows you to select metrics and tags.
|
||||
@ -59,10 +57,8 @@ will automatically adjust the filter tag condition to use the InfluxDB regex mat
|
||||
|
||||
### Field & Aggregation functions
|
||||
In the `SELECT` row you can specify what fields and functions you want to use. If you have a
|
||||
group by time you need an aggregation function. Some functions like derivative require an aggregation function.
|
||||
|
||||
The editor tries to simplify and unify this part of the query. For example:
|
||||

|
||||
group by time you need an aggregation function. Some functions like derivative require an aggregation function. The editor tries to simplify and unify this part of the query. For example:<br>
|
||||
<br>
|
||||
|
||||
The above will generate the following InfluxDB `SELECT` clause:
|
||||
|
||||
|
@ -11,8 +11,7 @@ weight = 7
|
||||
|
||||
# Using MySQL in Grafana
|
||||
|
||||
> Only available in Grafana v4.3+. This data source is not ready for
|
||||
> production use, currently in development (alpha state).
|
||||
> Only available in Grafana v4.3+.
|
||||
|
||||
Grafana ships with a built-in MySQL data source plugin that allows you to query and visualize
|
||||
data from a MySQL compatible database.
|
||||
@ -58,8 +57,7 @@ If the `Format as` query option is set to `Table` then you can basically do any
|
||||
|
||||
Query editor with example query:
|
||||
|
||||

|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/mysql_table_query.png" >}}
|
||||
|
||||
The query:
|
||||
|
||||
|
@ -39,7 +39,8 @@ Name | Description
|
||||
|
||||
Open a graph in edit mode by clicking the title > Edit (or by pressing `e` key while hovering over panel).
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/prometheus_query_editor_still.png"
|
||||
animated-gif="/img/docs/v45/prometheus_query_editor.gif" >}}
|
||||
|
||||
Name | Description
|
||||
------- | --------
|
||||
|
@ -12,7 +12,7 @@ weight = 2
|
||||
|
||||
# Table Panel
|
||||
|
||||
<img src="/assets/img/features/table-panel.png">
|
||||
<img class="screenshot" src="/assets/img/features/table-panel.png">
|
||||
|
||||
The new table panel is very flexible, supporting both multiple modes for time series as well as for
|
||||
table, annotation and raw JSON data. It also provides date formatting and value formatting and coloring options.
|
||||
@ -22,55 +22,63 @@ To view table panels in action and test different configurations with sample dat
|
||||
## Options overview
|
||||
|
||||
The table panel has many ways to manipulate your data for optimal presentation.
|
||||
{{< docs-imagebox img="/img/docs/v45/table_options.png" class="docs-image--no-shadow" max-width= "500px" >}}
|
||||
|
||||
<img class="no-shadow" src="/img/docs/v2/table-config2.png">
|
||||
|
||||
1. `Data`: Control how your query is transformed into a table.
|
||||
2. `Table Display`: Table display options.
|
||||
3. `Column Styles`: Column value formatting and display options.
|
||||
2. `Paging`: Table display options.
|
||||
|
||||
|
||||
## Data to Table
|
||||
|
||||
<img class="no-shadow" src="/img/docs/v2/table-data-options.png">
|
||||
{{< docs-imagebox img="/img/docs/v45/table_data_options.png" max-width="500px" class="docs-image--right">}}
|
||||
|
||||
The data section contains the **To Table Transform (1)**. This is the primary option for how your data/metric
|
||||
query should be transformed into a table format. The **Columns (2)** option allows you to select what columns
|
||||
you want in the table. Only applicable for some transforms.
|
||||
|
||||
<div class="clearfix"></div>
|
||||
|
||||
### Time series to rows
|
||||
|
||||
<img src="/img/docs/v2/table_ts_to_rows2.png">
|
||||
{{< docs-imagebox img="/img/docs/v45/table_ts_to_rows.png" >}}
|
||||
|
||||
In the most simple mode you can turn time series to rows. This means you get a `Time`, `Metric` and a `Value` column. Where `Metric` is the name of the time series.
|
||||
|
||||
### Time series to columns
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/table_ts_to_columns.png" >}}
|
||||
|
||||
|
||||
This transform allows you to take multiple time series and group them by time. Which will result in the primary column being `Time` and a column for each time series.
|
||||
|
||||
### Time series aggregations
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/table_ts_to_aggregations.png" >}}
|
||||
|
||||
This table transformation will lay out your table into rows by metric, allowing columns of `Avg`, `Min`, `Max`, `Total`, `Current` and `Count`. More than one column can be added.
|
||||
|
||||
### Annotations
|
||||

|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/table_annotations.png" >}}
|
||||
|
||||
|
||||
If you have annotations enabled in the dashboard you can have the table show them. If you configure this
|
||||
mode then any queries you have in the metrics tab will be ignored.
|
||||
|
||||
### JSON Data
|
||||

|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/table_json_data.png" max-width="500px" >}}
|
||||
|
||||
If you have an Elasticsearch **Raw Document** query or an Elasticsearch query without a `date histogram` use this
|
||||
transform mode and pick the columns using the **Columns** section.
|
||||
|
||||

|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/elastic_raw_doc.png" >}}
|
||||
|
||||
## Table Display
|
||||
|
||||
<img class="no-shadow" src="/img/docs/v2/table-display.png">
|
||||
{{< docs-imagebox img="/img/docs/v45/table_paging.png" class="docs-image--no-shadow docs-image--right" max-width="350px" >}}
|
||||
|
||||
1. `Pagination (Page Size)`: The `Pagination` (page size) value is the threshold at which the table rows will be broken into pages. For example, if your table had 95 records with a pagination value of 10, your table would be split across 10 pages.
|
||||
2. `Scroll`: The `scroll bar` checkbox toggles the ability to scroll within the panel, when unchecked, the panel height will grow to display all rows.
|
||||
@ -81,13 +89,11 @@ transform mode and pick the columns using the **Columns** section.
|
||||
|
||||
The column styles allow you control how dates and numbers are formatted.
|
||||
|
||||
<img class="no-shadow" src="/img/docs/v2/Column-Options.png">
|
||||
{{< docs-imagebox img="/img/docs/v45/table_column_styles.png" class="docs-image--no-shadow" >}}
|
||||
|
||||
1. `Name or regex`: The Name or Regex field controls what columns the rule should be applied to. The regex or name filter will be matched against the column name not against column values.
|
||||
2. `Type`: The three supported types are `Number`, `String` and `Date`.
|
||||
3. `Title`: Title for the column, when using a Regex the title can include replacement strings like `$1`.
|
||||
4. `Format`: Specify date format. Only available when `Type` is set to `Date`.
|
||||
5. `Coloring` and `Thresholds`: Specify color mode and thresholds limits.
|
||||
6. `Unit` and `Decimals`: Specify unit and decimal precision for numbers.
|
||||
7. `Add column style rule`: Add new column rule.
|
||||
2. `Column Header`: Title for the column, when using a Regex the title can include replacement strings like `$1`.
|
||||
3. `Add column style rule`: Add new column rule.
|
||||
4. `Thresholds` and `Coloring`: Specify color mode and thresholds limits.
|
||||
5. `Type`: The three supported types are `Number`, `String` and `Date`. `Unit` and `Decimals`: Specify unit and decimal precision for numbers. `Format`: Specify date format for dates.
|
||||
|
||||
|
@ -8,7 +8,7 @@ weight = 7
|
||||
|
||||
# Keyboard shortcuts
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v4/shortcuts.png" max-width="20rem" >}}
|
||||
{{< docs-imagebox img="/img/docs/v4/shortcuts.png" max-width="20rem" class="docs-image--right" >}}
|
||||
|
||||
Grafana v4 introduces a number of really powerful keyboard shortcuts. You can now focus a panel
|
||||
by hovering over it with your mouse. With a panel focused you can simple hit `e` to toggle panel
|
||||
|
@ -16,16 +16,13 @@ weight = -4
|
||||
|
||||
### New prometheus query editor
|
||||
|
||||
The new query editor has full syntax highlighting. As well as auto complete for metrics, functions, and range vectors.
|
||||
The new query editor has full syntax highlighting. As well as auto complete for metrics, functions, and range vectors. There are also integrated function docs right from the query editor!
|
||||
|
||||

|
||||
|
||||
There are also integrated function docs right from the query editor!
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/prometheus_query_editor_still.png" class="docs-image--block" animated-gif="/img/docs/v45/prometheus_query_editor.gif" >}}
|
||||
|
||||
### Elasticsearch: Add ad-hoc filters from the table panel
|
||||

|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/elastic_ad_hoc_filters.png" class="docs-image--block" >}}
|
||||
|
||||
### Table cell links!
|
||||
Create column styles that turn cells into links that use the value in the cell (or other row values) to generate a URL to another dashboard or system:
|
||||
|
@ -645,7 +645,7 @@ Time to live for snapshots.
|
||||
These options control how images should be made public so they can be shared on services like slack.
|
||||
|
||||
### provider
|
||||
You can choose between (s3, webdav). If left empty Grafana will ignore the upload action.
|
||||
You can choose between (s3, webdav, gcs). If left empty Grafana will ignore the upload action.
|
||||
|
||||
## [external_image_storage.s3]
|
||||
|
||||
@ -677,6 +677,17 @@ basic auth username
|
||||
### password
|
||||
basic auth password
|
||||
|
||||
## [external_image_storage.gcs]
|
||||
|
||||
### key_file
|
||||
Path to JSON key file associated with a Google service account to authenticate and authorize.
|
||||
Service Account keys can be created and downloaded from https://console.developers.google.com/permissions/serviceaccounts.
|
||||
|
||||
Service Account should have "Storage Object Writer" role.
|
||||
|
||||
### bucket name
|
||||
Bucket Name on Google Cloud Storage.
|
||||
|
||||
## [alerting]
|
||||
|
||||
### enabled
|
||||
|
@ -15,20 +15,23 @@ weight = 1
|
||||
|
||||
Description | Download
|
||||
------------ | -------------
|
||||
Stable for Debian-based Linux | [grafana_4.4.3_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb)
|
||||
Beta for Debian-based Linux | [grafana_4.5.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb)
|
||||
Stable for Debian-based Linux | [grafana_4.5.1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.1_amd64.deb)
|
||||
|
||||
<!-- Beta for Debian-based Linux | [grafana_4.5.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb) -->
|
||||
|
||||
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
|
||||
installation.
|
||||
|
||||
## Install Stable
|
||||
|
||||
|
||||
```bash
|
||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb
|
||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.1_amd64.deb
|
||||
sudo apt-get install -y adduser libfontconfig
|
||||
sudo dpkg -i grafana_4.4.3_amd64.deb
|
||||
sudo dpkg -i grafana_4.5.1_amd64.deb
|
||||
```
|
||||
|
||||
<!--
|
||||
## Install Latest Beta
|
||||
|
||||
```bash
|
||||
@ -36,6 +39,7 @@ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-b
|
||||
sudo apt-get install -y adduser libfontconfig
|
||||
sudo dpkg -i grafana_4.5.0-beta1_amd64.deb
|
||||
```
|
||||
-->
|
||||
|
||||
## APT Repository
|
||||
|
||||
|
@ -15,8 +15,9 @@ weight = 2
|
||||
|
||||
Description | Download
|
||||
------------ | -------------
|
||||
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.4.3 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm)
|
||||
Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-beta1.x86_64.rpm)
|
||||
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.1-1.x86_64.rpm)
|
||||
|
||||
<!-- Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-beta1.x86_64.rpm) -->
|
||||
|
||||
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
|
||||
installation.
|
||||
@ -25,19 +26,19 @@ installation.
|
||||
|
||||
You can install Grafana using Yum directly.
|
||||
|
||||
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm
|
||||
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.1-1.x86_64.rpm
|
||||
|
||||
Or install manually using `rpm`.
|
||||
|
||||
#### On CentOS / Fedora / Redhat:
|
||||
|
||||
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm
|
||||
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.1-1.x86_64.rpm
|
||||
$ sudo yum install initscripts fontconfig
|
||||
$ sudo rpm -Uvh grafana-4.4.3-1.x86_64.rpm
|
||||
$ sudo rpm -Uvh grafana-4.5.1-1.x86_64.rpm
|
||||
|
||||
#### On OpenSuse:
|
||||
|
||||
$ sudo rpm -i --nodeps grafana-4.4.3-1.x86_64.rpm
|
||||
$ sudo rpm -i --nodeps grafana-4.5.1-1.x86_64.rpm
|
||||
|
||||
## Install via YUM Repository
|
||||
|
||||
@ -53,8 +54,7 @@ Add the following to a new file at `/etc/yum.repos.d/grafana.repo`
|
||||
sslverify=1
|
||||
sslcacert=/etc/pki/tls/certs/ca-bundle.crt
|
||||
|
||||
There is also a testing repository if you want beta or release
|
||||
candidates.
|
||||
There is also a testing repository if you want beta or release candidates.
|
||||
|
||||
baseurl=https://packagecloud.io/grafana/testing/el/6/$basearch
|
||||
|
||||
|
@ -94,10 +94,10 @@ to the same location (and overwrite the existing files). This might overwrite yo
|
||||
recommend you place your config changes in a file named `<grafana_install_dir>/conf/custom.ini`
|
||||
as this will make upgrades easier without risking losing your config changes.
|
||||
|
||||
## Upgrading form 1.x
|
||||
## Upgrading from 1.x
|
||||
|
||||
[Migrating from 1.x to 2.x]({{< relref "installation/migrating_to2.md" >}})
|
||||
|
||||
## Upgrading form 2.x
|
||||
## Upgrading from 2.x
|
||||
|
||||
We are not aware of any issues upgrading directly from 2.x to 4.x but to on the safe side go via 3.x.
|
||||
We are not aware of any issues upgrading directly from 2.x to 4.x but to be on the safe side go via 3.x => 4.x.
|
||||
|
@ -13,7 +13,7 @@ weight = 3
|
||||
|
||||
Description | Download
|
||||
------------ | -------------
|
||||
Latest stable package for Windows | [grafana.4.4.3.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3.windows-x64.zip)
|
||||
Latest stable package for Windows | [grafana.4.5.1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.1.windows-x64.zip)
|
||||
|
||||
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
|
||||
installation.
|
||||
|
@ -16,7 +16,7 @@ Since Grafana automatically scales Dashboards to any resolution they're perfect
|
||||
|
||||
## Creating a Playlist
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v3/playlist.png" max-width="25rem" >}}
|
||||
{{< docs-imagebox img="/img/docs/v3/playlist.png" max-width="25rem" class="docs-image--right">}}
|
||||
|
||||
The Playlist feature can be accessed from Grafana's sidemenu, in the Dashboard submenu.
|
||||
|
||||
|
@ -74,7 +74,8 @@ If you do not get an image when opening this link verify that the required font
|
||||
|
||||
### Grafana API Key
|
||||
|
||||
<img src="/img/docs/v2/orgdropdown_api_keys.png" style="width: 150px" class="right"></img>
|
||||
{{< docs-imagebox img="/img/docs/v2/orgdropdown_api_keys.png" max-width="150px" class="docs-image--right">}}
|
||||
|
||||
You need to set the environment variable `HUBOT_GRAFANA_API_KEY` to a Grafana API Key.
|
||||
You can add these from the API Keys page which you find in the Organization dropdown.
|
||||
|
||||
|
@ -4,7 +4,7 @@
|
||||
"company": "Grafana Labs"
|
||||
},
|
||||
"name": "grafana",
|
||||
"version": "4.5.0-beta1",
|
||||
"version": "4.6.0-pre1",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "http://github.com/grafana/grafana.git"
|
||||
@ -69,8 +69,9 @@
|
||||
"grunt-sass-lint": "^0.2.2",
|
||||
"grunt-sync": "^0.6.2",
|
||||
"jquery-ui-dist": "^1.12.1",
|
||||
"jquery": "^3.2.1",
|
||||
"karma-sinon": "^1.0.5",
|
||||
"lodash": "^4.17.2",
|
||||
"lodash": "^4.17.4",
|
||||
"mousetrap": "^1.6.0",
|
||||
"remarkable": "^1.7.1",
|
||||
"sinon": "1.17.6",
|
||||
|
@ -1,5 +1,5 @@
|
||||
#! /usr/bin/env bash
|
||||
version=4.4.2
|
||||
version=4.5.1
|
||||
|
||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb
|
||||
|
||||
|
@ -35,7 +35,7 @@ func AdminCreateUser(c *middleware.Context, form dtos.AdminCreateUserForm) {
|
||||
return
|
||||
}
|
||||
|
||||
metrics.M_Api_Admin_User_Create.Inc(1)
|
||||
metrics.M_Api_Admin_User_Create.Inc()
|
||||
|
||||
user := cmd.Result
|
||||
|
||||
|
304
pkg/api/api.go
304
pkg/api/api.go
@ -10,7 +10,7 @@ import (
|
||||
|
||||
// Register adds http routes
|
||||
func (hs *HttpServer) registerRoutes() {
|
||||
r := hs.macaron
|
||||
macaronR := hs.macaron
|
||||
reqSignedIn := middleware.Auth(&middleware.AuthOptions{ReqSignedIn: true})
|
||||
reqGrafanaAdmin := middleware.Auth(&middleware.AuthOptions{ReqSignedIn: true, ReqGrafanaAdmin: true})
|
||||
reqEditorRole := middleware.RoleAuth(m.ROLE_EDITOR, m.ROLE_ADMIN)
|
||||
@ -19,7 +19,9 @@ func (hs *HttpServer) registerRoutes() {
|
||||
bind := binding.Bind
|
||||
|
||||
// automatically set HEAD for every GET
|
||||
r.SetAutoHead(true)
|
||||
macaronR.SetAutoHead(true)
|
||||
|
||||
r := newRouteRegister(middleware.RequestMetrics)
|
||||
|
||||
// not logged in views
|
||||
r.Get("/", reqSignedIn, Index)
|
||||
@ -99,220 +101,206 @@ func (hs *HttpServer) registerRoutes() {
|
||||
r.Get("/api/login/ping", quota("session"), LoginApiPing)
|
||||
|
||||
// authed api
|
||||
r.Group("/api", func() {
|
||||
r.Group("/api", func(apiRoute RouteRegister) {
|
||||
|
||||
// user (signed in)
|
||||
r.Group("/user", func() {
|
||||
r.Get("/", wrap(GetSignedInUser))
|
||||
r.Put("/", bind(m.UpdateUserCommand{}), wrap(UpdateSignedInUser))
|
||||
r.Post("/using/:id", wrap(UserSetUsingOrg))
|
||||
r.Get("/orgs", wrap(GetSignedInUserOrgList))
|
||||
apiRoute.Group("/user", func(userRoute RouteRegister) {
|
||||
userRoute.Get("/", wrap(GetSignedInUser))
|
||||
userRoute.Put("/", bind(m.UpdateUserCommand{}), wrap(UpdateSignedInUser))
|
||||
userRoute.Post("/using/:id", wrap(UserSetUsingOrg))
|
||||
userRoute.Get("/orgs", wrap(GetSignedInUserOrgList))
|
||||
|
||||
r.Post("/stars/dashboard/:id", wrap(StarDashboard))
|
||||
r.Delete("/stars/dashboard/:id", wrap(UnstarDashboard))
|
||||
userRoute.Post("/stars/dashboard/:id", wrap(StarDashboard))
|
||||
userRoute.Delete("/stars/dashboard/:id", wrap(UnstarDashboard))
|
||||
|
||||
r.Put("/password", bind(m.ChangeUserPasswordCommand{}), wrap(ChangeUserPassword))
|
||||
r.Get("/quotas", wrap(GetUserQuotas))
|
||||
r.Put("/helpflags/:id", wrap(SetHelpFlag))
|
||||
userRoute.Put("/password", bind(m.ChangeUserPasswordCommand{}), wrap(ChangeUserPassword))
|
||||
userRoute.Get("/quotas", wrap(GetUserQuotas))
|
||||
userRoute.Put("/helpflags/:id", wrap(SetHelpFlag))
|
||||
// For dev purpose
|
||||
r.Get("/helpflags/clear", wrap(ClearHelpFlags))
|
||||
userRoute.Get("/helpflags/clear", wrap(ClearHelpFlags))
|
||||
|
||||
r.Get("/preferences", wrap(GetUserPreferences))
|
||||
r.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateUserPreferences))
|
||||
userRoute.Get("/preferences", wrap(GetUserPreferences))
|
||||
userRoute.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateUserPreferences))
|
||||
})
|
||||
|
||||
// users (admin permission required)
|
||||
r.Group("/users", func() {
|
||||
r.Get("/", wrap(SearchUsers))
|
||||
r.Get("/search", wrap(SearchUsersWithPaging))
|
||||
r.Get("/:id", wrap(GetUserById))
|
||||
r.Get("/:id/orgs", wrap(GetUserOrgList))
|
||||
apiRoute.Group("/users", func(usersRoute RouteRegister) {
|
||||
usersRoute.Get("/", wrap(SearchUsers))
|
||||
usersRoute.Get("/search", wrap(SearchUsersWithPaging))
|
||||
usersRoute.Get("/:id", wrap(GetUserById))
|
||||
usersRoute.Get("/:id/orgs", wrap(GetUserOrgList))
|
||||
// query parameters /users/lookup?loginOrEmail=admin@example.com
|
||||
r.Get("/lookup", wrap(GetUserByLoginOrEmail))
|
||||
r.Put("/:id", bind(m.UpdateUserCommand{}), wrap(UpdateUser))
|
||||
r.Post("/:id/using/:orgId", wrap(UpdateUserActiveOrg))
|
||||
usersRoute.Get("/lookup", wrap(GetUserByLoginOrEmail))
|
||||
usersRoute.Put("/:id", bind(m.UpdateUserCommand{}), wrap(UpdateUser))
|
||||
usersRoute.Post("/:id/using/:orgId", wrap(UpdateUserActiveOrg))
|
||||
}, reqGrafanaAdmin)
|
||||
|
||||
// user group (admin permission required)
|
||||
r.Group("/user-groups", func() {
|
||||
r.Get("/:userGroupId", wrap(GetUserGroupById))
|
||||
r.Get("/search", wrap(SearchUserGroups))
|
||||
r.Post("/", quota("user-groups"), bind(m.CreateUserGroupCommand{}), wrap(CreateUserGroup))
|
||||
r.Put("/:userGroupId", bind(m.UpdateUserGroupCommand{}), wrap(UpdateUserGroup))
|
||||
r.Delete("/:userGroupId", wrap(DeleteUserGroupById))
|
||||
r.Get("/:userGroupId/members", wrap(GetUserGroupMembers))
|
||||
r.Post("/:userGroupId/members", quota("user-groups"), bind(m.AddUserGroupMemberCommand{}), wrap(AddUserGroupMember))
|
||||
r.Delete("/:userGroupId/members/:userId", wrap(RemoveUserGroupMember))
|
||||
apiRoute.Group("/user-groups", func(userGroupsRoute RouteRegister) {
|
||||
userGroupsRoute.Get("/:userGroupId", wrap(GetUserGroupById))
|
||||
userGroupsRoute.Get("/search", wrap(SearchUserGroups))
|
||||
userGroupsRoute.Post("/", quota("user-groups"), bind(m.CreateUserGroupCommand{}), wrap(CreateUserGroup))
|
||||
userGroupsRoute.Put("/:userGroupId", bind(m.UpdateUserGroupCommand{}), wrap(UpdateUserGroup))
|
||||
userGroupsRoute.Delete("/:userGroupId", wrap(DeleteUserGroupById))
|
||||
userGroupsRoute.Get("/:userGroupId/members", wrap(GetUserGroupMembers))
|
||||
userGroupsRoute.Post("/:userGroupId/members", quota("user-groups"), bind(m.AddUserGroupMemberCommand{}), wrap(AddUserGroupMember))
|
||||
userGroupsRoute.Delete("/:userGroupId/members/:userId", wrap(RemoveUserGroupMember))
|
||||
}, reqOrgAdmin)
|
||||
|
||||
// org information available to all users.
|
||||
r.Group("/org", func() {
|
||||
r.Get("/", wrap(GetOrgCurrent))
|
||||
r.Get("/quotas", wrap(GetOrgQuotas))
|
||||
apiRoute.Group("/org", func(orgRoute RouteRegister) {
|
||||
orgRoute.Get("/", wrap(GetOrgCurrent))
|
||||
orgRoute.Get("/quotas", wrap(GetOrgQuotas))
|
||||
})
|
||||
|
||||
// current org
|
||||
r.Group("/org", func() {
|
||||
r.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrgCurrent))
|
||||
r.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddressCurrent))
|
||||
r.Post("/users", quota("user"), bind(m.AddOrgUserCommand{}), wrap(AddOrgUserToCurrentOrg))
|
||||
r.Get("/users", wrap(GetOrgUsersForCurrentOrg))
|
||||
r.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUserForCurrentOrg))
|
||||
r.Delete("/users/:userId", wrap(RemoveOrgUserForCurrentOrg))
|
||||
apiRoute.Group("/org", func(orgRoute RouteRegister) {
|
||||
orgRoute.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrgCurrent))
|
||||
orgRoute.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddressCurrent))
|
||||
orgRoute.Post("/users", quota("user"), bind(m.AddOrgUserCommand{}), wrap(AddOrgUserToCurrentOrg))
|
||||
orgRoute.Get("/users", wrap(GetOrgUsersForCurrentOrg))
|
||||
orgRoute.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUserForCurrentOrg))
|
||||
orgRoute.Delete("/users/:userId", wrap(RemoveOrgUserForCurrentOrg))
|
||||
|
||||
// invites
|
||||
r.Get("/invites", wrap(GetPendingOrgInvites))
|
||||
r.Post("/invites", quota("user"), bind(dtos.AddInviteForm{}), wrap(AddOrgInvite))
|
||||
r.Patch("/invites/:code/revoke", wrap(RevokeInvite))
|
||||
orgRoute.Get("/invites", wrap(GetPendingOrgInvites))
|
||||
orgRoute.Post("/invites", quota("user"), bind(dtos.AddInviteForm{}), wrap(AddOrgInvite))
|
||||
orgRoute.Patch("/invites/:code/revoke", wrap(RevokeInvite))
|
||||
|
||||
// prefs
|
||||
r.Get("/preferences", wrap(GetOrgPreferences))
|
||||
r.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateOrgPreferences))
|
||||
orgRoute.Get("/preferences", wrap(GetOrgPreferences))
|
||||
orgRoute.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateOrgPreferences))
|
||||
}, reqOrgAdmin)
|
||||
|
||||
// create new org
|
||||
r.Post("/orgs", quota("org"), bind(m.CreateOrgCommand{}), wrap(CreateOrg))
|
||||
apiRoute.Post("/orgs", quota("org"), bind(m.CreateOrgCommand{}), wrap(CreateOrg))
|
||||
|
||||
// search all orgs
|
||||
r.Get("/orgs", reqGrafanaAdmin, wrap(SearchOrgs))
|
||||
apiRoute.Get("/orgs", reqGrafanaAdmin, wrap(SearchOrgs))
|
||||
|
||||
// orgs (admin routes)
|
||||
r.Group("/orgs/:orgId", func() {
|
||||
r.Get("/", wrap(GetOrgById))
|
||||
r.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrg))
|
||||
r.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddress))
|
||||
r.Delete("/", wrap(DeleteOrgById))
|
||||
r.Get("/users", wrap(GetOrgUsers))
|
||||
r.Post("/users", bind(m.AddOrgUserCommand{}), wrap(AddOrgUser))
|
||||
r.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUser))
|
||||
r.Delete("/users/:userId", wrap(RemoveOrgUser))
|
||||
r.Get("/quotas", wrap(GetOrgQuotas))
|
||||
r.Put("/quotas/:target", bind(m.UpdateOrgQuotaCmd{}), wrap(UpdateOrgQuota))
|
||||
apiRoute.Group("/orgs/:orgId", func(orgsRoute RouteRegister) {
|
||||
orgsRoute.Get("/", wrap(GetOrgById))
|
||||
orgsRoute.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrg))
|
||||
orgsRoute.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddress))
|
||||
orgsRoute.Delete("/", wrap(DeleteOrgById))
|
||||
orgsRoute.Get("/users", wrap(GetOrgUsers))
|
||||
orgsRoute.Post("/users", bind(m.AddOrgUserCommand{}), wrap(AddOrgUser))
|
||||
orgsRoute.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUser))
|
||||
orgsRoute.Delete("/users/:userId", wrap(RemoveOrgUser))
|
||||
orgsRoute.Get("/quotas", wrap(GetOrgQuotas))
|
||||
orgsRoute.Put("/quotas/:target", bind(m.UpdateOrgQuotaCmd{}), wrap(UpdateOrgQuota))
|
||||
}, reqGrafanaAdmin)
|
||||
|
||||
// orgs (admin routes)
|
||||
r.Group("/orgs/name/:name", func() {
|
||||
r.Get("/", wrap(GetOrgByName))
|
||||
apiRoute.Group("/orgs/name/:name", func(orgsRoute RouteRegister) {
|
||||
orgsRoute.Get("/", wrap(GetOrgByName))
|
||||
}, reqGrafanaAdmin)
|
||||
|
||||
// auth api keys
|
||||
r.Group("/auth/keys", func() {
|
||||
r.Get("/", wrap(GetApiKeys))
|
||||
r.Post("/", quota("api_key"), bind(m.AddApiKeyCommand{}), wrap(AddApiKey))
|
||||
r.Delete("/:id", wrap(DeleteApiKey))
|
||||
apiRoute.Group("/auth/keys", func(keysRoute RouteRegister) {
|
||||
keysRoute.Get("/", wrap(GetApiKeys))
|
||||
keysRoute.Post("/", quota("api_key"), bind(m.AddApiKeyCommand{}), wrap(AddApiKey))
|
||||
keysRoute.Delete("/:id", wrap(DeleteApiKey))
|
||||
}, reqOrgAdmin)
|
||||
|
||||
// Preferences
|
||||
r.Group("/preferences", func() {
|
||||
r.Post("/set-home-dash", bind(m.SavePreferencesCommand{}), wrap(SetHomeDashboard))
|
||||
apiRoute.Group("/preferences", func(prefRoute RouteRegister) {
|
||||
prefRoute.Post("/set-home-dash", bind(m.SavePreferencesCommand{}), wrap(SetHomeDashboard))
|
||||
})
|
||||
|
||||
// Data sources
|
||||
r.Group("/datasources", func() {
|
||||
r.Get("/", wrap(GetDataSources))
|
||||
r.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), AddDataSource)
|
||||
r.Put("/:id", bind(m.UpdateDataSourceCommand{}), wrap(UpdateDataSource))
|
||||
r.Delete("/:id", DeleteDataSourceById)
|
||||
r.Delete("/name/:name", DeleteDataSourceByName)
|
||||
r.Get("/:id", wrap(GetDataSourceById))
|
||||
r.Get("/name/:name", wrap(GetDataSourceByName))
|
||||
apiRoute.Group("/datasources", func(datasourceRoute RouteRegister) {
|
||||
datasourceRoute.Get("/", wrap(GetDataSources))
|
||||
datasourceRoute.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), AddDataSource)
|
||||
datasourceRoute.Put("/:id", bind(m.UpdateDataSourceCommand{}), wrap(UpdateDataSource))
|
||||
datasourceRoute.Delete("/:id", DeleteDataSourceById)
|
||||
datasourceRoute.Delete("/name/:name", DeleteDataSourceByName)
|
||||
datasourceRoute.Get("/:id", wrap(GetDataSourceById))
|
||||
datasourceRoute.Get("/name/:name", wrap(GetDataSourceByName))
|
||||
}, reqOrgAdmin)
|
||||
|
||||
r.Get("/datasources/id/:name", wrap(GetDataSourceIdByName), reqSignedIn)
|
||||
apiRoute.Get("/datasources/id/:name", wrap(GetDataSourceIdByName), reqSignedIn)
|
||||
|
||||
r.Get("/plugins", wrap(GetPluginList))
|
||||
r.Get("/plugins/:pluginId/settings", wrap(GetPluginSettingById))
|
||||
r.Get("/plugins/:pluginId/markdown/:name", wrap(GetPluginMarkdown))
|
||||
apiRoute.Get("/plugins", wrap(GetPluginList))
|
||||
apiRoute.Get("/plugins/:pluginId/settings", wrap(GetPluginSettingById))
|
||||
apiRoute.Get("/plugins/:pluginId/markdown/:name", wrap(GetPluginMarkdown))
|
||||
|
||||
r.Group("/plugins", func() {
|
||||
r.Get("/:pluginId/dashboards/", wrap(GetPluginDashboards))
|
||||
r.Post("/:pluginId/settings", bind(m.UpdatePluginSettingCmd{}), wrap(UpdatePluginSetting))
|
||||
apiRoute.Group("/plugins", func(pluginRoute RouteRegister) {
|
||||
pluginRoute.Get("/:pluginId/dashboards/", wrap(GetPluginDashboards))
|
||||
pluginRoute.Post("/:pluginId/settings", bind(m.UpdatePluginSettingCmd{}), wrap(UpdatePluginSetting))
|
||||
}, reqOrgAdmin)
|
||||
|
||||
r.Get("/frontend/settings/", GetFrontendSettings)
|
||||
r.Any("/datasources/proxy/:id/*", reqSignedIn, hs.ProxyDataSourceRequest)
|
||||
r.Any("/datasources/proxy/:id", reqSignedIn, hs.ProxyDataSourceRequest)
|
||||
apiRoute.Get("/frontend/settings/", GetFrontendSettings)
|
||||
apiRoute.Any("/datasources/proxy/:id/*", reqSignedIn, hs.ProxyDataSourceRequest)
|
||||
apiRoute.Any("/datasources/proxy/:id", reqSignedIn, hs.ProxyDataSourceRequest)
|
||||
|
||||
// Dashboard
|
||||
r.Group("/dashboards", func() {
|
||||
r.Get("/db/:slug", wrap(GetDashboard))
|
||||
r.Delete("/db/:slug", wrap(DeleteDashboard))
|
||||
r.Post("/db", bind(m.SaveDashboardCommand{}), wrap(PostDashboard))
|
||||
apiRoute.Group("/dashboards", func(dashboardRoute RouteRegister) {
|
||||
dashboardRoute.Get("/db/:slug", wrap(GetDashboard))
|
||||
dashboardRoute.Delete("/db/:slug", reqEditorRole, wrap(DeleteDashboard))
|
||||
|
||||
r.Get("/id/:dashboardId/versions", wrap(GetDashboardVersions))
|
||||
r.Get("/id/:dashboardId/versions/:id", wrap(GetDashboardVersion))
|
||||
r.Post("/id/:dashboardId/restore", reqEditorRole, bind(dtos.RestoreDashboardVersionCommand{}), wrap(RestoreDashboardVersion))
|
||||
dashboardRoute.Get("/id/:dashboardId/versions", wrap(GetDashboardVersions))
|
||||
dashboardRoute.Get("/id/:dashboardId/versions/:id", wrap(GetDashboardVersion))
|
||||
dashboardRoute.Post("/id/:dashboardId/restore", reqEditorRole, bind(dtos.RestoreDashboardVersionCommand{}), wrap(RestoreDashboardVersion))
|
||||
|
||||
r.Post("/calculate-diff", bind(dtos.CalculateDiffOptions{}), wrap(CalculateDashboardDiff))
|
||||
r.Get("/home", wrap(GetHomeDashboard))
|
||||
r.Get("/tags", GetDashboardTags)
|
||||
r.Post("/import", bind(dtos.ImportDashboardCommand{}), wrap(ImportDashboard))
|
||||
dashboardRoute.Post("/calculate-diff", bind(dtos.CalculateDiffOptions{}), wrap(CalculateDashboardDiff))
|
||||
|
||||
r.Group("/id/:dashboardId", func() {
|
||||
r.Get("/versions", wrap(GetDashboardVersions))
|
||||
r.Get("/versions/:id", wrap(GetDashboardVersion))
|
||||
r.Post("/restore", bind(dtos.RestoreDashboardVersionCommand{}), wrap(RestoreDashboardVersion))
|
||||
|
||||
r.Group("/acl", func() {
|
||||
r.Get("/", wrap(GetDashboardAclList))
|
||||
r.Post("/", bind(dtos.UpdateDashboardAclCommand{}), wrap(UpdateDashboardAcl))
|
||||
r.Delete("/:aclId", wrap(DeleteDashboardAcl))
|
||||
})
|
||||
}, reqSignedIn)
|
||||
dashboardRoute.Post("/db", reqEditorRole, bind(m.SaveDashboardCommand{}), wrap(PostDashboard))
|
||||
dashboardRoute.Get("/home", wrap(GetHomeDashboard))
|
||||
dashboardRoute.Get("/tags", GetDashboardTags)
|
||||
dashboardRoute.Post("/import", bind(dtos.ImportDashboardCommand{}), wrap(ImportDashboard))
|
||||
})
|
||||
|
||||
// Dashboard snapshots
|
||||
r.Group("/dashboard/snapshots", func() {
|
||||
r.Get("/", wrap(SearchDashboardSnapshots))
|
||||
apiRoute.Group("/dashboard/snapshots", func(dashboardRoute RouteRegister) {
|
||||
dashboardRoute.Get("/", wrap(SearchDashboardSnapshots))
|
||||
})
|
||||
|
||||
// Playlist
|
||||
r.Group("/playlists", func() {
|
||||
r.Get("/", wrap(SearchPlaylists))
|
||||
r.Get("/:id", ValidateOrgPlaylist, wrap(GetPlaylist))
|
||||
r.Get("/:id/items", ValidateOrgPlaylist, wrap(GetPlaylistItems))
|
||||
r.Get("/:id/dashboards", ValidateOrgPlaylist, wrap(GetPlaylistDashboards))
|
||||
r.Delete("/:id", reqEditorRole, ValidateOrgPlaylist, wrap(DeletePlaylist))
|
||||
r.Put("/:id", reqEditorRole, bind(m.UpdatePlaylistCommand{}), ValidateOrgPlaylist, wrap(UpdatePlaylist))
|
||||
r.Post("/", reqEditorRole, bind(m.CreatePlaylistCommand{}), wrap(CreatePlaylist))
|
||||
apiRoute.Group("/playlists", func(playlistRoute RouteRegister) {
|
||||
playlistRoute.Get("/", wrap(SearchPlaylists))
|
||||
playlistRoute.Get("/:id", ValidateOrgPlaylist, wrap(GetPlaylist))
|
||||
playlistRoute.Get("/:id/items", ValidateOrgPlaylist, wrap(GetPlaylistItems))
|
||||
playlistRoute.Get("/:id/dashboards", ValidateOrgPlaylist, wrap(GetPlaylistDashboards))
|
||||
playlistRoute.Delete("/:id", reqEditorRole, ValidateOrgPlaylist, wrap(DeletePlaylist))
|
||||
playlistRoute.Put("/:id", reqEditorRole, bind(m.UpdatePlaylistCommand{}), ValidateOrgPlaylist, wrap(UpdatePlaylist))
|
||||
playlistRoute.Post("/", reqEditorRole, bind(m.CreatePlaylistCommand{}), wrap(CreatePlaylist))
|
||||
})
|
||||
|
||||
// Search
|
||||
r.Get("/search/", Search)
|
||||
apiRoute.Get("/search/", Search)
|
||||
|
||||
// metrics
|
||||
r.Post("/tsdb/query", bind(dtos.MetricRequest{}), wrap(QueryMetrics))
|
||||
r.Get("/tsdb/testdata/scenarios", wrap(GetTestDataScenarios))
|
||||
r.Get("/tsdb/testdata/gensql", reqGrafanaAdmin, wrap(GenerateSqlTestData))
|
||||
r.Get("/tsdb/testdata/random-walk", wrap(GetTestDataRandomWalk))
|
||||
apiRoute.Post("/tsdb/query", bind(dtos.MetricRequest{}), wrap(QueryMetrics))
|
||||
apiRoute.Get("/tsdb/testdata/scenarios", wrap(GetTestDataScenarios))
|
||||
apiRoute.Get("/tsdb/testdata/gensql", reqGrafanaAdmin, wrap(GenerateSqlTestData))
|
||||
apiRoute.Get("/tsdb/testdata/random-walk", wrap(GetTestDataRandomWalk))
|
||||
|
||||
// metrics
|
||||
r.Get("/metrics", wrap(GetInternalMetrics))
|
||||
|
||||
r.Group("/alerts", func() {
|
||||
r.Post("/test", bind(dtos.AlertTestCommand{}), wrap(AlertTest))
|
||||
r.Post("/:alertId/pause", bind(dtos.PauseAlertCommand{}), wrap(PauseAlert), reqEditorRole)
|
||||
r.Get("/:alertId", ValidateOrgAlert, wrap(GetAlert))
|
||||
r.Get("/", wrap(GetAlerts))
|
||||
r.Get("/states-for-dashboard", wrap(GetAlertStatesForDashboard))
|
||||
apiRoute.Group("/alerts", func(alertsRoute RouteRegister) {
|
||||
alertsRoute.Post("/test", bind(dtos.AlertTestCommand{}), wrap(AlertTest))
|
||||
alertsRoute.Post("/:alertId/pause", bind(dtos.PauseAlertCommand{}), wrap(PauseAlert), reqEditorRole)
|
||||
alertsRoute.Get("/:alertId", ValidateOrgAlert, wrap(GetAlert))
|
||||
alertsRoute.Get("/", wrap(GetAlerts))
|
||||
alertsRoute.Get("/states-for-dashboard", wrap(GetAlertStatesForDashboard))
|
||||
})
|
||||
|
||||
r.Get("/alert-notifications", wrap(GetAlertNotifications))
|
||||
r.Get("/alert-notifiers", wrap(GetAlertNotifiers))
|
||||
apiRoute.Get("/alert-notifications", wrap(GetAlertNotifications))
|
||||
apiRoute.Get("/alert-notifiers", wrap(GetAlertNotifiers))
|
||||
|
||||
r.Group("/alert-notifications", func() {
|
||||
r.Post("/test", bind(dtos.NotificationTestCommand{}), wrap(NotificationTest))
|
||||
r.Post("/", bind(m.CreateAlertNotificationCommand{}), wrap(CreateAlertNotification))
|
||||
r.Put("/:notificationId", bind(m.UpdateAlertNotificationCommand{}), wrap(UpdateAlertNotification))
|
||||
r.Get("/:notificationId", wrap(GetAlertNotificationById))
|
||||
r.Delete("/:notificationId", wrap(DeleteAlertNotification))
|
||||
apiRoute.Group("/alert-notifications", func(alertNotifications RouteRegister) {
|
||||
alertNotifications.Post("/test", bind(dtos.NotificationTestCommand{}), wrap(NotificationTest))
|
||||
alertNotifications.Post("/", bind(m.CreateAlertNotificationCommand{}), wrap(CreateAlertNotification))
|
||||
alertNotifications.Put("/:notificationId", bind(m.UpdateAlertNotificationCommand{}), wrap(UpdateAlertNotification))
|
||||
alertNotifications.Get("/:notificationId", wrap(GetAlertNotificationById))
|
||||
alertNotifications.Delete("/:notificationId", wrap(DeleteAlertNotification))
|
||||
}, reqEditorRole)
|
||||
|
||||
r.Get("/annotations", wrap(GetAnnotations))
|
||||
r.Post("/annotations/mass-delete", reqOrgAdmin, bind(dtos.DeleteAnnotationsCmd{}), wrap(DeleteAnnotations))
|
||||
apiRoute.Get("/annotations", wrap(GetAnnotations))
|
||||
apiRoute.Post("/annotations/mass-delete", reqOrgAdmin, bind(dtos.DeleteAnnotationsCmd{}), wrap(DeleteAnnotations))
|
||||
|
||||
r.Group("/annotations", func() {
|
||||
r.Post("/", bind(dtos.PostAnnotationsCmd{}), wrap(PostAnnotation))
|
||||
apiRoute.Group("/annotations", func(annotationsRoute RouteRegister) {
|
||||
annotationsRoute.Post("/", bind(dtos.PostAnnotationsCmd{}), wrap(PostAnnotation))
|
||||
}, reqEditorRole)
|
||||
|
||||
// error test
|
||||
@ -321,16 +309,16 @@ func (hs *HttpServer) registerRoutes() {
|
||||
}, reqSignedIn)
|
||||
|
||||
// admin api
|
||||
r.Group("/api/admin", func() {
|
||||
r.Get("/settings", AdminGetSettings)
|
||||
r.Post("/users", bind(dtos.AdminCreateUserForm{}), AdminCreateUser)
|
||||
r.Put("/users/:id/password", bind(dtos.AdminUpdateUserPasswordForm{}), AdminUpdateUserPassword)
|
||||
r.Put("/users/:id/permissions", bind(dtos.AdminUpdateUserPermissionsForm{}), AdminUpdateUserPermissions)
|
||||
r.Delete("/users/:id", AdminDeleteUser)
|
||||
r.Get("/users/:id/quotas", wrap(GetUserQuotas))
|
||||
r.Put("/users/:id/quotas/:target", bind(m.UpdateUserQuotaCmd{}), wrap(UpdateUserQuota))
|
||||
r.Get("/stats", AdminGetStats)
|
||||
r.Post("/pause-all-alerts", bind(dtos.PauseAllAlertsCommand{}), wrap(PauseAllAlerts))
|
||||
r.Group("/api/admin", func(adminRoute RouteRegister) {
|
||||
adminRoute.Get("/settings", AdminGetSettings)
|
||||
adminRoute.Post("/users", bind(dtos.AdminCreateUserForm{}), AdminCreateUser)
|
||||
adminRoute.Put("/users/:id/password", bind(dtos.AdminUpdateUserPasswordForm{}), AdminUpdateUserPassword)
|
||||
adminRoute.Put("/users/:id/permissions", bind(dtos.AdminUpdateUserPermissionsForm{}), AdminUpdateUserPermissions)
|
||||
adminRoute.Delete("/users/:id", AdminDeleteUser)
|
||||
adminRoute.Get("/users/:id/quotas", wrap(GetUserQuotas))
|
||||
adminRoute.Put("/users/:id/quotas/:target", bind(m.UpdateUserQuotaCmd{}), wrap(UpdateUserQuota))
|
||||
adminRoute.Get("/stats", AdminGetStats)
|
||||
adminRoute.Post("/pause-all-alerts", bind(dtos.PauseAllAlertsCommand{}), wrap(PauseAllAlerts))
|
||||
}, reqGrafanaAdmin)
|
||||
|
||||
// rendering
|
||||
@ -349,7 +337,9 @@ func (hs *HttpServer) registerRoutes() {
|
||||
// streams
|
||||
//r.Post("/api/streams/push", reqSignedIn, bind(dtos.StreamMessage{}), liveConn.PushToStream)
|
||||
|
||||
InitAppPluginRoutes(r)
|
||||
r.Register(macaronR)
|
||||
|
||||
r.NotFound(NotFoundHandler)
|
||||
InitAppPluginRoutes(macaronR)
|
||||
|
||||
macaronR.NotFound(NotFoundHandler)
|
||||
}
|
||||
|
@ -266,7 +266,7 @@ func handleGetMetricStatistics(req *cwRequest, c *middleware.Context) {
|
||||
c.JsonApiErr(500, "Unable to call AWS API", err)
|
||||
return
|
||||
}
|
||||
metrics.M_Aws_CloudWatch_GetMetricStatistics.Inc(1)
|
||||
metrics.M_Aws_CloudWatch_GetMetricStatistics.Inc()
|
||||
|
||||
c.JSON(200, resp)
|
||||
}
|
||||
@ -302,7 +302,7 @@ func handleListMetrics(req *cwRequest, c *middleware.Context) {
|
||||
var resp cloudwatch.ListMetricsOutput
|
||||
err = svc.ListMetricsPages(params,
|
||||
func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool {
|
||||
metrics.M_Aws_CloudWatch_ListMetrics.Inc(1)
|
||||
metrics.M_Aws_CloudWatch_ListMetrics.Inc()
|
||||
metrics, _ := awsutil.ValuesAtPath(page, "Metrics")
|
||||
for _, metric := range metrics {
|
||||
resp.Metrics = append(resp.Metrics, metric.(*cloudwatch.Metric))
|
||||
|
@ -275,7 +275,7 @@ func getAllMetrics(cwData *datasourceInfo) (cloudwatch.ListMetricsOutput, error)
|
||||
var resp cloudwatch.ListMetricsOutput
|
||||
err = svc.ListMetricsPages(params,
|
||||
func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool {
|
||||
metrics.M_Aws_CloudWatch_ListMetrics.Inc(1)
|
||||
metrics.M_Aws_CloudWatch_ListMetrics.Inc()
|
||||
metrics, _ := awsutil.ValuesAtPath(page, "Metrics")
|
||||
for _, metric := range metrics {
|
||||
resp.Metrics = append(resp.Metrics, metric.(*cloudwatch.Metric))
|
||||
|
@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/grafana/grafana/pkg/api/dtos"
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
"github.com/grafana/grafana/pkg/middleware"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/guardian"
|
||||
@ -59,7 +58,6 @@ func UpdateDashboardAcl(c *middleware.Context, apiCmd dtos.UpdateDashboardAclCom
|
||||
return ApiError(500, "Failed to create permission", err)
|
||||
}
|
||||
|
||||
metrics.M_Api_Dashboard_Acl_Update.Inc(1)
|
||||
return ApiSuccess("Dashboard acl updated")
|
||||
}
|
||||
|
||||
|
@ -34,13 +34,13 @@ func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapsho
|
||||
|
||||
cmd.OrgId = -1
|
||||
cmd.UserId = -1
|
||||
metrics.M_Api_Dashboard_Snapshot_External.Inc(1)
|
||||
metrics.M_Api_Dashboard_Snapshot_External.Inc()
|
||||
} else {
|
||||
cmd.Key = util.GetRandomString(32)
|
||||
cmd.DeleteKey = util.GetRandomString(32)
|
||||
cmd.OrgId = c.OrgId
|
||||
cmd.UserId = c.UserId
|
||||
metrics.M_Api_Dashboard_Snapshot_Create.Inc(1)
|
||||
metrics.M_Api_Dashboard_Snapshot_Create.Inc()
|
||||
}
|
||||
|
||||
if err := bus.Dispatch(&cmd); err != nil {
|
||||
@ -84,7 +84,7 @@ func GetDashboardSnapshot(c *middleware.Context) {
|
||||
},
|
||||
}
|
||||
|
||||
metrics.M_Api_Dashboard_Snapshot_Get.Inc(1)
|
||||
metrics.M_Api_Dashboard_Snapshot_Get.Inc()
|
||||
|
||||
c.Resp.Header().Set("Cache-Control", "public, max-age=3600")
|
||||
c.JSON(200, dto)
|
||||
|
@ -62,6 +62,8 @@ func getFrontendSettingsMap(c *middleware.Context) (map[string]interface{}, erro
|
||||
|
||||
if ds.JsonData != nil {
|
||||
dsMap["jsonData"] = ds.JsonData
|
||||
} else {
|
||||
dsMap["jsonData"] = make(map[string]string)
|
||||
}
|
||||
|
||||
if ds.Access == m.DS_ACCESS_DIRECT {
|
||||
|
@ -11,6 +11,8 @@ import (
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
|
||||
gocache "github.com/patrickmn/go-cache"
|
||||
macaron "gopkg.in/macaron.v1"
|
||||
|
||||
@ -165,9 +167,9 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron {
|
||||
}))
|
||||
|
||||
m.Use(hs.healthHandler)
|
||||
m.Use(hs.metricsEndpoint)
|
||||
m.Use(middleware.GetContextHandler())
|
||||
m.Use(middleware.Sessioner(&setting.SessionOptions))
|
||||
m.Use(middleware.RequestMetrics())
|
||||
m.Use(middleware.OrgRedirect())
|
||||
|
||||
// needs to be after context handler
|
||||
@ -180,6 +182,14 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron {
|
||||
return m
|
||||
}
|
||||
|
||||
func (hs *HttpServer) metricsEndpoint(ctx *macaron.Context) {
|
||||
if ctx.Req.Method != "GET" || ctx.Req.URL.Path != "/metrics" {
|
||||
return
|
||||
}
|
||||
|
||||
promhttp.Handler().ServeHTTP(ctx.Resp, ctx.Req.Request)
|
||||
}
|
||||
|
||||
func (hs *HttpServer) healthHandler(ctx *macaron.Context) {
|
||||
if ctx.Req.Method != "GET" || ctx.Req.URL.Path != "/api/health" {
|
||||
return
|
||||
|
@ -127,7 +127,7 @@ func LoginPost(c *middleware.Context, cmd dtos.LoginCommand) Response {
|
||||
c.SetCookie("redirect_to", "", -1, setting.AppSubUrl+"/")
|
||||
}
|
||||
|
||||
metrics.M_Api_Login_Post.Inc(1)
|
||||
metrics.M_Api_Login_Post.Inc()
|
||||
|
||||
return Json(200, result)
|
||||
}
|
||||
|
@ -186,7 +186,7 @@ func OAuthLogin(ctx *middleware.Context) {
|
||||
// login
|
||||
loginUserWithUser(userQuery.Result, ctx)
|
||||
|
||||
metrics.M_Api_Login_OAuth.Inc(1)
|
||||
metrics.M_Api_Login_OAuth.Inc()
|
||||
|
||||
if redirectTo, _ := url.QueryUnescape(ctx.GetCookie("redirect_to")); len(redirectTo) > 0 {
|
||||
ctx.SetCookie("redirect_to", "", -1, setting.AppSubUrl+"/")
|
||||
|
@ -2,13 +2,10 @@ package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/grafana/grafana/pkg/api/dtos"
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
"github.com/grafana/grafana/pkg/middleware"
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
@ -79,58 +76,6 @@ func GetTestDataScenarios(c *middleware.Context) Response {
|
||||
return Json(200, &result)
|
||||
}
|
||||
|
||||
func GetInternalMetrics(c *middleware.Context) Response {
|
||||
if metrics.UseNilMetrics {
|
||||
return Json(200, util.DynMap{"message": "Metrics disabled"})
|
||||
}
|
||||
|
||||
snapshots := metrics.MetricStats.GetSnapshots()
|
||||
|
||||
resp := make(map[string]interface{})
|
||||
|
||||
for _, m := range snapshots {
|
||||
metricName := m.Name() + m.StringifyTags()
|
||||
|
||||
switch metric := m.(type) {
|
||||
case metrics.Gauge:
|
||||
resp[metricName] = map[string]interface{}{
|
||||
"value": metric.Value(),
|
||||
}
|
||||
case metrics.Counter:
|
||||
resp[metricName] = map[string]interface{}{
|
||||
"count": metric.Count(),
|
||||
}
|
||||
case metrics.Timer:
|
||||
percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
|
||||
resp[metricName] = map[string]interface{}{
|
||||
"count": metric.Count(),
|
||||
"min": metric.Min(),
|
||||
"max": metric.Max(),
|
||||
"mean": metric.Mean(),
|
||||
"std": metric.StdDev(),
|
||||
"p25": percentiles[0],
|
||||
"p75": percentiles[1],
|
||||
"p90": percentiles[2],
|
||||
"p99": percentiles[3],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var b []byte
|
||||
var err error
|
||||
if b, err = json.MarshalIndent(resp, "", " "); err != nil {
|
||||
return ApiError(500, "body json marshal", err)
|
||||
}
|
||||
|
||||
return &NormalResponse{
|
||||
body: b,
|
||||
status: 200,
|
||||
header: http.Header{
|
||||
"Content-Type": []string{"application/json"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Genereates a index out of range error
|
||||
func GenerateError(c *middleware.Context) Response {
|
||||
var array []string
|
||||
|
@ -89,7 +89,7 @@ func CreateOrg(c *middleware.Context, cmd m.CreateOrgCommand) Response {
|
||||
return ApiError(500, "Failed to create organization", err)
|
||||
}
|
||||
|
||||
metrics.M_Api_Org_Create.Inc(1)
|
||||
metrics.M_Api_Org_Create.Inc()
|
||||
|
||||
return Json(200, &util.DynMap{
|
||||
"orgId": cmd.Result.Id,
|
||||
|
@ -187,8 +187,8 @@ func CompleteInvite(c *middleware.Context, completeInvite dtos.CompleteInviteFor
|
||||
|
||||
loginUserWithUser(user, c)
|
||||
|
||||
metrics.M_Api_User_SignUpCompleted.Inc(1)
|
||||
metrics.M_Api_User_SignUpInvite.Inc(1)
|
||||
metrics.M_Api_User_SignUpCompleted.Inc()
|
||||
metrics.M_Api_User_SignUpInvite.Inc()
|
||||
|
||||
return ApiSuccess("User created and logged in")
|
||||
}
|
||||
|
115
pkg/api/route_register.go
Normal file
115
pkg/api/route_register.go
Normal file
@ -0,0 +1,115 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
macaron "gopkg.in/macaron.v1"
|
||||
)
|
||||
|
||||
type Router interface {
|
||||
Handle(method, pattern string, handlers []macaron.Handler) *macaron.Route
|
||||
}
|
||||
|
||||
type RouteRegister interface {
|
||||
Get(string, ...macaron.Handler)
|
||||
Post(string, ...macaron.Handler)
|
||||
Delete(string, ...macaron.Handler)
|
||||
Put(string, ...macaron.Handler)
|
||||
Patch(string, ...macaron.Handler)
|
||||
Any(string, ...macaron.Handler)
|
||||
|
||||
Group(string, func(RouteRegister), ...macaron.Handler)
|
||||
|
||||
Register(Router) *macaron.Router
|
||||
}
|
||||
|
||||
type RegisterNamedMiddleware func(name string) macaron.Handler
|
||||
|
||||
func newRouteRegister(namedMiddleware ...RegisterNamedMiddleware) RouteRegister {
|
||||
return &routeRegister{
|
||||
prefix: "",
|
||||
routes: []route{},
|
||||
subfixHandlers: []macaron.Handler{},
|
||||
namedMiddleware: namedMiddleware,
|
||||
}
|
||||
}
|
||||
|
||||
type route struct {
|
||||
method string
|
||||
pattern string
|
||||
handlers []macaron.Handler
|
||||
}
|
||||
|
||||
type routeRegister struct {
|
||||
prefix string
|
||||
subfixHandlers []macaron.Handler
|
||||
namedMiddleware []RegisterNamedMiddleware
|
||||
routes []route
|
||||
groups []*routeRegister
|
||||
}
|
||||
|
||||
func (rr *routeRegister) Group(pattern string, fn func(rr RouteRegister), handlers ...macaron.Handler) {
|
||||
group := &routeRegister{
|
||||
prefix: rr.prefix + pattern,
|
||||
subfixHandlers: append(rr.subfixHandlers, handlers...),
|
||||
routes: []route{},
|
||||
namedMiddleware: rr.namedMiddleware,
|
||||
}
|
||||
|
||||
fn(group)
|
||||
rr.groups = append(rr.groups, group)
|
||||
}
|
||||
|
||||
func (rr *routeRegister) Register(router Router) *macaron.Router {
|
||||
for _, r := range rr.routes {
|
||||
router.Handle(r.method, r.pattern, r.handlers)
|
||||
}
|
||||
|
||||
for _, g := range rr.groups {
|
||||
g.Register(router)
|
||||
}
|
||||
|
||||
return &macaron.Router{}
|
||||
}
|
||||
|
||||
func (rr *routeRegister) route(pattern, method string, handlers ...macaron.Handler) {
|
||||
//inject tracing
|
||||
|
||||
h := make([]macaron.Handler, 0)
|
||||
for _, fn := range rr.namedMiddleware {
|
||||
h = append(h, fn(pattern))
|
||||
}
|
||||
|
||||
h = append(h, rr.subfixHandlers...)
|
||||
h = append(h, handlers...)
|
||||
|
||||
rr.routes = append(rr.routes, route{
|
||||
method: method,
|
||||
pattern: rr.prefix + pattern,
|
||||
handlers: h,
|
||||
})
|
||||
}
|
||||
|
||||
func (rr *routeRegister) Get(pattern string, handlers ...macaron.Handler) {
|
||||
rr.route(pattern, http.MethodGet, handlers...)
|
||||
}
|
||||
|
||||
func (rr *routeRegister) Post(pattern string, handlers ...macaron.Handler) {
|
||||
rr.route(pattern, http.MethodPost, handlers...)
|
||||
}
|
||||
|
||||
func (rr *routeRegister) Delete(pattern string, handlers ...macaron.Handler) {
|
||||
rr.route(pattern, http.MethodDelete, handlers...)
|
||||
}
|
||||
|
||||
func (rr *routeRegister) Put(pattern string, handlers ...macaron.Handler) {
|
||||
rr.route(pattern, http.MethodPut, handlers...)
|
||||
}
|
||||
|
||||
func (rr *routeRegister) Patch(pattern string, handlers ...macaron.Handler) {
|
||||
rr.route(pattern, http.MethodPatch, handlers...)
|
||||
}
|
||||
|
||||
func (rr *routeRegister) Any(pattern string, handlers ...macaron.Handler) {
|
||||
rr.route(pattern, "*", handlers...)
|
||||
}
|
185
pkg/api/route_register_test.go
Normal file
185
pkg/api/route_register_test.go
Normal file
@ -0,0 +1,185 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
macaron "gopkg.in/macaron.v1"
|
||||
)
|
||||
|
||||
type fakeRouter struct {
|
||||
route []route
|
||||
}
|
||||
|
||||
func (fr *fakeRouter) Handle(method, pattern string, handlers []macaron.Handler) *macaron.Route {
|
||||
fr.route = append(fr.route, route{
|
||||
pattern: pattern,
|
||||
method: method,
|
||||
handlers: handlers,
|
||||
})
|
||||
|
||||
return &macaron.Route{}
|
||||
}
|
||||
|
||||
func emptyHandlers(n int) []macaron.Handler {
|
||||
res := []macaron.Handler{}
|
||||
for i := 1; n >= i; i++ {
|
||||
res = append(res, emptyHandler(strconv.Itoa(i)))
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func emptyHandler(name string) macaron.Handler {
|
||||
return struct{ name string }{name: name}
|
||||
}
|
||||
|
||||
func TestRouteSimpleRegister(t *testing.T) {
|
||||
testTable := []route{
|
||||
{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(2)},
|
||||
{method: "GET", pattern: "/down", handlers: emptyHandlers(3)},
|
||||
}
|
||||
|
||||
// Setup
|
||||
rr := newRouteRegister(func(name string) macaron.Handler {
|
||||
return emptyHandler(name)
|
||||
})
|
||||
|
||||
rr.Delete("/admin", emptyHandler("1"))
|
||||
rr.Get("/down", emptyHandler("1"), emptyHandler("2"))
|
||||
|
||||
fr := &fakeRouter{}
|
||||
rr.Register(fr)
|
||||
|
||||
// Validation
|
||||
if len(fr.route) != len(testTable) {
|
||||
t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
|
||||
}
|
||||
|
||||
for i := range testTable {
|
||||
if testTable[i].method != fr.route[i].method {
|
||||
t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
|
||||
}
|
||||
|
||||
if testTable[i].pattern != fr.route[i].pattern {
|
||||
t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
|
||||
}
|
||||
|
||||
if len(testTable[i].handlers) != len(fr.route[i].handlers) {
|
||||
t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
|
||||
len(testTable[i].handlers),
|
||||
len(fr.route[i].handlers),
|
||||
testTable[i],
|
||||
fr.route[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRouteGroupedRegister(t *testing.T) {
|
||||
testTable := []route{
|
||||
{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(1)},
|
||||
{method: "GET", pattern: "/down", handlers: emptyHandlers(2)},
|
||||
{method: "POST", pattern: "/user", handlers: emptyHandlers(1)},
|
||||
{method: "PUT", pattern: "/user/friends", handlers: emptyHandlers(1)},
|
||||
{method: "DELETE", pattern: "/user/admin", handlers: emptyHandlers(2)},
|
||||
{method: "GET", pattern: "/user/admin/all", handlers: emptyHandlers(4)},
|
||||
}
|
||||
|
||||
// Setup
|
||||
rr := newRouteRegister()
|
||||
|
||||
rr.Delete("/admin", emptyHandler("1"))
|
||||
rr.Get("/down", emptyHandler("1"), emptyHandler("2"))
|
||||
|
||||
rr.Group("/user", func(user RouteRegister) {
|
||||
user.Post("", emptyHandler("1"))
|
||||
user.Put("/friends", emptyHandler("2"))
|
||||
|
||||
user.Group("/admin", func(admin RouteRegister) {
|
||||
admin.Delete("", emptyHandler("3"))
|
||||
admin.Get("/all", emptyHandler("3"), emptyHandler("4"), emptyHandler("5"))
|
||||
|
||||
}, emptyHandler("3"))
|
||||
})
|
||||
|
||||
fr := &fakeRouter{}
|
||||
rr.Register(fr)
|
||||
|
||||
// Validation
|
||||
if len(fr.route) != len(testTable) {
|
||||
t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
|
||||
}
|
||||
|
||||
for i := range testTable {
|
||||
if testTable[i].method != fr.route[i].method {
|
||||
t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
|
||||
}
|
||||
|
||||
if testTable[i].pattern != fr.route[i].pattern {
|
||||
t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
|
||||
}
|
||||
|
||||
if len(testTable[i].handlers) != len(fr.route[i].handlers) {
|
||||
t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
|
||||
len(testTable[i].handlers),
|
||||
len(fr.route[i].handlers),
|
||||
testTable[i],
|
||||
fr.route[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNamedMiddlewareRouteRegister(t *testing.T) {
|
||||
testTable := []route{
|
||||
{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(2)},
|
||||
{method: "GET", pattern: "/down", handlers: emptyHandlers(3)},
|
||||
{method: "POST", pattern: "/user", handlers: emptyHandlers(2)},
|
||||
{method: "PUT", pattern: "/user/friends", handlers: emptyHandlers(2)},
|
||||
{method: "DELETE", pattern: "/user/admin", handlers: emptyHandlers(3)},
|
||||
{method: "GET", pattern: "/user/admin/all", handlers: emptyHandlers(5)},
|
||||
}
|
||||
|
||||
// Setup
|
||||
rr := newRouteRegister(func(name string) macaron.Handler {
|
||||
return emptyHandler(name)
|
||||
})
|
||||
|
||||
rr.Delete("/admin", emptyHandler("1"))
|
||||
rr.Get("/down", emptyHandler("1"), emptyHandler("2"))
|
||||
|
||||
rr.Group("/user", func(user RouteRegister) {
|
||||
user.Post("", emptyHandler("1"))
|
||||
user.Put("/friends", emptyHandler("2"))
|
||||
|
||||
user.Group("/admin", func(admin RouteRegister) {
|
||||
admin.Delete("", emptyHandler("3"))
|
||||
admin.Get("/all", emptyHandler("3"), emptyHandler("4"), emptyHandler("5"))
|
||||
|
||||
}, emptyHandler("3"))
|
||||
})
|
||||
|
||||
fr := &fakeRouter{}
|
||||
rr.Register(fr)
|
||||
|
||||
// Validation
|
||||
if len(fr.route) != len(testTable) {
|
||||
t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
|
||||
}
|
||||
|
||||
for i := range testTable {
|
||||
if testTable[i].method != fr.route[i].method {
|
||||
t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
|
||||
}
|
||||
|
||||
if testTable[i].pattern != fr.route[i].pattern {
|
||||
t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
|
||||
}
|
||||
|
||||
if len(testTable[i].handlers) != len(fr.route[i].handlers) {
|
||||
t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
|
||||
len(testTable[i].handlers),
|
||||
len(fr.route[i].handlers),
|
||||
testTable[i],
|
||||
fr.route[i])
|
||||
}
|
||||
}
|
||||
}
|
@ -47,7 +47,7 @@ func SignUp(c *middleware.Context, form dtos.SignUpForm) Response {
|
||||
Code: cmd.Code,
|
||||
})
|
||||
|
||||
metrics.M_Api_User_SignUpStarted.Inc(1)
|
||||
metrics.M_Api_User_SignUpStarted.Inc()
|
||||
|
||||
return Json(200, util.DynMap{"status": "SignUpCreated"})
|
||||
}
|
||||
@ -111,7 +111,7 @@ func SignUpStep2(c *middleware.Context, form dtos.SignUpStep2Form) Response {
|
||||
}
|
||||
|
||||
loginUserWithUser(user, c)
|
||||
metrics.M_Api_User_SignUpCompleted.Inc(1)
|
||||
metrics.M_Api_User_SignUpCompleted.Inc()
|
||||
|
||||
return Json(200, apiResponse)
|
||||
}
|
||||
|
@ -2,7 +2,6 @@ package api
|
||||
|
||||
import (
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
"github.com/grafana/grafana/pkg/middleware"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
@ -18,8 +17,6 @@ func CreateUserGroup(c *middleware.Context, cmd m.CreateUserGroupCommand) Respon
|
||||
return ApiError(500, "Failed to create User Group", err)
|
||||
}
|
||||
|
||||
metrics.M_Api_UserGroup_Create.Inc(1)
|
||||
|
||||
return Json(200, &util.DynMap{
|
||||
"userGroupId": cmd.Result.Id,
|
||||
"message": "User Group created",
|
||||
|
@ -54,7 +54,7 @@ func (g *GrafanaServerImpl) Start() {
|
||||
g.writePIDFile()
|
||||
|
||||
initSql()
|
||||
metrics.Init()
|
||||
metrics.Init(setting.Cfg)
|
||||
search.Init()
|
||||
login.Init()
|
||||
social.NewOAuthService()
|
||||
|
88
pkg/components/imguploader/gcsuploader.go
Normal file
88
pkg/components/imguploader/gcsuploader.go
Normal file
@ -0,0 +1,88 @@
|
||||
package imguploader
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
const (
|
||||
tokenUrl string = "https://www.googleapis.com/auth/devstorage.read_write"
|
||||
uploadUrl string = "https://www.googleapis.com/upload/storage/v1/b/%s/o?uploadType=media&name=%s&predefinedAcl=publicRead"
|
||||
)
|
||||
|
||||
type GCSUploader struct {
|
||||
keyFile string
|
||||
bucket string
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func NewGCSUploader(keyFile, bucket string) *GCSUploader {
|
||||
return &GCSUploader{
|
||||
keyFile: keyFile,
|
||||
bucket: bucket,
|
||||
log: log.New("gcsuploader"),
|
||||
}
|
||||
}
|
||||
|
||||
func (u *GCSUploader) Upload(ctx context.Context, imageDiskPath string) (string, error) {
|
||||
key := util.GetRandomString(20) + ".png"
|
||||
|
||||
u.log.Debug("Opening key file ", u.keyFile)
|
||||
data, err := ioutil.ReadFile(u.keyFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
u.log.Debug("Creating JWT conf")
|
||||
conf, err := google.JWTConfigFromJSON(data, tokenUrl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
u.log.Debug("Creating HTTP client")
|
||||
client := conf.Client(ctx)
|
||||
err = u.uploadFile(client, imageDiskPath, key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return fmt.Sprintf("https://storage.googleapis.com/%s/%s", u.bucket, key), nil
|
||||
}
|
||||
|
||||
func (u *GCSUploader) uploadFile(client *http.Client, imageDiskPath, key string) error {
|
||||
u.log.Debug("Opening image file ", imageDiskPath)
|
||||
|
||||
fileReader, err := os.Open(imageDiskPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reqUrl := fmt.Sprintf(uploadUrl, u.bucket, key)
|
||||
u.log.Debug("Request URL: ", reqUrl)
|
||||
|
||||
req, err := http.NewRequest("POST", reqUrl, fileReader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header.Add("Content-Type", "image/png")
|
||||
u.log.Debug("Sending POST request to GCS")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("GCS response status code %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
24
pkg/components/imguploader/gcsuploader_test.go
Normal file
24
pkg/components/imguploader/gcsuploader_test.go
Normal file
@ -0,0 +1,24 @@
|
||||
package imguploader
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestUploadToGCS(t *testing.T) {
|
||||
SkipConvey("[Integration test] for external_image_store.gcs", t, func() {
|
||||
setting.NewConfigContext(&setting.CommandLineArgs{
|
||||
HomePath: "../../../",
|
||||
})
|
||||
|
||||
gcsUploader, _ := NewImageUploader()
|
||||
|
||||
path, err := gcsUploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(path, ShouldNotEqual, "")
|
||||
})
|
||||
}
|
@ -1,6 +1,7 @@
|
||||
package imguploader
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
@ -8,13 +9,13 @@ import (
|
||||
)
|
||||
|
||||
type ImageUploader interface {
|
||||
Upload(path string) (string, error)
|
||||
Upload(ctx context.Context, path string) (string, error)
|
||||
}
|
||||
|
||||
type NopImageUploader struct {
|
||||
}
|
||||
|
||||
func (NopImageUploader) Upload(path string) (string, error) {
|
||||
func (NopImageUploader) Upload(ctx context.Context, path string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
@ -52,6 +53,16 @@ func NewImageUploader() (ImageUploader, error) {
|
||||
password := webdavSec.Key("password").String()
|
||||
|
||||
return NewWebdavImageUploader(url, username, password, public_url)
|
||||
case "gcs":
|
||||
gcssec, err := setting.Cfg.GetSection("external_image_storage.gcs")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keyFile := gcssec.Key("key_file").MustString("")
|
||||
bucketName := gcssec.Key("bucket").MustString("")
|
||||
|
||||
return NewGCSUploader(keyFile, bucketName), nil
|
||||
}
|
||||
|
||||
return NopImageUploader{}, nil
|
||||
|
@ -96,5 +96,28 @@ func TestImageUploaderFactory(t *testing.T) {
|
||||
So(original.username, ShouldEqual, "username")
|
||||
So(original.password, ShouldEqual, "password")
|
||||
})
|
||||
|
||||
Convey("GCS uploader", func() {
|
||||
var err error
|
||||
|
||||
setting.NewConfigContext(&setting.CommandLineArgs{
|
||||
HomePath: "../../../",
|
||||
})
|
||||
|
||||
setting.ImageUploadProvider = "gcs"
|
||||
|
||||
gcpSec, err := setting.Cfg.GetSection("external_image_storage.gcs")
|
||||
gcpSec.NewKey("key_file", "/etc/secrets/project-79a52befa3f6.json")
|
||||
gcpSec.NewKey("bucket", "project-grafana-east")
|
||||
|
||||
uploader, err := NewImageUploader()
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
original, ok := uploader.(*GCSUploader)
|
||||
|
||||
So(ok, ShouldBeTrue)
|
||||
So(original.keyFile, ShouldEqual, "/etc/secrets/project-79a52befa3f6.json")
|
||||
So(original.bucket, ShouldEqual, "project-grafana-east")
|
||||
})
|
||||
})
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
package imguploader
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
@ -34,7 +35,7 @@ func NewS3Uploader(region, bucket, acl, accessKey, secretKey string) *S3Uploader
|
||||
}
|
||||
}
|
||||
|
||||
func (u *S3Uploader) Upload(imageDiskPath string) (string, error) {
|
||||
func (u *S3Uploader) Upload(ctx context.Context, imageDiskPath string) (string, error) {
|
||||
sess, err := session.NewSession()
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -1,6 +1,7 @@
|
||||
package imguploader
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
@ -15,7 +16,7 @@ func TestUploadToS3(t *testing.T) {
|
||||
|
||||
s3Uploader, _ := NewImageUploader()
|
||||
|
||||
path, err := s3Uploader.Upload("../../../public/img/logo_transparent_400x.png")
|
||||
path, err := s3Uploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(path, ShouldNotEqual, "")
|
||||
|
@ -2,6 +2,7 @@ package imguploader
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
@ -33,7 +34,7 @@ var netClient = &http.Client{
|
||||
Transport: netTransport,
|
||||
}
|
||||
|
||||
func (u *WebdavUploader) Upload(pa string) (string, error) {
|
||||
func (u *WebdavUploader) Upload(ctx context.Context, pa string) (string, error) {
|
||||
url, _ := url.Parse(u.url)
|
||||
filename := util.GetRandomString(20) + ".png"
|
||||
url.Path = path.Join(url.Path, filename)
|
||||
|
@ -1,6 +1,7 @@
|
||||
package imguploader
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
@ -11,7 +12,7 @@ func TestUploadToWebdav(t *testing.T) {
|
||||
// Can be tested with this docker container: https://hub.docker.com/r/morrisjobke/webdav/
|
||||
SkipConvey("[Integration test] for external_image_store.webdav", t, func() {
|
||||
webdavUploader, _ := NewWebdavImageUploader("http://localhost:8888/webdav/", "test", "test", "")
|
||||
path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png")
|
||||
path, err := webdavUploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(path, ShouldStartWith, "http://localhost:8888/webdav/")
|
||||
@ -19,7 +20,7 @@ func TestUploadToWebdav(t *testing.T) {
|
||||
|
||||
SkipConvey("[Integration test] for external_image_store.webdav with public url", t, func() {
|
||||
webdavUploader, _ := NewWebdavImageUploader("http://localhost:8888/webdav/", "test", "test", "http://publicurl:8888/webdav")
|
||||
path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png")
|
||||
path, err := webdavUploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(path, ShouldStartWith, "http://publicurl:8888/webdav/")
|
||||
|
@ -1,122 +0,0 @@
|
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// EWMAs continuously calculate an exponentially-weighted moving average
|
||||
// based on an outside source of clock ticks.
|
||||
type EWMA interface {
|
||||
Rate() float64
|
||||
Snapshot() EWMA
|
||||
Tick()
|
||||
Update(int64)
|
||||
}
|
||||
|
||||
// NewEWMA constructs a new EWMA with the given alpha.
|
||||
func NewEWMA(alpha float64) EWMA {
|
||||
if UseNilMetrics {
|
||||
return NilEWMA{}
|
||||
}
|
||||
return &StandardEWMA{alpha: alpha}
|
||||
}
|
||||
|
||||
// NewEWMA1 constructs a new EWMA for a one-minute moving average.
|
||||
func NewEWMA1() EWMA {
|
||||
return NewEWMA(1 - math.Exp(-5.0/60.0/1))
|
||||
}
|
||||
|
||||
// NewEWMA5 constructs a new EWMA for a five-minute moving average.
|
||||
func NewEWMA5() EWMA {
|
||||
return NewEWMA(1 - math.Exp(-5.0/60.0/5))
|
||||
}
|
||||
|
||||
// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
|
||||
func NewEWMA15() EWMA {
|
||||
return NewEWMA(1 - math.Exp(-5.0/60.0/15))
|
||||
}
|
||||
|
||||
// EWMASnapshot is a read-only copy of another EWMA.
|
||||
type EWMASnapshot float64
|
||||
|
||||
// Rate returns the rate of events per second at the time the snapshot was
|
||||
// taken.
|
||||
func (a EWMASnapshot) Rate() float64 { return float64(a) }
|
||||
|
||||
// Snapshot returns the snapshot.
|
||||
func (a EWMASnapshot) Snapshot() EWMA { return a }
|
||||
|
||||
// Tick panics.
|
||||
func (EWMASnapshot) Tick() {
|
||||
panic("Tick called on an EWMASnapshot")
|
||||
}
|
||||
|
||||
// Update panics.
|
||||
func (EWMASnapshot) Update(int64) {
|
||||
panic("Update called on an EWMASnapshot")
|
||||
}
|
||||
|
||||
// NilEWMA is a no-op EWMA.
|
||||
type NilEWMA struct{}
|
||||
|
||||
// Rate is a no-op.
|
||||
func (NilEWMA) Rate() float64 { return 0.0 }
|
||||
|
||||
// Snapshot is a no-op.
|
||||
func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
|
||||
|
||||
// Tick is a no-op.
|
||||
func (NilEWMA) Tick() {}
|
||||
|
||||
// Update is a no-op.
|
||||
func (NilEWMA) Update(n int64) {}
|
||||
|
||||
// StandardEWMA is the standard implementation of an EWMA and tracks the number
|
||||
// of uncounted events and processes them on each tick. It uses the
|
||||
// sync/atomic package to manage uncounted events.
|
||||
type StandardEWMA struct {
|
||||
uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
|
||||
alpha float64
|
||||
rate float64
|
||||
init bool
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// Rate returns the moving average rate of events per second.
|
||||
func (a *StandardEWMA) Rate() float64 {
|
||||
a.mutex.Lock()
|
||||
defer a.mutex.Unlock()
|
||||
return a.rate * float64(1e9)
|
||||
}
|
||||
|
||||
// Snapshot returns a read-only copy of the EWMA.
|
||||
func (a *StandardEWMA) Snapshot() EWMA {
|
||||
return EWMASnapshot(a.Rate())
|
||||
}
|
||||
|
||||
// Tick ticks the clock to update the moving average. It assumes it is called
|
||||
// every five seconds.
|
||||
func (a *StandardEWMA) Tick() {
|
||||
count := atomic.LoadInt64(&a.uncounted)
|
||||
atomic.AddInt64(&a.uncounted, -count)
|
||||
instantRate := float64(count) / float64(5e9)
|
||||
a.mutex.Lock()
|
||||
defer a.mutex.Unlock()
|
||||
if a.init {
|
||||
a.rate += a.alpha * (instantRate - a.rate)
|
||||
} else {
|
||||
a.init = true
|
||||
a.rate = instantRate
|
||||
}
|
||||
}
|
||||
|
||||
// Update adds n uncounted events.
|
||||
func (a *StandardEWMA) Update(n int64) {
|
||||
atomic.AddInt64(&a.uncounted, n)
|
||||
}
|
@ -1,46 +0,0 @@
|
||||
package metrics
|
||||
|
||||
// type comboCounterRef struct {
|
||||
// *MetricMeta
|
||||
// usageCounter Counter
|
||||
// metricCounter Counter
|
||||
// }
|
||||
//
|
||||
// func RegComboCounter(name string, tagStrings ...string) Counter {
|
||||
// meta := NewMetricMeta(name, tagStrings)
|
||||
// cr := &comboCounterRef{
|
||||
// MetricMeta: meta,
|
||||
// usageCounter: NewCounter(meta),
|
||||
// metricCounter: NewCounter(meta),
|
||||
// }
|
||||
//
|
||||
// UsageStats.Register(cr.usageCounter)
|
||||
// MetricStats.Register(cr.metricCounter)
|
||||
//
|
||||
// return cr
|
||||
// }
|
||||
//
|
||||
// func (c comboCounterRef) Clear() {
|
||||
// c.usageCounter.Clear()
|
||||
// c.metricCounter.Clear()
|
||||
// }
|
||||
//
|
||||
// func (c comboCounterRef) Count() int64 {
|
||||
// panic("Count called on a combocounter ref")
|
||||
// }
|
||||
//
|
||||
// // Dec panics.
|
||||
// func (c comboCounterRef) Dec(i int64) {
|
||||
// c.usageCounter.Dec(i)
|
||||
// c.metricCounter.Dec(i)
|
||||
// }
|
||||
//
|
||||
// // Inc panics.
|
||||
// func (c comboCounterRef) Inc(i int64) {
|
||||
// c.usageCounter.Inc(i)
|
||||
// c.metricCounter.Inc(i)
|
||||
// }
|
||||
//
|
||||
// func (c comboCounterRef) Snapshot() Metric {
|
||||
// return c.metricCounter.Snapshot()
|
||||
// }
|
@ -1,61 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import "github.com/grafana/grafana/pkg/log"
|
||||
|
||||
type MetricMeta struct {
|
||||
tags map[string]string
|
||||
name string
|
||||
}
|
||||
|
||||
func NewMetricMeta(name string, tagStrings []string) *MetricMeta {
|
||||
if len(tagStrings)%2 != 0 {
|
||||
log.Fatal(3, "Metrics: tags array is missing value for key, %v", tagStrings)
|
||||
}
|
||||
|
||||
tags := make(map[string]string)
|
||||
for i := 0; i < len(tagStrings); i += 2 {
|
||||
tags[tagStrings[i]] = tagStrings[i+1]
|
||||
}
|
||||
|
||||
return &MetricMeta{
|
||||
tags: tags,
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MetricMeta) Name() string {
|
||||
return m.name
|
||||
}
|
||||
|
||||
func (m *MetricMeta) GetTagsCopy() map[string]string {
|
||||
if len(m.tags) == 0 {
|
||||
return make(map[string]string)
|
||||
}
|
||||
|
||||
copy := make(map[string]string)
|
||||
for k2, v2 := range m.tags {
|
||||
copy[k2] = v2
|
||||
}
|
||||
|
||||
return copy
|
||||
}
|
||||
|
||||
func (m *MetricMeta) StringifyTags() string {
|
||||
if len(m.tags) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
str := ""
|
||||
for key, value := range m.tags {
|
||||
str += "." + key + "_" + value
|
||||
}
|
||||
|
||||
return str
|
||||
}
|
||||
|
||||
type Metric interface {
|
||||
Name() string
|
||||
GetTagsCopy() map[string]string
|
||||
StringifyTags() string
|
||||
Snapshot() Metric
|
||||
}
|
@ -1,61 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
// Counters hold an int64 value that can be incremented and decremented.
|
||||
type Counter interface {
|
||||
Metric
|
||||
|
||||
Clear()
|
||||
Count() int64
|
||||
Dec(int64)
|
||||
Inc(int64)
|
||||
}
|
||||
|
||||
// NewCounter constructs a new StandardCounter.
|
||||
func NewCounter(meta *MetricMeta) Counter {
|
||||
return &StandardCounter{
|
||||
MetricMeta: meta,
|
||||
count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func RegCounter(name string, tagStrings ...string) Counter {
|
||||
cr := NewCounter(NewMetricMeta(name, tagStrings))
|
||||
MetricStats.Register(cr)
|
||||
return cr
|
||||
}
|
||||
|
||||
// StandardCounter is the standard implementation of a Counter and uses the
|
||||
// sync/atomic package to manage a single int64 value.
|
||||
type StandardCounter struct {
|
||||
count int64 //Due to a bug in golang the 64bit variable need to come first to be 64bit aligned. https://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
*MetricMeta
|
||||
}
|
||||
|
||||
// Clear sets the counter to zero.
|
||||
func (c *StandardCounter) Clear() {
|
||||
atomic.StoreInt64(&c.count, 0)
|
||||
}
|
||||
|
||||
// Count returns the current count.
|
||||
func (c *StandardCounter) Count() int64 {
|
||||
return atomic.LoadInt64(&c.count)
|
||||
}
|
||||
|
||||
// Dec decrements the counter by the given amount.
|
||||
func (c *StandardCounter) Dec(i int64) {
|
||||
atomic.AddInt64(&c.count, -i)
|
||||
}
|
||||
|
||||
// Inc increments the counter by the given amount.
|
||||
func (c *StandardCounter) Inc(i int64) {
|
||||
atomic.AddInt64(&c.count, i)
|
||||
}
|
||||
|
||||
func (c *StandardCounter) Snapshot() Metric {
|
||||
return &StandardCounter{
|
||||
MetricMeta: c.MetricMeta,
|
||||
count: c.count,
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import "math"
|
||||
|
||||
func calculateDelta(oldValue, newValue int64) int64 {
|
||||
if oldValue < newValue {
|
||||
return newValue - oldValue
|
||||
} else {
|
||||
return (math.MaxInt64 - oldValue) + (newValue - math.MinInt64) + 1
|
||||
}
|
||||
}
|
@ -1,83 +0,0 @@
|
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
// Gauges hold an int64 value that can be set arbitrarily.
|
||||
type Gauge interface {
|
||||
Metric
|
||||
|
||||
Update(int64)
|
||||
Value() int64
|
||||
}
|
||||
|
||||
func NewGauge(meta *MetricMeta) Gauge {
|
||||
if UseNilMetrics {
|
||||
return NilGauge{}
|
||||
}
|
||||
return &StandardGauge{
|
||||
MetricMeta: meta,
|
||||
value: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func RegGauge(name string, tagStrings ...string) Gauge {
|
||||
tr := NewGauge(NewMetricMeta(name, tagStrings))
|
||||
MetricStats.Register(tr)
|
||||
return tr
|
||||
}
|
||||
|
||||
// GaugeSnapshot is a read-only copy of another Gauge.
|
||||
type GaugeSnapshot struct {
|
||||
value int64
|
||||
*MetricMeta
|
||||
}
|
||||
|
||||
// Snapshot returns the snapshot.
|
||||
func (g GaugeSnapshot) Snapshot() Metric { return g }
|
||||
|
||||
// Update panics.
|
||||
func (GaugeSnapshot) Update(int64) {
|
||||
panic("Update called on a GaugeSnapshot")
|
||||
}
|
||||
|
||||
// Value returns the value at the time the snapshot was taken.
|
||||
func (g GaugeSnapshot) Value() int64 { return g.value }
|
||||
|
||||
// NilGauge is a no-op Gauge.
|
||||
type NilGauge struct{ *MetricMeta }
|
||||
|
||||
// Snapshot is a no-op.
|
||||
func (NilGauge) Snapshot() Metric { return NilGauge{} }
|
||||
|
||||
// Update is a no-op.
|
||||
func (NilGauge) Update(v int64) {}
|
||||
|
||||
// Value is a no-op.
|
||||
func (NilGauge) Value() int64 { return 0 }
|
||||
|
||||
// StandardGauge is the standard implementation of a Gauge and uses the
|
||||
// sync/atomic package to manage a single int64 value.
|
||||
// atomic needs 64-bit aligned memory which is ensure for first word
|
||||
type StandardGauge struct {
|
||||
value int64
|
||||
*MetricMeta
|
||||
}
|
||||
|
||||
// Snapshot returns a read-only copy of the gauge.
|
||||
func (g *StandardGauge) Snapshot() Metric {
|
||||
return GaugeSnapshot{MetricMeta: g.MetricMeta, value: g.value}
|
||||
}
|
||||
|
||||
// Update updates the gauge's value.
|
||||
func (g *StandardGauge) Update(v int64) {
|
||||
atomic.StoreInt64(&g.value, v)
|
||||
}
|
||||
|
||||
// Value returns the gauge's current value.
|
||||
func (g *StandardGauge) Value() int64 {
|
||||
return atomic.LoadInt64(&g.value)
|
||||
}
|
@ -1,107 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
|
||||
type GraphitePublisher struct {
|
||||
address string
|
||||
protocol string
|
||||
prefix string
|
||||
prevCounts map[string]int64
|
||||
}
|
||||
|
||||
func CreateGraphitePublisher() (*GraphitePublisher, error) {
|
||||
graphiteSection, err := setting.Cfg.GetSection("metrics.graphite")
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
address := graphiteSection.Key("address").String()
|
||||
if address == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
publisher := &GraphitePublisher{}
|
||||
publisher.prevCounts = make(map[string]int64)
|
||||
publisher.protocol = "tcp"
|
||||
publisher.prefix = graphiteSection.Key("prefix").MustString("prod.grafana.%(instance_name)s")
|
||||
publisher.address = address
|
||||
|
||||
safeInstanceName := strings.Replace(setting.InstanceName, ".", "_", -1)
|
||||
prefix := graphiteSection.Key("prefix").Value()
|
||||
|
||||
if prefix == "" {
|
||||
prefix = "prod.grafana.%(instance_name)s."
|
||||
}
|
||||
|
||||
publisher.prefix = strings.Replace(prefix, "%(instance_name)s", safeInstanceName, -1)
|
||||
return publisher, nil
|
||||
}
|
||||
|
||||
func (this *GraphitePublisher) Publish(metrics []Metric) {
|
||||
conn, err := net.DialTimeout(this.protocol, this.address, time.Second*5)
|
||||
|
||||
if err != nil {
|
||||
log.Error(3, "Metrics: GraphitePublisher: Failed to connect to %s!", err)
|
||||
return
|
||||
}
|
||||
|
||||
buf := bytes.NewBufferString("")
|
||||
now := time.Now().Unix()
|
||||
|
||||
for _, m := range metrics {
|
||||
metricName := this.prefix + m.Name() + m.StringifyTags()
|
||||
|
||||
switch metric := m.(type) {
|
||||
case Counter:
|
||||
this.addCount(buf, metricName+".count", metric.Count(), now)
|
||||
case Gauge:
|
||||
this.addCount(buf, metricName, metric.Value(), now)
|
||||
case Timer:
|
||||
percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
|
||||
this.addCount(buf, metricName+".count", metric.Count(), now)
|
||||
this.addInt(buf, metricName+".max", metric.Max(), now)
|
||||
this.addInt(buf, metricName+".min", metric.Min(), now)
|
||||
this.addFloat(buf, metricName+".mean", metric.Mean(), now)
|
||||
this.addFloat(buf, metricName+".std", metric.StdDev(), now)
|
||||
this.addFloat(buf, metricName+".p25", percentiles[0], now)
|
||||
this.addFloat(buf, metricName+".p75", percentiles[1], now)
|
||||
this.addFloat(buf, metricName+".p90", percentiles[2], now)
|
||||
this.addFloat(buf, metricName+".p99", percentiles[3], now)
|
||||
}
|
||||
}
|
||||
|
||||
log.Trace("Metrics: GraphitePublisher.Publish() \n%s", buf)
|
||||
_, err = conn.Write(buf.Bytes())
|
||||
|
||||
if err != nil {
|
||||
log.Error(3, "Metrics: GraphitePublisher: Failed to send metrics! %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (this *GraphitePublisher) addInt(buf *bytes.Buffer, metric string, value int64, now int64) {
|
||||
buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, value, now))
|
||||
}
|
||||
|
||||
func (this *GraphitePublisher) addFloat(buf *bytes.Buffer, metric string, value float64, now int64) {
|
||||
buf.WriteString(fmt.Sprintf("%s %f %d\n", metric, value, now))
|
||||
}
|
||||
|
||||
func (this *GraphitePublisher) addCount(buf *bytes.Buffer, metric string, value int64, now int64) {
|
||||
delta := value
|
||||
|
||||
if last, ok := this.prevCounts[metric]; ok {
|
||||
delta = calculateDelta(last, value)
|
||||
}
|
||||
|
||||
this.prevCounts[metric] = value
|
||||
buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, delta, now))
|
||||
}
|
@ -1,77 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestGraphitePublisher(t *testing.T) {
|
||||
|
||||
setting.CustomInitPath = "conf/does_not_exist.ini"
|
||||
|
||||
Convey("Test graphite prefix replacement", t, func() {
|
||||
var err error
|
||||
err = setting.NewConfigContext(&setting.CommandLineArgs{
|
||||
HomePath: "../../",
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
sec, err := setting.Cfg.NewSection("metrics.graphite")
|
||||
sec.NewKey("prefix", "prod.grafana.%(instance_name)s.")
|
||||
sec.NewKey("address", "localhost:2001")
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
setting.InstanceName = "hostname.with.dots.com"
|
||||
publisher, err := CreateGraphitePublisher()
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(publisher, ShouldNotBeNil)
|
||||
|
||||
So(publisher.prefix, ShouldEqual, "prod.grafana.hostname_with_dots_com.")
|
||||
So(publisher.address, ShouldEqual, "localhost:2001")
|
||||
})
|
||||
|
||||
Convey("Test graphite publisher default prefix", t, func() {
|
||||
var err error
|
||||
err = setting.NewConfigContext(&setting.CommandLineArgs{
|
||||
HomePath: "../../",
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
sec, err := setting.Cfg.NewSection("metrics.graphite")
|
||||
sec.NewKey("address", "localhost:2001")
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
setting.InstanceName = "hostname.with.dots.com"
|
||||
publisher, err := CreateGraphitePublisher()
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(publisher, ShouldNotBeNil)
|
||||
|
||||
So(publisher.prefix, ShouldEqual, "prod.grafana.hostname_with_dots_com.")
|
||||
So(publisher.address, ShouldEqual, "localhost:2001")
|
||||
})
|
||||
|
||||
Convey("Test graphite publisher default values", t, func() {
|
||||
var err error
|
||||
err = setting.NewConfigContext(&setting.CommandLineArgs{
|
||||
HomePath: "../../",
|
||||
})
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, err = setting.Cfg.NewSection("metrics.graphite")
|
||||
|
||||
publisher, err := CreateGraphitePublisher()
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(publisher, ShouldBeNil)
|
||||
})
|
||||
}
|
396
pkg/metrics/graphitebridge/graphite.go
Normal file
396
pkg/metrics/graphitebridge/graphite.go
Normal file
@ -0,0 +1,396 @@
|
||||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package graphite provides a bridge to push Prometheus metrics to a Graphite
|
||||
// server.
|
||||
package graphitebridge
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"github.com/prometheus/common/model"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultInterval = 15 * time.Second
|
||||
millisecondsPerSecond = 1000
|
||||
)
|
||||
|
||||
// HandlerErrorHandling defines how a Handler serving metrics will handle
|
||||
// errors.
|
||||
type HandlerErrorHandling int
|
||||
|
||||
// These constants cause handlers serving metrics to behave as described if
|
||||
// errors are encountered.
|
||||
const (
|
||||
// Ignore errors and try to push as many metrics to Graphite as possible.
|
||||
ContinueOnError HandlerErrorHandling = iota
|
||||
|
||||
// Abort the push to Graphite upon the first error encountered.
|
||||
AbortOnError
|
||||
)
|
||||
|
||||
var metricCategoryPrefix []string = []string{"proxy_", "api_", "page_", "alerting_", "aws_", "db_", "stat_", "go_", "process_"}
|
||||
var trimMetricPrefix []string = []string{"grafana_"}
|
||||
|
||||
// Config defines the Graphite bridge config.
type Config struct {
	// The url ("host:port") to push data to. Required.
	URL string

	// The prefix for the pushed Graphite metrics. Defaults to empty string.
	Prefix string

	// The interval to use for pushing data to Graphite. Defaults to 15 seconds.
	Interval time.Duration

	// The timeout for pushing metrics to Graphite. Defaults to 15 seconds.
	Timeout time.Duration

	// The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer.
	Gatherer prometheus.Gatherer

	// The logger that messages are written to. Defaults to no logging.
	Logger Logger

	// ErrorHandling defines how errors are handled. Note that errors are
	// logged regardless of the configured ErrorHandling, provided Logger
	// is not nil.
	ErrorHandling HandlerErrorHandling

	// Graphite does not support ever-increasing counters the way Prometheus
	// does; rollups and ingestion may not cope with them. When enabled, the
	// bridge remembers the last value sent for each counter and pushes only
	// the delta (collected value minus remembered value).
	CountersAsDelta bool
}
|
||||
|
||||
// Bridge pushes metrics to the configured Graphite server.
// All fields are populated from Config by NewBridge.
type Bridge struct {
	url    string // "host:port" dialed by Push
	prefix string // prepended to every metric path
	// countersAsDetlas (sic — misspelling of "deltas", kept because other
	// methods reference it) mirrors Config.CountersAsDelta.
	countersAsDetlas bool
	interval         time.Duration // how often Run pushes
	timeout          time.Duration // TCP dial timeout used by Push

	errorHandling HandlerErrorHandling
	logger        Logger

	// g is the metrics source; Push gathers from it on every push.
	g prometheus.Gatherer

	// lastValue stores the previously pushed value per metric fingerprint,
	// used by returnDelta when countersAsDetlas is enabled.
	lastValue map[model.Fingerprint]float64
}
|
||||
|
||||
// Logger is the minimal interface Bridge needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy to implement by custom loggers, if they don't do so already anyway.
type Logger interface {
	// Println logs its arguments in the manner of fmt.Println.
	Println(v ...interface{})
}
|
||||
|
||||
// NewBridge returns a pointer to a new Bridge struct.
|
||||
func NewBridge(c *Config) (*Bridge, error) {
|
||||
b := &Bridge{}
|
||||
|
||||
if c.URL == "" {
|
||||
return nil, errors.New("missing URL")
|
||||
}
|
||||
b.url = c.URL
|
||||
|
||||
if c.Gatherer == nil {
|
||||
b.g = prometheus.DefaultGatherer
|
||||
} else {
|
||||
b.g = c.Gatherer
|
||||
}
|
||||
|
||||
if c.Logger != nil {
|
||||
b.logger = c.Logger
|
||||
}
|
||||
|
||||
if c.Prefix != "" {
|
||||
b.prefix = c.Prefix
|
||||
}
|
||||
|
||||
var z time.Duration
|
||||
if c.Interval == z {
|
||||
b.interval = defaultInterval
|
||||
} else {
|
||||
b.interval = c.Interval
|
||||
}
|
||||
|
||||
if c.Timeout == z {
|
||||
b.timeout = defaultInterval
|
||||
} else {
|
||||
b.timeout = c.Timeout
|
||||
}
|
||||
|
||||
b.errorHandling = c.ErrorHandling
|
||||
b.lastValue = map[model.Fingerprint]float64{}
|
||||
b.countersAsDetlas = c.CountersAsDelta
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Run starts the event loop that pushes Prometheus metrics to Graphite at the
// configured interval.
//
// Run blocks until ctx is canceled. A failed Push does not stop the loop: the
// error is logged (when a logger is configured) and the next tick retries.
func (b *Bridge) Run(ctx context.Context) {
	ticker := time.NewTicker(b.interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := b.Push(); err != nil && b.logger != nil {
				b.logger.Println("error pushing to Graphite:", err)
			}
		case <-ctx.Done():
			return
		}
	}
}
|
||||
|
||||
// Push pushes Prometheus metrics to the configured Graphite server.
//
// Gather errors are handled according to b.errorHandling: AbortOnError
// returns the error immediately; ContinueOnError logs it (if a logger is set)
// and proceeds with whatever was gathered.
// NOTE(review): the condition also fires when Gather succeeds but returns no
// families, so ContinueOnError can log "continue on error: <nil>" — confirm
// whether the empty-result case is meant to be treated as an error.
func (b *Bridge) Push() error {
	mfs, err := b.g.Gather()
	if err != nil || len(mfs) == 0 {
		switch b.errorHandling {
		case AbortOnError:
			return err
		case ContinueOnError:
			if b.logger != nil {
				b.logger.Println("continue on error:", err)
			}
		default:
			panic("unrecognized error handling value")
		}
	}

	// One TCP connection per push; closed when the write finishes.
	conn, err := net.DialTimeout("tcp", b.url, b.timeout)
	if err != nil {
		return err
	}
	defer conn.Close()

	return b.writeMetrics(conn, mfs, b.prefix, model.Now())
}
|
||||
|
||||
func (b *Bridge) writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error {
|
||||
for _, mf := range mfs {
|
||||
vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
|
||||
Timestamp: now,
|
||||
}, mf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := bufio.NewWriter(w)
|
||||
for _, s := range vec {
|
||||
if err := writePrefix(buf, prefix); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := writeMetric(buf, s.Metric, mf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
value := b.replaceCounterWithDelta(mf, s.Metric, s.Value)
|
||||
if _, err := fmt.Fprintf(buf, " %g %d\n", value, int64(s.Timestamp)/millisecondsPerSecond); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := buf.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeMetric writes the Graphite path for one sample to buf: the (possibly
// rewritten) metric name followed by each "label.value" pair in sorted order,
// then a type-based rollup suffix via addExtentionConventionForRollups.
func writeMetric(buf *bufio.Writer, m model.Metric, mf *dto.MetricFamily) error {
	metricName, hasName := m[model.MetricNameLabel]
	// Every label except __name__ contributes a "label.value" path segment.
	numLabels := len(m) - 1
	if !hasName {
		numLabels = len(m)
	}
	// Promote known category prefixes ("page_", "api_", ...) into their own
	// path level: the first '_' becomes a space, which is later turned into a
	// '.' by writeSanitized/replaceInvalidRune.
	for _, v := range metricCategoryPrefix {
		if strings.HasPrefix(string(metricName), v) {
			group := strings.Replace(v, "_", " ", 1)
			metricName = model.LabelValue(strings.Replace(string(metricName), v, group, 1))
		}
	}

	// Strip namespace prefixes (currently just "grafana_") from the name.
	for _, v := range trimMetricPrefix {
		if strings.HasPrefix(string(metricName), v) {
			metricName = model.LabelValue(strings.Replace(string(metricName), v, "", 1))
		}
	}

	labelStrings := make([]string, 0, numLabels)
	for label, value := range m {
		if label != model.MetricNameLabel {
			labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value)))
		}
	}

	var err error
	switch numLabels {
	case 0:
		if hasName {
			if err := writeSanitized(buf, string(metricName)); err != nil {
				return err
			}
		}
	default:
		// Sort so the path is deterministic despite map iteration order.
		sort.Strings(labelStrings)
		if err = writeSanitized(buf, string(metricName)); err != nil {
			return err
		}
		for _, s := range labelStrings {
			if err = buf.WriteByte('.'); err != nil {
				return err
			}
			if err = writeSanitized(buf, s); err != nil {
				return err
			}
		}
	}

	if err = addExtentionConventionForRollups(buf, mf, m); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// addExtentionConventionForRollups appends a type-based suffix to the metric
// path already in buf. Adding a `.count` / `.sum` suffix makes it possible to
// configure different Graphite rollup strategies based on metric type.
// (The "Extention" misspelling is kept: the name is referenced elsewhere.)
func addExtentionConventionForRollups(buf *bufio.Writer, mf *dto.MetricFamily, m model.Metric) error {
	mfType := mf.GetType()
	var err error
	// Counters always get ".count".
	if mfType == dto.MetricType_COUNTER {
		if _, err = fmt.Fprint(buf, ".count"); err != nil {
			return err
		}
	}

	// The "_count" series of summaries and histograms also get ".count".
	if mfType == dto.MetricType_SUMMARY || mfType == dto.MetricType_HISTOGRAM {
		if strings.HasSuffix(string(m[model.MetricNameLabel]), "_count") {
			if _, err = fmt.Fprint(buf, ".count"); err != nil {
				return err
			}
		}
	}
	// The "_sum" series of histograms get ".sum".
	if mfType == dto.MetricType_HISTOGRAM {
		if strings.HasSuffix(string(m[model.MetricNameLabel]), "_sum") {
			if _, err = fmt.Fprint(buf, ".sum"); err != nil {
				return err
			}
		}
	}

	return nil
}
|
||||
|
||||
func writePrefix(buf *bufio.Writer, s string) error {
|
||||
for _, c := range s {
|
||||
if _, err := buf.WriteRune(replaceInvalid(c)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeSanitized(buf *bufio.Writer, s string) error {
|
||||
prevUnderscore := false
|
||||
|
||||
for _, c := range s {
|
||||
c = replaceInvalidRune(c)
|
||||
if c == '_' {
|
||||
if prevUnderscore {
|
||||
continue
|
||||
}
|
||||
prevUnderscore = true
|
||||
} else {
|
||||
prevUnderscore = false
|
||||
}
|
||||
if _, err := buf.WriteRune(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func replaceInvalid(c rune) rune {
|
||||
if c == ' ' || c == '.' {
|
||||
return '.'
|
||||
}
|
||||
return replaceInvalidRune(c)
|
||||
}
|
||||
|
||||
// replaceInvalidRune maps a rune onto the character set accepted in a
// Graphite path component: spaces become path separators ('.'),
// alphanumerics plus '_' and ':' pass through unchanged, and anything else
// collapses to '_'.
func replaceInvalidRune(c rune) rune {
	switch {
	case c == ' ':
		return '.'
	case c >= 'a' && c <= 'z',
		c >= 'A' && c <= 'Z',
		c >= '0' && c <= '9',
		c == '_',
		c == ':':
		return c
	default:
		return '_'
	}
}
|
||||
|
||||
// replaceCounterWithDelta converts value into a delta against the previously
// pushed value when CountersAsDelta is enabled. Only counters and the
// "_count" series of summaries are converted; everything else passes through
// unchanged.
// NOTE(review): histogram "_count" series are NOT delta-converted here, while
// summary "_count" series are — confirm whether that asymmetry is intended.
func (b *Bridge) replaceCounterWithDelta(mf *dto.MetricFamily, metric model.Metric, value model.SampleValue) float64 {
	if !b.countersAsDetlas {
		return float64(value)
	}

	mfType := mf.GetType()
	if mfType == dto.MetricType_COUNTER {
		return b.returnDelta(metric, value)
	}

	if mfType == dto.MetricType_SUMMARY {
		if strings.HasSuffix(string(metric[model.MetricNameLabel]), "_count") {
			return b.returnDelta(metric, value)
		}
	}

	return float64(value)
}
|
||||
|
||||
func (b *Bridge) returnDelta(metric model.Metric, value model.SampleValue) float64 {
|
||||
key := metric.Fingerprint()
|
||||
_, exists := b.lastValue[key]
|
||||
if !exists {
|
||||
b.lastValue[key] = 0
|
||||
}
|
||||
|
||||
delta := float64(value) - b.lastValue[key]
|
||||
b.lastValue[key] = float64(value)
|
||||
|
||||
return delta
|
||||
}
|
503
pkg/metrics/graphitebridge/graphite_test.go
Normal file
503
pkg/metrics/graphitebridge/graphite_test.go
Normal file
@ -0,0 +1,503 @@
|
||||
package graphitebridge
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"net"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
func TestCountersAsDelta(t *testing.T) {
|
||||
b, _ := NewBridge(&Config{
|
||||
URL: "localhost:12345",
|
||||
CountersAsDelta: true,
|
||||
})
|
||||
ty := dto.MetricType(0)
|
||||
mf := &dto.MetricFamily{
|
||||
Type: &ty,
|
||||
Metric: []*dto.Metric{},
|
||||
}
|
||||
m := model.Metric{}
|
||||
|
||||
var want float64
|
||||
var got float64
|
||||
want = float64(1)
|
||||
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(1))
|
||||
if got != want {
|
||||
t.Fatalf("want %v got %v", want, got)
|
||||
}
|
||||
|
||||
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(2))
|
||||
if got != want {
|
||||
t.Fatalf("want %v got %v", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCountersAsDeltaDisabled(t *testing.T) {
|
||||
b, _ := NewBridge(&Config{
|
||||
URL: "localhost:12345",
|
||||
CountersAsDelta: false,
|
||||
})
|
||||
ty := dto.MetricType(0)
|
||||
mf := &dto.MetricFamily{
|
||||
Type: &ty,
|
||||
Metric: []*dto.Metric{},
|
||||
}
|
||||
m := model.Metric{}
|
||||
|
||||
var want float64
|
||||
var got float64
|
||||
want = float64(1)
|
||||
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(1))
|
||||
if got != want {
|
||||
t.Fatalf("want %v got %v", want, got)
|
||||
}
|
||||
|
||||
want = float64(2)
|
||||
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(2))
|
||||
if got != want {
|
||||
t.Fatalf("want %v got %v", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in, out string
|
||||
}{
|
||||
{in: "hello", out: "hello"},
|
||||
{in: "hE/l1o", out: "hE_l1o"},
|
||||
{in: "he,*ll(.o", out: "he_ll_o"},
|
||||
{in: "hello_there%^&", out: "hello_there_"},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
w := bufio.NewWriter(&buf)
|
||||
|
||||
for i, tc := range testCases {
|
||||
if err := writeSanitized(w, tc.in); err != nil {
|
||||
t.Fatalf("write failed: %v", err)
|
||||
}
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatalf("flush failed: %v", err)
|
||||
}
|
||||
|
||||
if want, got := tc.out, buf.String(); want != got {
|
||||
t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizePrefix(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in, out string
|
||||
}{
|
||||
{in: "service.prod.", out: "service.prod."},
|
||||
{in: "service.prod", out: "service.prod"},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
w := bufio.NewWriter(&buf)
|
||||
|
||||
for i, tc := range testCases {
|
||||
if err := writePrefix(w, tc.in); err != nil {
|
||||
t.Fatalf("write failed: %v", err)
|
||||
}
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatalf("flush failed: %v", err)
|
||||
}
|
||||
|
||||
if want, got := tc.out, buf.String(); want != got {
|
||||
t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
// TestWriteSummary renders a gathered SummaryVec through writeMetrics and
// compares the complete plaintext output against a golden string: quantile
// series, "_sum", and "_count" (with its ".count" rollup suffix). The
// timestamp 1477043 is now (ms) divided by millisecondsPerSecond.
func TestWriteSummary(t *testing.T) {
	sumVec := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:        "name",
			Help:        "docstring",
			ConstLabels: prometheus.Labels{"constname": "constvalue"},
			Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		[]string{"labelname"},
	)

	reg := prometheus.NewRegistry()
	reg.MustRegister(sumVec)

	b, err := NewBridge(&Config{
		URL:             "localhost:8080",
		Gatherer:        reg,
		CountersAsDelta: true,
	})
	if err != nil {
		t.Fatalf("cannot create bridge. err: %v", err)
	}

	// Three observations per label value so quantiles and counts are known.
	sumVec.WithLabelValues("val1").Observe(float64(10))
	sumVec.WithLabelValues("val1").Observe(float64(20))
	sumVec.WithLabelValues("val1").Observe(float64(30))
	sumVec.WithLabelValues("val2").Observe(float64(20))
	sumVec.WithLabelValues("val2").Observe(float64(30))
	sumVec.WithLabelValues("val2").Observe(float64(40))

	mfs, err := reg.Gather()
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	now := model.Time(1477043083)
	var buf bytes.Buffer
	err = b.writeMetrics(&buf, mfs, "prefix.", now)
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043
prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043
prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043
prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
prefix.name_count.constname.constvalue.labelname.val1.count 3 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043
prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
prefix.name_count.constname.constvalue.labelname.val2.count 3 1477043
`

	if got := buf.String(); want != got {
		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
	}
}
|
||||
|
||||
// TestWriteHistogram renders a gathered HistogramVec through writeMetrics and
// compares the full plaintext output against a golden string: one "_bucket"
// series per le boundary (including "+Inf", sanitized to "_Inf"), plus
// "_sum" with a ".sum" suffix and "_count" with a ".count" suffix.
func TestWriteHistogram(t *testing.T) {
	histVec := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:        "name",
			Help:        "docstring",
			ConstLabels: prometheus.Labels{"constname": "constvalue"},
			Buckets:     []float64{0.01, 0.02, 0.05, 0.1},
		},
		[]string{"labelname"},
	)

	reg := prometheus.NewRegistry()
	reg.MustRegister(histVec)

	b, err := NewBridge(&Config{
		URL:             "localhost:8080",
		Gatherer:        reg,
		CountersAsDelta: true,
	})
	if err != nil {
		t.Fatalf("error creating bridge: %v", err)
	}

	// All observations fall beyond the largest bucket (0.1), so every finite
	// bucket stays at 0 and only +Inf counts the three samples.
	histVec.WithLabelValues("val1").Observe(float64(10))
	histVec.WithLabelValues("val1").Observe(float64(20))
	histVec.WithLabelValues("val1").Observe(float64(30))
	histVec.WithLabelValues("val2").Observe(float64(20))
	histVec.WithLabelValues("val2").Observe(float64(30))
	histVec.WithLabelValues("val2").Observe(float64(40))

	mfs, err := reg.Gather()
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	now := model.Time(1477043083)
	var buf bytes.Buffer
	err = b.writeMetrics(&buf, mfs, "prefix.", now)
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043
prefix.name_sum.constname.constvalue.labelname.val1.sum 60 1477043
prefix.name_count.constname.constvalue.labelname.val1.count 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043
prefix.name_sum.constname.constvalue.labelname.val2.sum 90 1477043
prefix.name_count.constname.constvalue.labelname.val2.count 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043
`
	if got := buf.String(); want != got {
		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
	}
}
|
||||
|
||||
// TestCounterVec gathers a CounterVec twice with CountersAsDelta enabled.
// Each collect increments both series by one, so both outputs report a delta
// of 1 even though the absolute counter value is 2 on the second collect.
// The "page_" prefix is rewritten to a "page." path level by writeMetric.
func TestCounterVec(t *testing.T) {
	cntVec := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name:        "page_response",
			Help:        "docstring",
			ConstLabels: prometheus.Labels{"constname": "constvalue"},
		},
		[]string{"labelname"},
	)

	reg := prometheus.NewRegistry()
	reg.MustRegister(cntVec)

	cntVec.WithLabelValues("val1").Inc()
	cntVec.WithLabelValues("val2").Inc()

	b, err := NewBridge(&Config{
		URL:             "localhost:8080",
		Gatherer:        reg,
		CountersAsDelta: true,
	})
	if err != nil {
		t.Fatalf("error creating bridge: %v", err)
	}

	// first collect
	mfs, err := reg.Gather()
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	var buf bytes.Buffer
	err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083))
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	want := `prefix.page.response.constname.constvalue.labelname.val1.count 1 1477043
prefix.page.response.constname.constvalue.labelname.val2.count 1 1477043
`
	if got := buf.String(); want != got {
		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
	}

	//next collect
	cntVec.WithLabelValues("val1").Inc()
	cntVec.WithLabelValues("val2").Inc()

	mfs, err = reg.Gather()
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	buf = bytes.Buffer{}
	err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477053083))
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	// Still 1: the delta since the first collect, not the absolute value 2.
	want2 := `prefix.page.response.constname.constvalue.labelname.val1.count 1 1477053
prefix.page.response.constname.constvalue.labelname.val2.count 1 1477053
`
	if got := buf.String(); want2 != got {
		t.Fatalf("wanted \n%s\n, got \n%s\n", want2, got)
	}
}
|
||||
|
||||
// TestCounter is the unlabeled-counter variant of TestCounterVec: with
// CountersAsDelta enabled, two collects that each Inc() once both report a
// delta of 1.
func TestCounter(t *testing.T) {
	cntVec := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name:        "page_response",
			Help:        "docstring",
			ConstLabels: prometheus.Labels{"constname": "constvalue"},
		})

	reg := prometheus.NewRegistry()
	reg.MustRegister(cntVec)

	cntVec.Inc()

	b, err := NewBridge(&Config{
		URL:             "localhost:8080",
		Gatherer:        reg,
		CountersAsDelta: true,
	})
	if err != nil {
		t.Fatalf("error creating bridge: %v", err)
	}

	// first collect
	mfs, err := reg.Gather()
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	var buf bytes.Buffer
	err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083))
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	want := "prefix.page.response.constname.constvalue.count 1 1477043\n"
	if got := buf.String(); want != got {
		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
	}

	//next collect
	cntVec.Inc()

	mfs, err = reg.Gather()
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	buf = bytes.Buffer{}
	err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477053083))
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	// Delta since the first collect is 1, even though the counter is now 2.
	want2 := "prefix.page.response.constname.constvalue.count 1 1477053\n"
	if got := buf.String(); want2 != got {
		t.Fatalf("wanted \n%s\n, got \n%s\n", want2, got)
	}
}
|
||||
|
||||
// TestTrimGrafanaNamespace verifies that the "grafana_" prefix is stripped
// from metric names before writing, per trimMetricPrefix.
func TestTrimGrafanaNamespace(t *testing.T) {
	cntVec := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name:        "grafana_http_request_total",
			Help:        "docstring",
			ConstLabels: prometheus.Labels{"constname": "constvalue"},
		})

	reg := prometheus.NewRegistry()
	reg.MustRegister(cntVec)

	cntVec.Inc()

	b, err := NewBridge(&Config{
		URL:             "localhost:8080",
		Gatherer:        reg,
		CountersAsDelta: true,
	})
	if err != nil {
		t.Fatalf("error creating bridge: %v", err)
	}

	// first collect
	mfs, err := reg.Gather()
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	var buf bytes.Buffer
	err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083))
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	// "grafana_" is gone; the counter gets its ".count" rollup suffix.
	want := "prefix.http_request_total.constname.constvalue.count 1 1477043\n"
	if got := buf.String(); want != got {
		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
	}
}
|
||||
|
||||
// TestPush is an end-to-end test of Bridge.Push: it starts an in-process TCP
// server standing in for Graphite, pushes a gathered CounterVec to it, and
// regex-matches the received payload against the expected metric lines.
// The test fails if nothing arrives within 50ms.
func TestPush(t *testing.T) {
	reg := prometheus.NewRegistry()
	cntVec := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name:        "name",
			Help:        "docstring",
			ConstLabels: prometheus.Labels{"constname": "constvalue"},
		},
		[]string{"labelname"},
	)
	cntVec.WithLabelValues("val1").Inc()
	cntVec.WithLabelValues("val2").Inc()
	reg.MustRegister(cntVec)

	host := "localhost"
	port := ":56789"
	b, err := NewBridge(&Config{
		URL:      host + port,
		Gatherer: reg,
		Prefix:   "prefix.",
	})
	if err != nil {
		t.Fatalf("error creating bridge: %v", err)
	}

	nmg, err := newMockGraphite(port)
	if err != nil {
		t.Fatalf("error creating mock graphite: %v", err)
	}
	defer nmg.Close()

	err = b.Push()
	if err != nil {
		t.Fatalf("error pushing: %v", err)
	}

	// Patterns (not full lines): timestamps vary, so match prefix+path+value.
	wants := []string{
		"prefix.name.constname.constvalue.labelname.val1.count 1",
		"prefix.name.constname.constvalue.labelname.val2.count 1",
	}

	select {
	case got := <-nmg.readc:
		for _, want := range wants {
			matched, err := regexp.MatchString(want, got)
			if err != nil {
				t.Fatalf("error pushing: %v", err)
			}
			if !matched {
				t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got)
			}
		}
		return
	case err := <-nmg.errc:
		t.Fatalf("error reading push: %v", err)
	case <-time.After(50 * time.Millisecond):
		t.Fatalf("no result from graphite server")
	}
}
|
||||
|
||||
func newMockGraphite(port string) (*mockGraphite, error) {
|
||||
readc := make(chan string)
|
||||
errc := make(chan error)
|
||||
ln, err := net.Listen("tcp", port)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
errc <- err
|
||||
}
|
||||
var b bytes.Buffer
|
||||
io.Copy(&b, conn)
|
||||
readc <- b.String()
|
||||
}()
|
||||
|
||||
return &mockGraphite{
|
||||
readc: readc,
|
||||
errc: errc,
|
||||
Listener: ln,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// mockGraphite is an in-process TCP server standing in for Graphite in
// tests. Whatever a client writes to the accepted connection is delivered on
// readc once the client closes; Accept errors arrive on errc. The embedded
// Listener lets callers Close() the server directly.
type mockGraphite struct {
	readc chan string
	errc  chan error

	net.Listener
}
|
@ -1,189 +0,0 @@
|
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics
|
||||
|
||||
// Histograms calculate distribution statistics from a series of int64 values.
type Histogram interface {
	Metric

	Clear()                          // reset the underlying sample
	Count() int64                    // number of values recorded
	Max() int64                      // largest recorded value
	Mean() float64                   // arithmetic mean of recorded values
	Min() int64                      // smallest recorded value
	Percentile(float64) float64      // one percentile of the recorded values
	Percentiles([]float64) []float64 // several percentiles at once
	StdDev() float64                 // standard deviation of recorded values
	Sum() int64                      // sum of recorded values
	Update(int64)                    // record a new value
	Variance() float64               // variance of recorded values
}
|
||||
|
||||
// NewHistogram constructs a StandardHistogram that records values into the
// given Sample and carries meta as its metric metadata.
func NewHistogram(meta *MetricMeta, s Sample) Histogram {
	return &StandardHistogram{
		MetricMeta: meta,
		sample:     s,
	}
}
|
||||
|
||||
// HistogramSnapshot is a read-only copy of another Histogram.
// Mutating methods (Clear, Update) panic.
type HistogramSnapshot struct {
	*MetricMeta
	// sample holds the frozen values this snapshot reports from.
	sample *SampleSnapshot
}

// Clear panics.
func (*HistogramSnapshot) Clear() {
	panic("Clear called on a HistogramSnapshot")
}

// Count returns the number of samples recorded at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }

// Max returns the maximum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }

// Mean returns the mean of the values in the sample at the time the snapshot
// was taken.
func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }

// Min returns the minimum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }

// Percentile returns an arbitrary percentile of values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) Percentile(p float64) float64 {
	return h.sample.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of values in the sample
// at the time the snapshot was taken.
func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
	return h.sample.Percentiles(ps)
}

// Sample returns the Sample underlying the histogram.
func (h *HistogramSnapshot) Sample() Sample { return h.sample }

// Snapshot returns the snapshot (it is already read-only).
func (h *HistogramSnapshot) Snapshot() Metric { return h }

// StdDev returns the standard deviation of the values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }

// Sum returns the sum in the sample at the time the snapshot was taken.
func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }

// Update panics.
func (*HistogramSnapshot) Update(int64) {
	panic("Update called on a HistogramSnapshot")
}

// Variance returns the variance of inputs at the time the snapshot was taken.
func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
|
||||
|
||||
// NilHistogram is a no-op Histogram: every statistic reports zero and every
// mutation is silently ignored.
type NilHistogram struct {
	*MetricMeta
}

// Clear is a no-op.
func (NilHistogram) Clear() {}

// Count is a no-op.
func (NilHistogram) Count() int64 { return 0 }

// Max is a no-op.
func (NilHistogram) Max() int64 { return 0 }

// Mean is a no-op.
func (NilHistogram) Mean() float64 { return 0.0 }

// Min is a no-op.
func (NilHistogram) Min() int64 { return 0 }

// Percentile is a no-op.
func (NilHistogram) Percentile(p float64) float64 { return 0.0 }

// Percentiles is a no-op; it returns a zeroed slice matching len(ps).
func (NilHistogram) Percentiles(ps []float64) []float64 {
	return make([]float64, len(ps))
}

// Sample is a no-op.
func (NilHistogram) Sample() Sample { return NilSample{} }

// Snapshot is a no-op.
func (n NilHistogram) Snapshot() Metric { return n }

// StdDev is a no-op.
func (NilHistogram) StdDev() float64 { return 0.0 }

// Sum is a no-op.
func (NilHistogram) Sum() int64 { return 0 }

// Update is a no-op.
func (NilHistogram) Update(v int64) {}

// Variance is a no-op.
func (NilHistogram) Variance() float64 { return 0.0 }
|
||||
|
||||
// StandardHistogram is the standard implementation of a Histogram and uses a
// Sample to bound its memory use.
type StandardHistogram struct {
	*MetricMeta
	// sample is the bounded reservoir all statistics are computed from.
	sample Sample
}

// Clear clears the histogram and its sample.
func (h *StandardHistogram) Clear() { h.sample.Clear() }

// Count returns the number of samples recorded since the histogram was last
// cleared.
func (h *StandardHistogram) Count() int64 { return h.sample.Count() }

// Max returns the maximum value in the sample.
func (h *StandardHistogram) Max() int64 { return h.sample.Max() }

// Mean returns the mean of the values in the sample.
func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }

// Min returns the minimum value in the sample.
func (h *StandardHistogram) Min() int64 { return h.sample.Min() }

// Percentile returns an arbitrary percentile of the values in the sample.
func (h *StandardHistogram) Percentile(p float64) float64 {
	return h.sample.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
	return h.sample.Percentiles(ps)
}

// Sample returns the Sample underlying the histogram.
func (h *StandardHistogram) Sample() Sample { return h.sample }

// Snapshot returns a read-only copy of the histogram.
// NOTE(review): the snapshot is constructed without h.MetricMeta, so the
// returned HistogramSnapshot carries nil metadata — confirm callers don't
// rely on meta being present on snapshots.
func (h *StandardHistogram) Snapshot() Metric {
	return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
}

// StdDev returns the standard deviation of the values in the sample.
func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }

// Sum returns the sum in the sample.
func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }

// Update samples a new value.
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }

// Variance returns the variance of the values in the sample.
func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
|
@ -1,90 +0,0 @@
|
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics
|
||||
|
||||
import "testing"
|
||||
|
||||
// BenchmarkHistogram measures the cost of Update on a histogram backed by a
// 100-element uniform sample.
func BenchmarkHistogram(b *testing.B) {
	h := NewHistogram(nil, NewUniformSample(100))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		h.Update(int64(i))
	}
}
|
||||
|
||||
// TestHistogram10000 records 1..10000 into a sample large enough to hold all
// values and checks the exact distribution statistics via testHistogram10000.
func TestHistogram10000(t *testing.T) {
	h := NewHistogram(nil, NewUniformSample(100000))
	for i := 1; i <= 10000; i++ {
		h.Update(int64(i))
	}
	testHistogram10000(t, h)
}
|
||||
|
||||
func TestHistogramEmpty(t *testing.T) {
|
||||
h := NewHistogram(nil, NewUniformSample(100))
|
||||
if count := h.Count(); 0 != count {
|
||||
t.Errorf("h.Count(): 0 != %v\n", count)
|
||||
}
|
||||
if min := h.Min(); 0 != min {
|
||||
t.Errorf("h.Min(): 0 != %v\n", min)
|
||||
}
|
||||
if max := h.Max(); 0 != max {
|
||||
t.Errorf("h.Max(): 0 != %v\n", max)
|
||||
}
|
||||
if mean := h.Mean(); 0.0 != mean {
|
||||
t.Errorf("h.Mean(): 0.0 != %v\n", mean)
|
||||
}
|
||||
if stdDev := h.StdDev(); 0.0 != stdDev {
|
||||
t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
|
||||
}
|
||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
|
||||
if 0.0 != ps[0] {
|
||||
t.Errorf("median: 0.0 != %v\n", ps[0])
|
||||
}
|
||||
if 0.0 != ps[1] {
|
||||
t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
|
||||
}
|
||||
if 0.0 != ps[2] {
|
||||
t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHistogramSnapshot(t *testing.T) {
|
||||
h := NewHistogram(nil, NewUniformSample(100000))
|
||||
for i := 1; i <= 10000; i++ {
|
||||
h.Update(int64(i))
|
||||
}
|
||||
snapshot := h.Snapshot().(Histogram)
|
||||
h.Update(0)
|
||||
testHistogram10000(t, snapshot)
|
||||
}
|
||||
|
||||
func testHistogram10000(t *testing.T, h Histogram) {
|
||||
if count := h.Count(); 10000 != count {
|
||||
t.Errorf("h.Count(): 10000 != %v\n", count)
|
||||
}
|
||||
if min := h.Min(); 1 != min {
|
||||
t.Errorf("h.Min(): 1 != %v\n", min)
|
||||
}
|
||||
if max := h.Max(); 10000 != max {
|
||||
t.Errorf("h.Max(): 10000 != %v\n", max)
|
||||
}
|
||||
if mean := h.Mean(); 5000.5 != mean {
|
||||
t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
|
||||
}
|
||||
if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
|
||||
t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
|
||||
}
|
||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
|
||||
if 5000.5 != ps[0] {
|
||||
t.Errorf("median: 5000.5 != %v\n", ps[0])
|
||||
}
|
||||
if 7500.75 != ps[1] {
|
||||
t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
|
||||
}
|
||||
if 9900.99 != ps[2] {
|
||||
t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
|
||||
}
|
||||
}
|
38
pkg/metrics/init.go
Normal file
38
pkg/metrics/init.go
Normal file
@ -0,0 +1,38 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
ini "gopkg.in/ini.v1"
|
||||
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics/graphitebridge"
|
||||
)
|
||||
|
||||
var metricsLogger log.Logger = log.New("metrics")
|
||||
|
||||
type logWrapper struct {
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
func (lw *logWrapper) Println(v ...interface{}) {
|
||||
lw.logger.Info("graphite metric bridge", v...)
|
||||
}
|
||||
|
||||
func Init(file *ini.File) {
|
||||
cfg := ReadSettings(file)
|
||||
internalInit(cfg)
|
||||
}
|
||||
|
||||
func internalInit(settings *MetricSettings) {
|
||||
initMetricVars(settings)
|
||||
|
||||
if settings.GraphiteBridgeConfig != nil {
|
||||
bridge, err := graphitebridge.NewBridge(settings.GraphiteBridgeConfig)
|
||||
if err != nil {
|
||||
metricsLogger.Error("failed to create graphite bridge", "error", err)
|
||||
} else {
|
||||
go bridge.Run(context.Background())
|
||||
}
|
||||
}
|
||||
}
|
@ -1,221 +0,0 @@
|
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Meters count events to produce exponentially-weighted moving average rates
|
||||
// at one-, five-, and fifteen-minutes and a mean rate.
|
||||
type Meter interface {
|
||||
Metric
|
||||
|
||||
Count() int64
|
||||
Mark(int64)
|
||||
Rate1() float64
|
||||
Rate5() float64
|
||||
Rate15() float64
|
||||
RateMean() float64
|
||||
}
|
||||
|
||||
// NewMeter constructs a new StandardMeter and launches a goroutine.
|
||||
func NewMeter(meta *MetricMeta) Meter {
|
||||
if UseNilMetrics {
|
||||
return NilMeter{}
|
||||
}
|
||||
|
||||
m := newStandardMeter(meta)
|
||||
arbiter.Lock()
|
||||
defer arbiter.Unlock()
|
||||
arbiter.meters = append(arbiter.meters, m)
|
||||
if !arbiter.started {
|
||||
arbiter.started = true
|
||||
go arbiter.tick()
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type MeterSnapshot struct {
|
||||
*MetricMeta
|
||||
count int64
|
||||
rate1, rate5, rate15, rateMean float64
|
||||
}
|
||||
|
||||
// Count returns the count of events at the time the snapshot was taken.
|
||||
func (m *MeterSnapshot) Count() int64 { return m.count }
|
||||
|
||||
// Mark panics.
|
||||
func (*MeterSnapshot) Mark(n int64) {
|
||||
panic("Mark called on a MeterSnapshot")
|
||||
}
|
||||
|
||||
// Rate1 returns the one-minute moving average rate of events per second at the
|
||||
// time the snapshot was taken.
|
||||
func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
|
||||
|
||||
// Rate5 returns the five-minute moving average rate of events per second at
|
||||
// the time the snapshot was taken.
|
||||
func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
|
||||
|
||||
// Rate15 returns the fifteen-minute moving average rate of events per second
|
||||
// at the time the snapshot was taken.
|
||||
func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
|
||||
|
||||
// RateMean returns the meter's mean rate of events per second at the time the
|
||||
// snapshot was taken.
|
||||
func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
|
||||
|
||||
// Snapshot returns the snapshot.
|
||||
func (m *MeterSnapshot) Snapshot() Metric { return m }
|
||||
|
||||
// NilMeter is a no-op Meter.
|
||||
type NilMeter struct{ *MetricMeta }
|
||||
|
||||
// Count is a no-op.
|
||||
func (NilMeter) Count() int64 { return 0 }
|
||||
|
||||
// Mark is a no-op.
|
||||
func (NilMeter) Mark(n int64) {}
|
||||
|
||||
// Rate1 is a no-op.
|
||||
func (NilMeter) Rate1() float64 { return 0.0 }
|
||||
|
||||
// Rate5 is a no-op.
|
||||
func (NilMeter) Rate5() float64 { return 0.0 }
|
||||
|
||||
// Rate15is a no-op.
|
||||
func (NilMeter) Rate15() float64 { return 0.0 }
|
||||
|
||||
// RateMean is a no-op.
|
||||
func (NilMeter) RateMean() float64 { return 0.0 }
|
||||
|
||||
// Snapshot is a no-op.
|
||||
func (NilMeter) Snapshot() Metric { return NilMeter{} }
|
||||
|
||||
// StandardMeter is the standard implementation of a Meter.
|
||||
type StandardMeter struct {
|
||||
*MetricMeta
|
||||
lock sync.RWMutex
|
||||
snapshot *MeterSnapshot
|
||||
a1, a5, a15 EWMA
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
func newStandardMeter(meta *MetricMeta) *StandardMeter {
|
||||
return &StandardMeter{
|
||||
MetricMeta: meta,
|
||||
snapshot: &MeterSnapshot{MetricMeta: meta},
|
||||
a1: NewEWMA1(),
|
||||
a5: NewEWMA5(),
|
||||
a15: NewEWMA15(),
|
||||
startTime: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
// Count returns the number of events recorded.
|
||||
func (m *StandardMeter) Count() int64 {
|
||||
m.lock.RLock()
|
||||
count := m.snapshot.count
|
||||
m.lock.RUnlock()
|
||||
return count
|
||||
}
|
||||
|
||||
// Mark records the occurrence of n events.
|
||||
func (m *StandardMeter) Mark(n int64) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
m.snapshot.count += n
|
||||
m.a1.Update(n)
|
||||
m.a5.Update(n)
|
||||
m.a15.Update(n)
|
||||
m.updateSnapshot()
|
||||
}
|
||||
|
||||
// Rate1 returns the one-minute moving average rate of events per second.
|
||||
func (m *StandardMeter) Rate1() float64 {
|
||||
m.lock.RLock()
|
||||
rate1 := m.snapshot.rate1
|
||||
m.lock.RUnlock()
|
||||
return rate1
|
||||
}
|
||||
|
||||
// Rate5 returns the five-minute moving average rate of events per second.
|
||||
func (m *StandardMeter) Rate5() float64 {
|
||||
m.lock.RLock()
|
||||
rate5 := m.snapshot.rate5
|
||||
m.lock.RUnlock()
|
||||
return rate5
|
||||
}
|
||||
|
||||
// Rate15 returns the fifteen-minute moving average rate of events per second.
|
||||
func (m *StandardMeter) Rate15() float64 {
|
||||
m.lock.RLock()
|
||||
rate15 := m.snapshot.rate15
|
||||
m.lock.RUnlock()
|
||||
return rate15
|
||||
}
|
||||
|
||||
// RateMean returns the meter's mean rate of events per second.
|
||||
func (m *StandardMeter) RateMean() float64 {
|
||||
m.lock.RLock()
|
||||
rateMean := m.snapshot.rateMean
|
||||
m.lock.RUnlock()
|
||||
return rateMean
|
||||
}
|
||||
|
||||
// Snapshot returns a read-only copy of the meter.
|
||||
func (m *StandardMeter) Snapshot() Metric {
|
||||
m.lock.RLock()
|
||||
snapshot := *m.snapshot
|
||||
m.lock.RUnlock()
|
||||
return &snapshot
|
||||
}
|
||||
|
||||
func (m *StandardMeter) updateSnapshot() {
|
||||
// should run with write lock held on m.lock
|
||||
snapshot := m.snapshot
|
||||
snapshot.rate1 = m.a1.Rate()
|
||||
snapshot.rate5 = m.a5.Rate()
|
||||
snapshot.rate15 = m.a15.Rate()
|
||||
snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
|
||||
}
|
||||
|
||||
func (m *StandardMeter) tick() {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
m.a1.Tick()
|
||||
m.a5.Tick()
|
||||
m.a15.Tick()
|
||||
m.updateSnapshot()
|
||||
}
|
||||
|
||||
type meterArbiter struct {
|
||||
sync.RWMutex
|
||||
started bool
|
||||
meters []*StandardMeter
|
||||
ticker *time.Ticker
|
||||
}
|
||||
|
||||
var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
|
||||
|
||||
// Ticks meters on the scheduled interval
|
||||
func (ma *meterArbiter) tick() {
|
||||
for {
|
||||
select {
|
||||
case <-ma.ticker.C:
|
||||
ma.tickMeters()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ma *meterArbiter) tickMeters() {
|
||||
ma.RLock()
|
||||
defer ma.RUnlock()
|
||||
for _, meter := range ma.meters {
|
||||
meter.tick()
|
||||
}
|
||||
}
|
@ -1,151 +1,398 @@
|
||||
package metrics
|
||||
|
||||
var MetricStats Registry
|
||||
var UseNilMetrics bool
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
func init() {
|
||||
// init with nil metrics
|
||||
initMetricVars(&MetricSettings{})
|
||||
}
|
||||
|
||||
var (
|
||||
M_Instance_Start Counter
|
||||
M_Page_Status_200 Counter
|
||||
M_Page_Status_500 Counter
|
||||
M_Page_Status_404 Counter
|
||||
M_Page_Status_Unknown Counter
|
||||
M_Api_Status_200 Counter
|
||||
M_Api_Status_404 Counter
|
||||
M_Api_Status_500 Counter
|
||||
M_Api_Status_Unknown Counter
|
||||
M_Proxy_Status_200 Counter
|
||||
M_Proxy_Status_404 Counter
|
||||
M_Proxy_Status_500 Counter
|
||||
M_Proxy_Status_Unknown Counter
|
||||
M_Api_User_SignUpStarted Counter
|
||||
M_Api_User_SignUpCompleted Counter
|
||||
M_Api_User_SignUpInvite Counter
|
||||
M_Api_Dashboard_Save Timer
|
||||
M_Api_Dashboard_Get Timer
|
||||
M_Api_Dashboard_Search Timer
|
||||
M_Api_Admin_User_Create Counter
|
||||
M_Api_Login_Post Counter
|
||||
M_Api_Login_OAuth Counter
|
||||
M_Api_Org_Create Counter
|
||||
M_Api_Dashboard_Snapshot_Create Counter
|
||||
M_Api_Dashboard_Snapshot_External Counter
|
||||
M_Api_Dashboard_Snapshot_Get Counter
|
||||
M_Api_UserGroup_Create Counter
|
||||
M_Api_Dashboard_Acl_Update Counter
|
||||
M_Models_Dashboard_Insert Counter
|
||||
M_Alerting_Result_State_Alerting Counter
|
||||
M_Alerting_Result_State_Ok Counter
|
||||
M_Alerting_Result_State_Paused Counter
|
||||
M_Alerting_Result_State_NoData Counter
|
||||
M_Alerting_Result_State_Pending Counter
|
||||
M_Alerting_Notification_Sent_Slack Counter
|
||||
M_Alerting_Notification_Sent_Email Counter
|
||||
M_Alerting_Notification_Sent_Webhook Counter
|
||||
M_Alerting_Notification_Sent_DingDing Counter
|
||||
M_Alerting_Notification_Sent_PagerDuty Counter
|
||||
M_Alerting_Notification_Sent_LINE Counter
|
||||
M_Alerting_Notification_Sent_Victorops Counter
|
||||
M_Alerting_Notification_Sent_OpsGenie Counter
|
||||
M_Alerting_Notification_Sent_Telegram Counter
|
||||
M_Alerting_Notification_Sent_Threema Counter
|
||||
M_Alerting_Notification_Sent_Sensu Counter
|
||||
M_Alerting_Notification_Sent_Pushover Counter
|
||||
M_Aws_CloudWatch_GetMetricStatistics Counter
|
||||
M_Aws_CloudWatch_ListMetrics Counter
|
||||
M_DB_DataSource_QueryById Counter
|
||||
|
||||
// Timers
|
||||
M_DataSource_ProxyReq_Timer Timer
|
||||
M_Alerting_Execution_Time Timer
|
||||
|
||||
// StatTotals
|
||||
M_Alerting_Active_Alerts Gauge
|
||||
M_StatTotal_Dashboards Gauge
|
||||
M_StatTotal_Users Gauge
|
||||
M_StatTotal_Orgs Gauge
|
||||
M_StatTotal_Playlists Gauge
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func initMetricVars(settings *MetricSettings) {
|
||||
UseNilMetrics = settings.Enabled == false
|
||||
MetricStats = NewRegistry()
|
||||
const exporterName = "grafana"
|
||||
|
||||
M_Instance_Start = RegCounter("instance_start")
|
||||
var (
|
||||
M_Instance_Start prometheus.Counter
|
||||
M_Page_Status *prometheus.CounterVec
|
||||
M_Api_Status *prometheus.CounterVec
|
||||
M_Proxy_Status *prometheus.CounterVec
|
||||
M_Http_Request_Total *prometheus.CounterVec
|
||||
M_Http_Request_Summary *prometheus.SummaryVec
|
||||
|
||||
M_Page_Status_200 = RegCounter("page.resp_status", "code", "200")
|
||||
M_Page_Status_500 = RegCounter("page.resp_status", "code", "500")
|
||||
M_Page_Status_404 = RegCounter("page.resp_status", "code", "404")
|
||||
M_Page_Status_Unknown = RegCounter("page.resp_status", "code", "unknown")
|
||||
M_Api_User_SignUpStarted prometheus.Counter
|
||||
M_Api_User_SignUpCompleted prometheus.Counter
|
||||
M_Api_User_SignUpInvite prometheus.Counter
|
||||
M_Api_Dashboard_Save prometheus.Summary
|
||||
M_Api_Dashboard_Get prometheus.Summary
|
||||
M_Api_Dashboard_Search prometheus.Summary
|
||||
M_Api_Admin_User_Create prometheus.Counter
|
||||
M_Api_Login_Post prometheus.Counter
|
||||
M_Api_Login_OAuth prometheus.Counter
|
||||
M_Api_Org_Create prometheus.Counter
|
||||
|
||||
M_Api_Status_200 = RegCounter("api.resp_status", "code", "200")
|
||||
M_Api_Status_404 = RegCounter("api.resp_status", "code", "404")
|
||||
M_Api_Status_500 = RegCounter("api.resp_status", "code", "500")
|
||||
M_Api_Status_Unknown = RegCounter("api.resp_status", "code", "unknown")
|
||||
|
||||
M_Proxy_Status_200 = RegCounter("proxy.resp_status", "code", "200")
|
||||
M_Proxy_Status_404 = RegCounter("proxy.resp_status", "code", "404")
|
||||
M_Proxy_Status_500 = RegCounter("proxy.resp_status", "code", "500")
|
||||
M_Proxy_Status_Unknown = RegCounter("proxy.resp_status", "code", "unknown")
|
||||
|
||||
M_Api_User_SignUpStarted = RegCounter("api.user.signup_started")
|
||||
M_Api_User_SignUpCompleted = RegCounter("api.user.signup_completed")
|
||||
M_Api_User_SignUpInvite = RegCounter("api.user.signup_invite")
|
||||
|
||||
M_Api_UserGroup_Create = RegCounter("api.usergroup.create")
|
||||
M_Api_Dashboard_Acl_Update = RegCounter("api.dashboard.acl.update")
|
||||
|
||||
M_Api_Dashboard_Save = RegTimer("api.dashboard.save")
|
||||
M_Api_Dashboard_Get = RegTimer("api.dashboard.get")
|
||||
M_Api_Dashboard_Search = RegTimer("api.dashboard.search")
|
||||
|
||||
M_Api_Admin_User_Create = RegCounter("api.admin.user_create")
|
||||
M_Api_Login_Post = RegCounter("api.login.post")
|
||||
M_Api_Login_OAuth = RegCounter("api.login.oauth")
|
||||
M_Api_Org_Create = RegCounter("api.org.create")
|
||||
|
||||
M_Api_Dashboard_Snapshot_Create = RegCounter("api.dashboard_snapshot.create")
|
||||
M_Api_Dashboard_Snapshot_External = RegCounter("api.dashboard_snapshot.external")
|
||||
M_Api_Dashboard_Snapshot_Get = RegCounter("api.dashboard_snapshot.get")
|
||||
|
||||
M_Models_Dashboard_Insert = RegCounter("models.dashboard.insert")
|
||||
|
||||
M_Alerting_Result_State_Alerting = RegCounter("alerting.result", "state", "alerting")
|
||||
M_Alerting_Result_State_Ok = RegCounter("alerting.result", "state", "ok")
|
||||
M_Alerting_Result_State_Paused = RegCounter("alerting.result", "state", "paused")
|
||||
M_Alerting_Result_State_NoData = RegCounter("alerting.result", "state", "no_data")
|
||||
M_Alerting_Result_State_Pending = RegCounter("alerting.result", "state", "pending")
|
||||
|
||||
M_Alerting_Notification_Sent_Slack = RegCounter("alerting.notifications_sent", "type", "slack")
|
||||
M_Alerting_Notification_Sent_Email = RegCounter("alerting.notifications_sent", "type", "email")
|
||||
M_Alerting_Notification_Sent_Webhook = RegCounter("alerting.notifications_sent", "type", "webhook")
|
||||
M_Alerting_Notification_Sent_DingDing = RegCounter("alerting.notifications_sent", "type", "dingding")
|
||||
M_Alerting_Notification_Sent_PagerDuty = RegCounter("alerting.notifications_sent", "type", "pagerduty")
|
||||
M_Alerting_Notification_Sent_Victorops = RegCounter("alerting.notifications_sent", "type", "victorops")
|
||||
M_Alerting_Notification_Sent_OpsGenie = RegCounter("alerting.notifications_sent", "type", "opsgenie")
|
||||
M_Alerting_Notification_Sent_Telegram = RegCounter("alerting.notifications_sent", "type", "telegram")
|
||||
M_Alerting_Notification_Sent_Threema = RegCounter("alerting.notifications_sent", "type", "threema")
|
||||
M_Alerting_Notification_Sent_Sensu = RegCounter("alerting.notifications_sent", "type", "sensu")
|
||||
M_Alerting_Notification_Sent_LINE = RegCounter("alerting.notifications_sent", "type", "LINE")
|
||||
M_Alerting_Notification_Sent_Pushover = RegCounter("alerting.notifications_sent", "type", "pushover")
|
||||
|
||||
M_Aws_CloudWatch_GetMetricStatistics = RegCounter("aws.cloudwatch.get_metric_statistics")
|
||||
M_Aws_CloudWatch_ListMetrics = RegCounter("aws.cloudwatch.list_metrics")
|
||||
|
||||
M_DB_DataSource_QueryById = RegCounter("db.datasource.query_by_id")
|
||||
M_Api_Dashboard_Snapshot_Create prometheus.Counter
|
||||
M_Api_Dashboard_Snapshot_External prometheus.Counter
|
||||
M_Api_Dashboard_Snapshot_Get prometheus.Counter
|
||||
M_Api_Dashboard_Insert prometheus.Counter
|
||||
M_Alerting_Result_State *prometheus.CounterVec
|
||||
M_Alerting_Notification_Sent *prometheus.CounterVec
|
||||
M_Aws_CloudWatch_GetMetricStatistics prometheus.Counter
|
||||
M_Aws_CloudWatch_ListMetrics prometheus.Counter
|
||||
M_DB_DataSource_QueryById prometheus.Counter
|
||||
|
||||
// Timers
|
||||
M_DataSource_ProxyReq_Timer = RegTimer("api.dataproxy.request.all")
|
||||
M_Alerting_Execution_Time = RegTimer("alerting.execution_time")
|
||||
M_DataSource_ProxyReq_Timer prometheus.Summary
|
||||
M_Alerting_Execution_Time prometheus.Summary
|
||||
|
||||
// StatTotals
|
||||
M_Alerting_Active_Alerts = RegGauge("alerting.active_alerts")
|
||||
M_StatTotal_Dashboards = RegGauge("stat_totals", "stat", "dashboards")
|
||||
M_StatTotal_Users = RegGauge("stat_totals", "stat", "users")
|
||||
M_StatTotal_Orgs = RegGauge("stat_totals", "stat", "orgs")
|
||||
M_StatTotal_Playlists = RegGauge("stat_totals", "stat", "playlists")
|
||||
M_Alerting_Active_Alerts prometheus.Gauge
|
||||
M_StatTotal_Dashboards prometheus.Gauge
|
||||
M_StatTotal_Users prometheus.Gauge
|
||||
M_StatTotal_Orgs prometheus.Gauge
|
||||
M_StatTotal_Playlists prometheus.Gauge
|
||||
)
|
||||
|
||||
func init() {
|
||||
M_Instance_Start = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "instance_start_total",
|
||||
Help: "counter for started instances",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Page_Status = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "page_response_status_total",
|
||||
Help: "page http response status",
|
||||
Namespace: exporterName,
|
||||
},
|
||||
[]string{"code"},
|
||||
)
|
||||
|
||||
M_Api_Status = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "api_response_status_total",
|
||||
Help: "api http response status",
|
||||
Namespace: exporterName,
|
||||
},
|
||||
[]string{"code"},
|
||||
)
|
||||
|
||||
M_Proxy_Status = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "proxy_response_status_total",
|
||||
Help: "proxy http response status",
|
||||
Namespace: exporterName,
|
||||
},
|
||||
[]string{"code"},
|
||||
)
|
||||
|
||||
M_Http_Request_Total = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "http_request_total",
|
||||
Help: "http request counter",
|
||||
},
|
||||
[]string{"handler", "statuscode", "method"},
|
||||
)
|
||||
|
||||
M_Http_Request_Summary = prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "http_request_duration_milleseconds",
|
||||
Help: "http request summary",
|
||||
},
|
||||
[]string{"handler", "statuscode", "method"},
|
||||
)
|
||||
|
||||
M_Api_User_SignUpStarted = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_user_signup_started_total",
|
||||
Help: "amount of users who started the signup flow",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_User_SignUpCompleted = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_user_signup_completed_total",
|
||||
Help: "amount of users who completed the signup flow",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_User_SignUpInvite = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_user_signup_invite_total",
|
||||
Help: "amount of users who have been invited",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Dashboard_Save = prometheus.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "api_dashboard_save_milleseconds",
|
||||
Help: "summary for dashboard save duration",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Dashboard_Get = prometheus.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "api_dashboard_get_milleseconds",
|
||||
Help: "summary for dashboard get duration",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Dashboard_Search = prometheus.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "api_dashboard_search_milleseconds",
|
||||
Help: "summary for dashboard search duration",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Admin_User_Create = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_admin_user_created_total",
|
||||
Help: "api admin user created counter",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Login_Post = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_login_post_total",
|
||||
Help: "api login post counter",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Login_OAuth = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_login_oauth_total",
|
||||
Help: "api login oauth counter",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Org_Create = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_org_create_total",
|
||||
Help: "api org created counter",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Dashboard_Snapshot_Create = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_dashboard_snapshot_create_total",
|
||||
Help: "dashboard snapshots created",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Dashboard_Snapshot_External = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_dashboard_snapshot_external_total",
|
||||
Help: "external dashboard snapshots created",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Dashboard_Snapshot_Get = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_dashboard_snapshot_get_total",
|
||||
Help: "loaded dashboards",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Api_Dashboard_Insert = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_models_dashboard_insert_total",
|
||||
Help: "dashboards inserted ",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Alerting_Result_State = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "alerting_result_total",
|
||||
Help: "alert execution result counter",
|
||||
Namespace: exporterName,
|
||||
}, []string{"state"})
|
||||
|
||||
M_Alerting_Notification_Sent = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "alerting_notification_sent_total",
|
||||
Help: "counter for how many alert notifications been sent",
|
||||
Namespace: exporterName,
|
||||
}, []string{"type"})
|
||||
|
||||
M_Aws_CloudWatch_GetMetricStatistics = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "aws_cloudwatch_get_metric_statistics_total",
|
||||
Help: "counter for getting metric statistics from aws",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Aws_CloudWatch_ListMetrics = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "aws_cloudwatch_list_metrics_total",
|
||||
Help: "counter for getting list of metrics from aws",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_DB_DataSource_QueryById = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "db_datasource_query_by_id_total",
|
||||
Help: "counter for getting datasource by id",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_DataSource_ProxyReq_Timer = prometheus.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "api_dataproxy_request_all_milleseconds",
|
||||
Help: "summary for dashboard search duration",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Alerting_Execution_Time = prometheus.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "alerting_execution_time_milliseconds",
|
||||
Help: "summary of alert exeuction duration",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_Alerting_Active_Alerts = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "alerting_active_alerts",
|
||||
Help: "amount of active alerts",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_StatTotal_Dashboards = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "stat_totals_dashboard",
|
||||
Help: "total amount of dashboards",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_StatTotal_Users = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "stat_total_users",
|
||||
Help: "total amount of users",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_StatTotal_Orgs = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "stat_total_orgs",
|
||||
Help: "total amount of orgs",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
|
||||
M_StatTotal_Playlists = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "stat_total_playlists",
|
||||
Help: "total amount of playlists",
|
||||
Namespace: exporterName,
|
||||
})
|
||||
}
|
||||
|
||||
func initMetricVars(settings *MetricSettings) {
|
||||
prometheus.MustRegister(
|
||||
M_Instance_Start,
|
||||
M_Page_Status,
|
||||
M_Api_Status,
|
||||
M_Proxy_Status,
|
||||
M_Http_Request_Total,
|
||||
M_Http_Request_Summary,
|
||||
M_Api_User_SignUpStarted,
|
||||
M_Api_User_SignUpCompleted,
|
||||
M_Api_User_SignUpInvite,
|
||||
M_Api_Dashboard_Save,
|
||||
M_Api_Dashboard_Get,
|
||||
M_Api_Dashboard_Search,
|
||||
M_DataSource_ProxyReq_Timer,
|
||||
M_Alerting_Execution_Time,
|
||||
M_Api_Admin_User_Create,
|
||||
M_Api_Login_Post,
|
||||
M_Api_Login_OAuth,
|
||||
M_Api_Org_Create,
|
||||
M_Api_Dashboard_Snapshot_Create,
|
||||
M_Api_Dashboard_Snapshot_External,
|
||||
M_Api_Dashboard_Snapshot_Get,
|
||||
M_Api_Dashboard_Insert,
|
||||
M_Alerting_Result_State,
|
||||
M_Alerting_Notification_Sent,
|
||||
M_Aws_CloudWatch_GetMetricStatistics,
|
||||
M_Aws_CloudWatch_ListMetrics,
|
||||
M_DB_DataSource_QueryById,
|
||||
M_Alerting_Active_Alerts,
|
||||
M_StatTotal_Dashboards,
|
||||
M_StatTotal_Users,
|
||||
M_StatTotal_Orgs,
|
||||
M_StatTotal_Playlists)
|
||||
|
||||
go instrumentationLoop(settings)
|
||||
}
|
||||
|
||||
func instrumentationLoop(settings *MetricSettings) chan struct{} {
|
||||
M_Instance_Start.Inc()
|
||||
|
||||
onceEveryDayTick := time.NewTicker(time.Hour * 24)
|
||||
secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds))
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-onceEveryDayTick.C:
|
||||
sendUsageStats()
|
||||
case <-secondTicker.C:
|
||||
updateTotalStats()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var metricPublishCounter int64 = 0
|
||||
|
||||
func updateTotalStats() {
|
||||
metricPublishCounter++
|
||||
if metricPublishCounter == 1 || metricPublishCounter%10 == 0 {
|
||||
statsQuery := models.GetSystemStatsQuery{}
|
||||
if err := bus.Dispatch(&statsQuery); err != nil {
|
||||
metricsLogger.Error("Failed to get system stats", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
M_StatTotal_Dashboards.Set(float64(statsQuery.Result.Dashboards))
|
||||
M_StatTotal_Users.Set(float64(statsQuery.Result.Users))
|
||||
M_StatTotal_Playlists.Set(float64(statsQuery.Result.Playlists))
|
||||
M_StatTotal_Orgs.Set(float64(statsQuery.Result.Orgs))
|
||||
}
|
||||
}
|
||||
|
||||
func sendUsageStats() {
|
||||
if !setting.ReportingEnabled {
|
||||
return
|
||||
}
|
||||
|
||||
metricsLogger.Debug("Sending anonymous usage stats to stats.grafana.org")
|
||||
|
||||
version := strings.Replace(setting.BuildVersion, ".", "_", -1)
|
||||
|
||||
metrics := map[string]interface{}{}
|
||||
report := map[string]interface{}{
|
||||
"version": version,
|
||||
"metrics": metrics,
|
||||
"os": runtime.GOOS,
|
||||
"arch": runtime.GOARCH,
|
||||
}
|
||||
|
||||
statsQuery := models.GetSystemStatsQuery{}
|
||||
if err := bus.Dispatch(&statsQuery); err != nil {
|
||||
metricsLogger.Error("Failed to get system stats", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
metrics["stats.dashboards.count"] = statsQuery.Result.Dashboards
|
||||
metrics["stats.users.count"] = statsQuery.Result.Users
|
||||
metrics["stats.orgs.count"] = statsQuery.Result.Orgs
|
||||
metrics["stats.playlist.count"] = statsQuery.Result.Playlists
|
||||
metrics["stats.plugins.apps.count"] = len(plugins.Apps)
|
||||
metrics["stats.plugins.panels.count"] = len(plugins.Panels)
|
||||
metrics["stats.plugins.datasources.count"] = len(plugins.DataSources)
|
||||
metrics["stats.alerts.count"] = statsQuery.Result.Alerts
|
||||
metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers
|
||||
metrics["stats.datasources.count"] = statsQuery.Result.Datasources
|
||||
|
||||
dsStats := models.GetDataSourceStatsQuery{}
|
||||
if err := bus.Dispatch(&dsStats); err != nil {
|
||||
metricsLogger.Error("Failed to get datasource stats", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
// send counters for each data source
|
||||
// but ignore any custom data sources
|
||||
// as sending that name could be sensitive information
|
||||
dsOtherCount := 0
|
||||
for _, dsStat := range dsStats.Result {
|
||||
if models.IsKnownDataSourcePlugin(dsStat.Type) {
|
||||
metrics["stats.ds."+dsStat.Type+".count"] = dsStat.Count
|
||||
} else {
|
||||
dsOtherCount += dsStat.Count
|
||||
}
|
||||
}
|
||||
metrics["stats.ds.other.count"] = dsOtherCount
|
||||
|
||||
out, _ := json.MarshalIndent(report, "", " ")
|
||||
data := bytes.NewBuffer(out)
|
||||
|
||||
client := http.Client{Timeout: time.Duration(5 * time.Second)}
|
||||
go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data)
|
||||
}
|
||||
|
@ -1,135 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
|
||||
var metricsLogger log.Logger = log.New("metrics")
|
||||
var metricPublishCounter int64 = 0
|
||||
|
||||
func Init() {
|
||||
settings := readSettings()
|
||||
initMetricVars(settings)
|
||||
go instrumentationLoop(settings)
|
||||
}
|
||||
|
||||
func instrumentationLoop(settings *MetricSettings) chan struct{} {
|
||||
M_Instance_Start.Inc(1)
|
||||
|
||||
onceEveryDayTick := time.NewTicker(time.Hour * 24)
|
||||
secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds))
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-onceEveryDayTick.C:
|
||||
sendUsageStats()
|
||||
case <-secondTicker.C:
|
||||
if settings.Enabled {
|
||||
sendMetrics(settings)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sendMetrics(settings *MetricSettings) {
|
||||
if len(settings.Publishers) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
updateTotalStats()
|
||||
|
||||
metrics := MetricStats.GetSnapshots()
|
||||
for _, publisher := range settings.Publishers {
|
||||
publisher.Publish(metrics)
|
||||
}
|
||||
}
|
||||
|
||||
func updateTotalStats() {
|
||||
|
||||
// every interval also publish totals
|
||||
metricPublishCounter++
|
||||
if metricPublishCounter%10 == 0 {
|
||||
// get stats
|
||||
statsQuery := m.GetSystemStatsQuery{}
|
||||
if err := bus.Dispatch(&statsQuery); err != nil {
|
||||
metricsLogger.Error("Failed to get system stats", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
M_StatTotal_Dashboards.Update(statsQuery.Result.Dashboards)
|
||||
M_StatTotal_Users.Update(statsQuery.Result.Users)
|
||||
M_StatTotal_Playlists.Update(statsQuery.Result.Playlists)
|
||||
M_StatTotal_Orgs.Update(statsQuery.Result.Orgs)
|
||||
}
|
||||
}
|
||||
|
||||
func sendUsageStats() {
|
||||
if !setting.ReportingEnabled {
|
||||
return
|
||||
}
|
||||
|
||||
metricsLogger.Debug("Sending anonymous usage stats to stats.grafana.org")
|
||||
|
||||
version := strings.Replace(setting.BuildVersion, ".", "_", -1)
|
||||
|
||||
metrics := map[string]interface{}{}
|
||||
report := map[string]interface{}{
|
||||
"version": version,
|
||||
"metrics": metrics,
|
||||
"os": runtime.GOOS,
|
||||
"arch": runtime.GOARCH,
|
||||
}
|
||||
|
||||
statsQuery := m.GetSystemStatsQuery{}
|
||||
if err := bus.Dispatch(&statsQuery); err != nil {
|
||||
metricsLogger.Error("Failed to get system stats", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
metrics["stats.dashboards.count"] = statsQuery.Result.Dashboards
|
||||
metrics["stats.users.count"] = statsQuery.Result.Users
|
||||
metrics["stats.orgs.count"] = statsQuery.Result.Orgs
|
||||
metrics["stats.playlist.count"] = statsQuery.Result.Playlists
|
||||
metrics["stats.plugins.apps.count"] = len(plugins.Apps)
|
||||
metrics["stats.plugins.panels.count"] = len(plugins.Panels)
|
||||
metrics["stats.plugins.datasources.count"] = len(plugins.DataSources)
|
||||
metrics["stats.alerts.count"] = statsQuery.Result.Alerts
|
||||
metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers
|
||||
metrics["stats.datasources.count"] = statsQuery.Result.Datasources
|
||||
|
||||
dsStats := m.GetDataSourceStatsQuery{}
|
||||
if err := bus.Dispatch(&dsStats); err != nil {
|
||||
metricsLogger.Error("Failed to get datasource stats", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
// send counters for each data source
|
||||
// but ignore any custom data sources
|
||||
// as sending that name could be sensitive information
|
||||
dsOtherCount := 0
|
||||
for _, dsStat := range dsStats.Result {
|
||||
if m.IsKnownDataSourcePlugin(dsStat.Type) {
|
||||
metrics["stats.ds."+dsStat.Type+".count"] = dsStat.Count
|
||||
} else {
|
||||
dsOtherCount += dsStat.Count
|
||||
}
|
||||
}
|
||||
metrics["stats.ds.other.count"] = dsOtherCount
|
||||
|
||||
out, _ := json.MarshalIndent(report, "", " ")
|
||||
data := bytes.NewBuffer(out)
|
||||
|
||||
client := http.Client{Timeout: time.Duration(5 * time.Second)}
|
||||
go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data)
|
||||
}
|
@ -1,37 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import "sync"
|
||||
|
||||
type Registry interface {
|
||||
GetSnapshots() []Metric
|
||||
Register(metric Metric)
|
||||
}
|
||||
|
||||
// The standard implementation of a Registry is a mutex-protected map
|
||||
// of names to metrics.
|
||||
type StandardRegistry struct {
|
||||
metrics []Metric
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// Create a new registry.
|
||||
func NewRegistry() Registry {
|
||||
return &StandardRegistry{
|
||||
metrics: make([]Metric, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *StandardRegistry) Register(metric Metric) {
|
||||
r.mutex.Lock()
|
||||
defer r.mutex.Unlock()
|
||||
r.metrics = append(r.metrics, metric)
|
||||
}
|
||||
|
||||
// Call the given function for each registered metric.
|
||||
func (r *StandardRegistry) GetSnapshots() []Metric {
|
||||
metrics := make([]Metric, len(r.metrics))
|
||||
for i, metric := range r.metrics {
|
||||
metrics[i] = metric.Snapshot()
|
||||
}
|
||||
return metrics
|
||||
}
|
@ -1,607 +0,0 @@
|
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const rescaleThreshold = time.Hour
|
||||
|
||||
// Samples maintain a statistically-significant selection of values from
|
||||
// a stream.
|
||||
type Sample interface {
|
||||
Clear()
|
||||
Count() int64
|
||||
Max() int64
|
||||
Mean() float64
|
||||
Min() int64
|
||||
Percentile(float64) float64
|
||||
Percentiles([]float64) []float64
|
||||
Size() int
|
||||
Snapshot() Sample
|
||||
StdDev() float64
|
||||
Sum() int64
|
||||
Update(int64)
|
||||
Values() []int64
|
||||
Variance() float64
|
||||
}
|
||||
|
||||
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
|
||||
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
|
||||
// Decay Model for Streaming Systems".
|
||||
//
|
||||
// <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
|
||||
type ExpDecaySample struct {
|
||||
alpha float64
|
||||
count int64
|
||||
mutex sync.Mutex
|
||||
reservoirSize int
|
||||
t0, t1 time.Time
|
||||
values *expDecaySampleHeap
|
||||
}
|
||||
|
||||
// NewExpDecaySample constructs a new exponentially-decaying sample with the
|
||||
// given reservoir size and alpha.
|
||||
func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
|
||||
s := &ExpDecaySample{
|
||||
alpha: alpha,
|
||||
reservoirSize: reservoirSize,
|
||||
t0: time.Now(),
|
||||
values: newExpDecaySampleHeap(reservoirSize),
|
||||
}
|
||||
s.t1 = s.t0.Add(rescaleThreshold)
|
||||
return s
|
||||
}
|
||||
|
||||
// Clear clears all samples.
|
||||
func (s *ExpDecaySample) Clear() {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
s.count = 0
|
||||
s.t0 = time.Now()
|
||||
s.t1 = s.t0.Add(rescaleThreshold)
|
||||
s.values.Clear()
|
||||
}
|
||||
|
||||
// Count returns the number of samples recorded, which may exceed the
|
||||
// reservoir size.
|
||||
func (s *ExpDecaySample) Count() int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return s.count
|
||||
}
|
||||
|
||||
// Max returns the maximum value in the sample, which may not be the maximum
|
||||
// value ever to be part of the sample.
|
||||
func (s *ExpDecaySample) Max() int64 {
|
||||
return SampleMax(s.Values())
|
||||
}
|
||||
|
||||
// Mean returns the mean of the values in the sample.
|
||||
func (s *ExpDecaySample) Mean() float64 {
|
||||
return SampleMean(s.Values())
|
||||
}
|
||||
|
||||
// Min returns the minimum value in the sample, which may not be the minimum
|
||||
// value ever to be part of the sample.
|
||||
func (s *ExpDecaySample) Min() int64 {
|
||||
return SampleMin(s.Values())
|
||||
}
|
||||
|
||||
// Percentile returns an arbitrary percentile of values in the sample.
|
||||
func (s *ExpDecaySample) Percentile(p float64) float64 {
|
||||
return SamplePercentile(s.Values(), p)
|
||||
}
|
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of values in the
|
||||
// sample.
|
||||
func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
|
||||
return SamplePercentiles(s.Values(), ps)
|
||||
}
|
||||
|
||||
// Size returns the size of the sample, which is at most the reservoir size.
|
||||
func (s *ExpDecaySample) Size() int {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return s.values.Size()
|
||||
}
|
||||
|
||||
// Snapshot returns a read-only copy of the sample.
|
||||
func (s *ExpDecaySample) Snapshot() Sample {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
vals := s.values.Values()
|
||||
values := make([]int64, len(vals))
|
||||
for i, v := range vals {
|
||||
values[i] = v.v
|
||||
}
|
||||
return &SampleSnapshot{
|
||||
count: s.count,
|
||||
values: values,
|
||||
}
|
||||
}
|
||||
|
||||
// StdDev returns the standard deviation of the values in the sample.
|
||||
func (s *ExpDecaySample) StdDev() float64 {
|
||||
return SampleStdDev(s.Values())
|
||||
}
|
||||
|
||||
// Sum returns the sum of the values in the sample.
|
||||
func (s *ExpDecaySample) Sum() int64 {
|
||||
return SampleSum(s.Values())
|
||||
}
|
||||
|
||||
// Update samples a new value.
|
||||
func (s *ExpDecaySample) Update(v int64) {
|
||||
s.update(time.Now(), v)
|
||||
}
|
||||
|
||||
// Values returns a copy of the values in the sample.
|
||||
func (s *ExpDecaySample) Values() []int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
vals := s.values.Values()
|
||||
values := make([]int64, len(vals))
|
||||
for i, v := range vals {
|
||||
values[i] = v.v
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// Variance returns the variance of the values in the sample.
|
||||
func (s *ExpDecaySample) Variance() float64 {
|
||||
return SampleVariance(s.Values())
|
||||
}
|
||||
|
||||
// update samples a new value at a particular timestamp. This is a method all
|
||||
// its own to facilitate testing.
|
||||
func (s *ExpDecaySample) update(t time.Time, v int64) {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
s.count++
|
||||
if s.values.Size() == s.reservoirSize {
|
||||
s.values.Pop()
|
||||
}
|
||||
s.values.Push(expDecaySample{
|
||||
k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
|
||||
v: v,
|
||||
})
|
||||
if t.After(s.t1) {
|
||||
values := s.values.Values()
|
||||
t0 := s.t0
|
||||
s.values.Clear()
|
||||
s.t0 = t
|
||||
s.t1 = s.t0.Add(rescaleThreshold)
|
||||
for _, v := range values {
|
||||
v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
|
||||
s.values.Push(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NilSample is a no-op Sample.
|
||||
type NilSample struct{}
|
||||
|
||||
// Clear is a no-op.
|
||||
func (NilSample) Clear() {}
|
||||
|
||||
// Count is a no-op.
|
||||
func (NilSample) Count() int64 { return 0 }
|
||||
|
||||
// Max is a no-op.
|
||||
func (NilSample) Max() int64 { return 0 }
|
||||
|
||||
// Mean is a no-op.
|
||||
func (NilSample) Mean() float64 { return 0.0 }
|
||||
|
||||
// Min is a no-op.
|
||||
func (NilSample) Min() int64 { return 0 }
|
||||
|
||||
// Percentile is a no-op.
|
||||
func (NilSample) Percentile(p float64) float64 { return 0.0 }
|
||||
|
||||
// Percentiles is a no-op.
|
||||
func (NilSample) Percentiles(ps []float64) []float64 {
|
||||
return make([]float64, len(ps))
|
||||
}
|
||||
|
||||
// Size is a no-op.
|
||||
func (NilSample) Size() int { return 0 }
|
||||
|
||||
// Sample is a no-op.
|
||||
func (NilSample) Snapshot() Sample { return NilSample{} }
|
||||
|
||||
// StdDev is a no-op.
|
||||
func (NilSample) StdDev() float64 { return 0.0 }
|
||||
|
||||
// Sum is a no-op.
|
||||
func (NilSample) Sum() int64 { return 0 }
|
||||
|
||||
// Update is a no-op.
|
||||
func (NilSample) Update(v int64) {}
|
||||
|
||||
// Values is a no-op.
|
||||
func (NilSample) Values() []int64 { return []int64{} }
|
||||
|
||||
// Variance is a no-op.
|
||||
func (NilSample) Variance() float64 { return 0.0 }
|
||||
|
||||
// SampleMax returns the maximum value of the slice of int64.
|
||||
func SampleMax(values []int64) int64 {
|
||||
if 0 == len(values) {
|
||||
return 0
|
||||
}
|
||||
var max int64 = math.MinInt64
|
||||
for _, v := range values {
|
||||
if max < v {
|
||||
max = v
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
// SampleMean returns the mean value of the slice of int64.
|
||||
func SampleMean(values []int64) float64 {
|
||||
if 0 == len(values) {
|
||||
return 0.0
|
||||
}
|
||||
return float64(SampleSum(values)) / float64(len(values))
|
||||
}
|
||||
|
||||
// SampleMin returns the minimum value of the slice of int64.
|
||||
func SampleMin(values []int64) int64 {
|
||||
if 0 == len(values) {
|
||||
return 0
|
||||
}
|
||||
var min int64 = math.MaxInt64
|
||||
for _, v := range values {
|
||||
if min > v {
|
||||
min = v
|
||||
}
|
||||
}
|
||||
return min
|
||||
}
|
||||
|
||||
// SamplePercentiles returns an arbitrary percentile of the slice of int64.
|
||||
func SamplePercentile(values int64Slice, p float64) float64 {
|
||||
return SamplePercentiles(values, []float64{p})[0]
|
||||
}
|
||||
|
||||
// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
|
||||
// int64.
|
||||
func SamplePercentiles(values int64Slice, ps []float64) []float64 {
|
||||
scores := make([]float64, len(ps))
|
||||
size := len(values)
|
||||
if size > 0 {
|
||||
sort.Sort(values)
|
||||
for i, p := range ps {
|
||||
pos := p * float64(size+1)
|
||||
if pos < 1.0 {
|
||||
scores[i] = float64(values[0])
|
||||
} else if pos >= float64(size) {
|
||||
scores[i] = float64(values[size-1])
|
||||
} else {
|
||||
lower := float64(values[int(pos)-1])
|
||||
upper := float64(values[int(pos)])
|
||||
scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
|
||||
}
|
||||
}
|
||||
}
|
||||
return scores
|
||||
}
|
||||
|
||||
// SampleSnapshot is a read-only copy of another Sample.
|
||||
type SampleSnapshot struct {
|
||||
count int64
|
||||
values []int64
|
||||
}
|
||||
|
||||
// Clear panics.
|
||||
func (*SampleSnapshot) Clear() {
|
||||
panic("Clear called on a SampleSnapshot")
|
||||
}
|
||||
|
||||
// Count returns the count of inputs at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Count() int64 { return s.count }
|
||||
|
||||
// Max returns the maximal value at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
|
||||
|
||||
// Mean returns the mean value at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
|
||||
|
||||
// Min returns the minimal value at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
|
||||
|
||||
// Percentile returns an arbitrary percentile of values at the time the
|
||||
// snapshot was taken.
|
||||
func (s *SampleSnapshot) Percentile(p float64) float64 {
|
||||
return SamplePercentile(s.values, p)
|
||||
}
|
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of values at the time
|
||||
// the snapshot was taken.
|
||||
func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
|
||||
return SamplePercentiles(s.values, ps)
|
||||
}
|
||||
|
||||
// Size returns the size of the sample at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Size() int { return len(s.values) }
|
||||
|
||||
// Snapshot returns the snapshot.
|
||||
func (s *SampleSnapshot) Snapshot() Sample { return s }
|
||||
|
||||
// StdDev returns the standard deviation of values at the time the snapshot was
|
||||
// taken.
|
||||
func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
|
||||
|
||||
// Sum returns the sum of values at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
|
||||
|
||||
// Update panics.
|
||||
func (*SampleSnapshot) Update(int64) {
|
||||
panic("Update called on a SampleSnapshot")
|
||||
}
|
||||
|
||||
// Values returns a copy of the values in the sample.
|
||||
func (s *SampleSnapshot) Values() []int64 {
|
||||
values := make([]int64, len(s.values))
|
||||
copy(values, s.values)
|
||||
return values
|
||||
}
|
||||
|
||||
// Variance returns the variance of values at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
|
||||
|
||||
// SampleStdDev returns the standard deviation of the slice of int64.
|
||||
func SampleStdDev(values []int64) float64 {
|
||||
return math.Sqrt(SampleVariance(values))
|
||||
}
|
||||
|
||||
// SampleSum returns the sum of the slice of int64.
|
||||
func SampleSum(values []int64) int64 {
|
||||
var sum int64
|
||||
for _, v := range values {
|
||||
sum += v
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
// SampleVariance returns the variance of the slice of int64.
|
||||
func SampleVariance(values []int64) float64 {
|
||||
if 0 == len(values) {
|
||||
return 0.0
|
||||
}
|
||||
m := SampleMean(values)
|
||||
var sum float64
|
||||
for _, v := range values {
|
||||
d := float64(v) - m
|
||||
sum += d * d
|
||||
}
|
||||
return sum / float64(len(values))
|
||||
}
|
||||
|
||||
// A uniform sample using Vitter's Algorithm R.
|
||||
//
|
||||
// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
|
||||
type UniformSample struct {
|
||||
count int64
|
||||
mutex sync.Mutex
|
||||
reservoirSize int
|
||||
values []int64
|
||||
}
|
||||
|
||||
// NewUniformSample constructs a new uniform sample with the given reservoir
|
||||
// size.
|
||||
func NewUniformSample(reservoirSize int) Sample {
|
||||
return &UniformSample{
|
||||
reservoirSize: reservoirSize,
|
||||
values: make([]int64, 0, reservoirSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Clear clears all samples.
|
||||
func (s *UniformSample) Clear() {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
s.count = 0
|
||||
s.values = make([]int64, 0, s.reservoirSize)
|
||||
}
|
||||
|
||||
// Count returns the number of samples recorded, which may exceed the
|
||||
// reservoir size.
|
||||
func (s *UniformSample) Count() int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return s.count
|
||||
}
|
||||
|
||||
// Max returns the maximum value in the sample, which may not be the maximum
|
||||
// value ever to be part of the sample.
|
||||
func (s *UniformSample) Max() int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleMax(s.values)
|
||||
}
|
||||
|
||||
// Mean returns the mean of the values in the sample.
|
||||
func (s *UniformSample) Mean() float64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleMean(s.values)
|
||||
}
|
||||
|
||||
// Min returns the minimum value in the sample, which may not be the minimum
|
||||
// value ever to be part of the sample.
|
||||
func (s *UniformSample) Min() int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleMin(s.values)
|
||||
}
|
||||
|
||||
// Percentile returns an arbitrary percentile of values in the sample.
|
||||
func (s *UniformSample) Percentile(p float64) float64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SamplePercentile(s.values, p)
|
||||
}
|
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of values in the
|
||||
// sample.
|
||||
func (s *UniformSample) Percentiles(ps []float64) []float64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SamplePercentiles(s.values, ps)
|
||||
}
|
||||
|
||||
// Size returns the size of the sample, which is at most the reservoir size.
|
||||
func (s *UniformSample) Size() int {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Snapshot returns a read-only copy of the sample.
|
||||
func (s *UniformSample) Snapshot() Sample {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
values := make([]int64, len(s.values))
|
||||
copy(values, s.values)
|
||||
return &SampleSnapshot{
|
||||
count: s.count,
|
||||
values: values,
|
||||
}
|
||||
}
|
||||
|
||||
// StdDev returns the standard deviation of the values in the sample.
|
||||
func (s *UniformSample) StdDev() float64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleStdDev(s.values)
|
||||
}
|
||||
|
||||
// Sum returns the sum of the values in the sample.
|
||||
func (s *UniformSample) Sum() int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleSum(s.values)
|
||||
}
|
||||
|
||||
// Update samples a new value.
|
||||
func (s *UniformSample) Update(v int64) {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
s.count++
|
||||
if len(s.values) < s.reservoirSize {
|
||||
s.values = append(s.values, v)
|
||||
} else {
|
||||
r := rand.Int63n(s.count)
|
||||
if r < int64(len(s.values)) {
|
||||
s.values[int(r)] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Values returns a copy of the values in the sample.
|
||||
func (s *UniformSample) Values() []int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
values := make([]int64, len(s.values))
|
||||
copy(values, s.values)
|
||||
return values
|
||||
}
|
||||
|
||||
// Variance returns the variance of the values in the sample.
|
||||
func (s *UniformSample) Variance() float64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleVariance(s.values)
|
||||
}
|
||||
|
||||
// expDecaySample represents an individual sample in a heap.
|
||||
type expDecaySample struct {
|
||||
k float64
|
||||
v int64
|
||||
}
|
||||
|
||||
func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
|
||||
return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
|
||||
}
|
||||
|
||||
// expDecaySampleHeap is a min-heap of expDecaySamples.
|
||||
// The internal implementation is copied from the standard library's container/heap
|
||||
type expDecaySampleHeap struct {
|
||||
s []expDecaySample
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) Clear() {
|
||||
h.s = h.s[:0]
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) Push(s expDecaySample) {
|
||||
n := len(h.s)
|
||||
h.s = h.s[0 : n+1]
|
||||
h.s[n] = s
|
||||
h.up(n)
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) Pop() expDecaySample {
|
||||
n := len(h.s) - 1
|
||||
h.s[0], h.s[n] = h.s[n], h.s[0]
|
||||
h.down(0, n)
|
||||
|
||||
n = len(h.s)
|
||||
s := h.s[n-1]
|
||||
h.s = h.s[0 : n-1]
|
||||
return s
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) Size() int {
|
||||
return len(h.s)
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) Values() []expDecaySample {
|
||||
return h.s
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) up(j int) {
|
||||
for {
|
||||
i := (j - 1) / 2 // parent
|
||||
if i == j || !(h.s[j].k < h.s[i].k) {
|
||||
break
|
||||
}
|
||||
h.s[i], h.s[j] = h.s[j], h.s[i]
|
||||
j = i
|
||||
}
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) down(i, n int) {
|
||||
for {
|
||||
j1 := 2*i + 1
|
||||
if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
|
||||
break
|
||||
}
|
||||
j := j1 // left child
|
||||
if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
|
||||
j = j2 // = 2*i + 2 // right child
|
||||
}
|
||||
if !(h.s[j].k < h.s[i].k) {
|
||||
break
|
||||
}
|
||||
h.s[i], h.s[j] = h.s[j], h.s[i]
|
||||
i = j
|
||||
}
|
||||
}
|
||||
|
||||
type int64Slice []int64
|
||||
|
||||
func (p int64Slice) Len() int { return len(p) }
|
||||
func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
@ -1,367 +0,0 @@
|
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
|
||||
// expensive computations like Variance, the cost of copying the Sample, as
|
||||
// approximated by a make and copy, is much greater than the cost of the
|
||||
// computation for small samples and only slightly less for large samples.
|
||||
func BenchmarkCompute1000(b *testing.B) {
|
||||
s := make([]int64, 1000)
|
||||
for i := 0; i < len(s); i++ {
|
||||
s[i] = int64(i)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
SampleVariance(s)
|
||||
}
|
||||
}
|
||||
func BenchmarkCompute1000000(b *testing.B) {
|
||||
s := make([]int64, 1000000)
|
||||
for i := 0; i < len(s); i++ {
|
||||
s[i] = int64(i)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
SampleVariance(s)
|
||||
}
|
||||
}
|
||||
func BenchmarkCopy1000(b *testing.B) {
|
||||
s := make([]int64, 1000)
|
||||
for i := 0; i < len(s); i++ {
|
||||
s[i] = int64(i)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
sCopy := make([]int64, len(s))
|
||||
copy(sCopy, s)
|
||||
}
|
||||
}
|
||||
func BenchmarkCopy1000000(b *testing.B) {
|
||||
s := make([]int64, 1000000)
|
||||
for i := 0; i < len(s); i++ {
|
||||
s[i] = int64(i)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
sCopy := make([]int64, len(s))
|
||||
copy(sCopy, s)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkExpDecaySample257(b *testing.B) {
|
||||
benchmarkSample(b, NewExpDecaySample(257, 0.015))
|
||||
}
|
||||
|
||||
func BenchmarkExpDecaySample514(b *testing.B) {
|
||||
benchmarkSample(b, NewExpDecaySample(514, 0.015))
|
||||
}
|
||||
|
||||
func BenchmarkExpDecaySample1028(b *testing.B) {
|
||||
benchmarkSample(b, NewExpDecaySample(1028, 0.015))
|
||||
}
|
||||
|
||||
func BenchmarkUniformSample257(b *testing.B) {
|
||||
benchmarkSample(b, NewUniformSample(257))
|
||||
}
|
||||
|
||||
func BenchmarkUniformSample514(b *testing.B) {
|
||||
benchmarkSample(b, NewUniformSample(514))
|
||||
}
|
||||
|
||||
func BenchmarkUniformSample1028(b *testing.B) {
|
||||
benchmarkSample(b, NewUniformSample(1028))
|
||||
}
|
||||
|
||||
func TestExpDecaySample10(t *testing.T) {
|
||||
rand.Seed(1)
|
||||
s := NewExpDecaySample(100, 0.99)
|
||||
for i := 0; i < 10; i++ {
|
||||
s.Update(int64(i))
|
||||
}
|
||||
if size := s.Count(); 10 != size {
|
||||
t.Errorf("s.Count(): 10 != %v\n", size)
|
||||
}
|
||||
if size := s.Size(); 10 != size {
|
||||
t.Errorf("s.Size(): 10 != %v\n", size)
|
||||
}
|
||||
if l := len(s.Values()); 10 != l {
|
||||
t.Errorf("len(s.Values()): 10 != %v\n", l)
|
||||
}
|
||||
for _, v := range s.Values() {
|
||||
if v > 10 || v < 0 {
|
||||
t.Errorf("out of range [0, 10): %v\n", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpDecaySample100(t *testing.T) {
|
||||
rand.Seed(1)
|
||||
s := NewExpDecaySample(1000, 0.01)
|
||||
for i := 0; i < 100; i++ {
|
||||
s.Update(int64(i))
|
||||
}
|
||||
if size := s.Count(); 100 != size {
|
||||
t.Errorf("s.Count(): 100 != %v\n", size)
|
||||
}
|
||||
if size := s.Size(); 100 != size {
|
||||
t.Errorf("s.Size(): 100 != %v\n", size)
|
||||
}
|
||||
if l := len(s.Values()); 100 != l {
|
||||
t.Errorf("len(s.Values()): 100 != %v\n", l)
|
||||
}
|
||||
for _, v := range s.Values() {
|
||||
if v > 100 || v < 0 {
|
||||
t.Errorf("out of range [0, 100): %v\n", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpDecaySample1000(t *testing.T) {
|
||||
rand.Seed(1)
|
||||
s := NewExpDecaySample(100, 0.99)
|
||||
for i := 0; i < 1000; i++ {
|
||||
s.Update(int64(i))
|
||||
}
|
||||
if size := s.Count(); 1000 != size {
|
||||
t.Errorf("s.Count(): 1000 != %v\n", size)
|
||||
}
|
||||
if size := s.Size(); 100 != size {
|
||||
t.Errorf("s.Size(): 100 != %v\n", size)
|
||||
}
|
||||
if l := len(s.Values()); 100 != l {
|
||||
t.Errorf("len(s.Values()): 100 != %v\n", l)
|
||||
}
|
||||
for _, v := range s.Values() {
|
||||
if v > 1000 || v < 0 {
|
||||
t.Errorf("out of range [0, 1000): %v\n", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This test makes sure that the sample's priority is not amplified by using
|
||||
// nanosecond duration since start rather than second duration since start.
|
||||
// The priority becomes +Inf quickly after starting if this is done,
|
||||
// effectively freezing the set of samples until a rescale step happens.
|
||||
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
|
||||
rand.Seed(1)
|
||||
s := NewExpDecaySample(100, 0.99)
|
||||
for i := 0; i < 100; i++ {
|
||||
s.Update(10)
|
||||
}
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
for i := 0; i < 100; i++ {
|
||||
s.Update(20)
|
||||
}
|
||||
v := s.Values()
|
||||
avg := float64(0)
|
||||
for i := 0; i < len(v); i++ {
|
||||
avg += float64(v[i])
|
||||
}
|
||||
avg /= float64(len(v))
|
||||
if avg > 16 || avg < 14 {
|
||||
t.Errorf("out of range [14, 16]: %v\n", avg)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpDecaySampleRescale(t *testing.T) {
|
||||
s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
|
||||
s.update(time.Now(), 1)
|
||||
s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
|
||||
for _, v := range s.values.Values() {
|
||||
if v.k == 0.0 {
|
||||
t.Fatal("v.k == 0.0")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpDecaySampleSnapshot(t *testing.T) {
|
||||
now := time.Now()
|
||||
rand.Seed(1)
|
||||
s := NewExpDecaySample(100, 0.99)
|
||||
for i := 1; i <= 10000; i++ {
|
||||
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
|
||||
}
|
||||
snapshot := s.Snapshot()
|
||||
s.Update(1)
|
||||
testExpDecaySampleStatistics(t, snapshot)
|
||||
}
|
||||
|
||||
func TestExpDecaySampleStatistics(t *testing.T) {
|
||||
now := time.Now()
|
||||
rand.Seed(1)
|
||||
s := NewExpDecaySample(100, 0.99)
|
||||
for i := 1; i <= 10000; i++ {
|
||||
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
|
||||
}
|
||||
testExpDecaySampleStatistics(t, s)
|
||||
}
|
||||
|
||||
func TestUniformSample(t *testing.T) {
|
||||
rand.Seed(1)
|
||||
s := NewUniformSample(100)
|
||||
for i := 0; i < 1000; i++ {
|
||||
s.Update(int64(i))
|
||||
}
|
||||
if size := s.Count(); 1000 != size {
|
||||
t.Errorf("s.Count(): 1000 != %v\n", size)
|
||||
}
|
||||
if size := s.Size(); 100 != size {
|
||||
t.Errorf("s.Size(): 100 != %v\n", size)
|
||||
}
|
||||
if l := len(s.Values()); 100 != l {
|
||||
t.Errorf("len(s.Values()): 100 != %v\n", l)
|
||||
}
|
||||
for _, v := range s.Values() {
|
||||
if v > 1000 || v < 0 {
|
||||
t.Errorf("out of range [0, 100): %v\n", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUniformSampleIncludesTail(t *testing.T) {
|
||||
rand.Seed(1)
|
||||
s := NewUniformSample(100)
|
||||
max := 100
|
||||
for i := 0; i < max; i++ {
|
||||
s.Update(int64(i))
|
||||
}
|
||||
v := s.Values()
|
||||
sum := 0
|
||||
exp := (max - 1) * max / 2
|
||||
for i := 0; i < len(v); i++ {
|
||||
sum += int(v[i])
|
||||
}
|
||||
if exp != sum {
|
||||
t.Errorf("sum: %v != %v\n", exp, sum)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUniformSampleSnapshot(t *testing.T) {
|
||||
s := NewUniformSample(100)
|
||||
for i := 1; i <= 10000; i++ {
|
||||
s.Update(int64(i))
|
||||
}
|
||||
snapshot := s.Snapshot()
|
||||
s.Update(1)
|
||||
testUniformSampleStatistics(t, snapshot)
|
||||
}
|
||||
|
||||
func TestUniformSampleStatistics(t *testing.T) {
|
||||
rand.Seed(1)
|
||||
s := NewUniformSample(100)
|
||||
for i := 1; i <= 10000; i++ {
|
||||
s.Update(int64(i))
|
||||
}
|
||||
testUniformSampleStatistics(t, s)
|
||||
}
|
||||
|
||||
func benchmarkSample(b *testing.B, s Sample) {
|
||||
var memStats runtime.MemStats
|
||||
runtime.ReadMemStats(&memStats)
|
||||
pauseTotalNs := memStats.PauseTotalNs
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Update(1)
|
||||
}
|
||||
b.StopTimer()
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&memStats)
|
||||
b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
|
||||
}
|
||||
|
||||
func testExpDecaySampleStatistics(t *testing.T, s Sample) {
|
||||
if count := s.Count(); 10000 != count {
|
||||
t.Errorf("s.Count(): 10000 != %v\n", count)
|
||||
}
|
||||
if min := s.Min(); 107 != min {
|
||||
t.Errorf("s.Min(): 107 != %v\n", min)
|
||||
}
|
||||
if max := s.Max(); 10000 != max {
|
||||
t.Errorf("s.Max(): 10000 != %v\n", max)
|
||||
}
|
||||
if mean := s.Mean(); 4965.98 != mean {
|
||||
t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
|
||||
}
|
||||
if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
|
||||
t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
|
||||
}
|
||||
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
|
||||
if 4615 != ps[0] {
|
||||
t.Errorf("median: 4615 != %v\n", ps[0])
|
||||
}
|
||||
if 7672 != ps[1] {
|
||||
t.Errorf("75th percentile: 7672 != %v\n", ps[1])
|
||||
}
|
||||
if 9998.99 != ps[2] {
|
||||
t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
|
||||
}
|
||||
}
|
||||
|
||||
func testUniformSampleStatistics(t *testing.T, s Sample) {
|
||||
if count := s.Count(); 10000 != count {
|
||||
t.Errorf("s.Count(): 10000 != %v\n", count)
|
||||
}
|
||||
if min := s.Min(); 37 != min {
|
||||
t.Errorf("s.Min(): 37 != %v\n", min)
|
||||
}
|
||||
if max := s.Max(); 9989 != max {
|
||||
t.Errorf("s.Max(): 9989 != %v\n", max)
|
||||
}
|
||||
if mean := s.Mean(); 4748.14 != mean {
|
||||
t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
|
||||
}
|
||||
if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
|
||||
t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
|
||||
}
|
||||
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
|
||||
if 4599 != ps[0] {
|
||||
t.Errorf("median: 4599 != %v\n", ps[0])
|
||||
}
|
||||
if 7380.5 != ps[1] {
|
||||
t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
|
||||
}
|
||||
if 9986.429999999998 != ps[2] {
|
||||
t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
|
||||
}
|
||||
}
|
||||
|
||||
// TestUniformSampleConcurrentUpdateCount would expose data race problems with
|
||||
// concurrent Update and Count calls on Sample when test is called with -race
|
||||
// argument
|
||||
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping in short mode")
|
||||
}
|
||||
s := NewUniformSample(100)
|
||||
for i := 0; i < 100; i++ {
|
||||
s.Update(int64(i))
|
||||
}
|
||||
quit := make(chan struct{})
|
||||
go func() {
|
||||
t := time.NewTicker(10 * time.Millisecond)
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
s.Update(rand.Int63())
|
||||
case <-quit:
|
||||
t.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
for i := 0; i < 1000; i++ {
|
||||
s.Count()
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
}
|
||||
quit <- struct{}{}
|
||||
}
|
@ -1,25 +1,27 @@
|
||||
package metrics
|
||||
|
||||
import "github.com/grafana/grafana/pkg/setting"
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
type MetricPublisher interface {
|
||||
Publish(metrics []Metric)
|
||||
}
|
||||
"github.com/grafana/grafana/pkg/metrics/graphitebridge"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
ini "gopkg.in/ini.v1"
|
||||
)
|
||||
|
||||
type MetricSettings struct {
|
||||
Enabled bool
|
||||
IntervalSeconds int64
|
||||
|
||||
Publishers []MetricPublisher
|
||||
Enabled bool
|
||||
IntervalSeconds int64
|
||||
GraphiteBridgeConfig *graphitebridge.Config
|
||||
}
|
||||
|
||||
func readSettings() *MetricSettings {
|
||||
func ReadSettings(file *ini.File) *MetricSettings {
|
||||
var settings = &MetricSettings{
|
||||
Enabled: false,
|
||||
Publishers: make([]MetricPublisher, 0),
|
||||
Enabled: false,
|
||||
}
|
||||
|
||||
var section, err = setting.Cfg.GetSection("metrics")
|
||||
var section, err = file.GetSection("metrics")
|
||||
if err != nil {
|
||||
metricsLogger.Crit("Unable to find metrics config section", "error", err)
|
||||
return nil
|
||||
@ -32,12 +34,46 @@ func readSettings() *MetricSettings {
|
||||
return settings
|
||||
}
|
||||
|
||||
if graphitePublisher, err := CreateGraphitePublisher(); err != nil {
|
||||
metricsLogger.Error("Failed to init Graphite metric publisher", "error", err)
|
||||
} else if graphitePublisher != nil {
|
||||
metricsLogger.Info("Metrics publisher initialized", "type", "graphite")
|
||||
settings.Publishers = append(settings.Publishers, graphitePublisher)
|
||||
cfg, err := parseGraphiteSettings(settings, file)
|
||||
if err != nil {
|
||||
metricsLogger.Crit("Unable to parse metrics graphite section", "error", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
settings.GraphiteBridgeConfig = cfg
|
||||
|
||||
return settings
|
||||
}
|
||||
|
||||
func parseGraphiteSettings(settings *MetricSettings, file *ini.File) (*graphitebridge.Config, error) {
|
||||
graphiteSection, err := setting.Cfg.GetSection("metrics.graphite")
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
address := graphiteSection.Key("address").String()
|
||||
if address == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
cfg := &graphitebridge.Config{
|
||||
URL: address,
|
||||
Prefix: graphiteSection.Key("prefix").MustString("prod.grafana.%(instance_name)s"),
|
||||
CountersAsDelta: true,
|
||||
Gatherer: prometheus.DefaultGatherer,
|
||||
Interval: time.Duration(settings.IntervalSeconds) * time.Second,
|
||||
Timeout: 10 * time.Second,
|
||||
Logger: &logWrapper{logger: metricsLogger},
|
||||
ErrorHandling: graphitebridge.ContinueOnError,
|
||||
}
|
||||
|
||||
safeInstanceName := strings.Replace(setting.InstanceName, ".", "_", -1)
|
||||
prefix := graphiteSection.Key("prefix").Value()
|
||||
|
||||
if prefix == "" {
|
||||
prefix = "prod.grafana.%(instance_name)s."
|
||||
}
|
||||
|
||||
cfg.Prefix = strings.Replace(prefix, "%(instance_name)s", safeInstanceName, -1)
|
||||
return cfg, nil
|
||||
}
|
||||
|
@ -1,310 +0,0 @@
|
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Timers capture the duration and rate of events.
|
||||
type Timer interface {
|
||||
Metric
|
||||
|
||||
Count() int64
|
||||
Max() int64
|
||||
Mean() float64
|
||||
Min() int64
|
||||
Percentile(float64) float64
|
||||
Percentiles([]float64) []float64
|
||||
Rate1() float64
|
||||
Rate5() float64
|
||||
Rate15() float64
|
||||
RateMean() float64
|
||||
StdDev() float64
|
||||
Sum() int64
|
||||
Time(func())
|
||||
Update(time.Duration)
|
||||
UpdateSince(time.Time)
|
||||
Variance() float64
|
||||
}
|
||||
|
||||
// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
|
||||
func NewCustomTimer(meta *MetricMeta, h Histogram, m Meter) Timer {
|
||||
if UseNilMetrics {
|
||||
return NilTimer{}
|
||||
}
|
||||
return &StandardTimer{
|
||||
MetricMeta: meta,
|
||||
histogram: h,
|
||||
meter: m,
|
||||
}
|
||||
}
|
||||
|
||||
// NewTimer constructs a new StandardTimer using an exponentially-decaying
|
||||
// sample with the same reservoir size and alpha as UNIX load averages.
|
||||
func NewTimer(meta *MetricMeta) Timer {
|
||||
if UseNilMetrics {
|
||||
return NilTimer{}
|
||||
}
|
||||
return &StandardTimer{
|
||||
MetricMeta: meta,
|
||||
histogram: NewHistogram(meta, NewExpDecaySample(1028, 0.015)),
|
||||
meter: NewMeter(meta),
|
||||
}
|
||||
}
|
||||
|
||||
func RegTimer(name string, tagStrings ...string) Timer {
|
||||
tr := NewTimer(NewMetricMeta(name, tagStrings))
|
||||
MetricStats.Register(tr)
|
||||
return tr
|
||||
}
|
||||
|
||||
// NilTimer is a no-op Timer.
|
||||
type NilTimer struct {
|
||||
*MetricMeta
|
||||
h Histogram
|
||||
m Meter
|
||||
}
|
||||
|
||||
// Count is a no-op.
|
||||
func (NilTimer) Count() int64 { return 0 }
|
||||
|
||||
// Max is a no-op.
|
||||
func (NilTimer) Max() int64 { return 0 }
|
||||
|
||||
// Mean is a no-op.
|
||||
func (NilTimer) Mean() float64 { return 0.0 }
|
||||
|
||||
// Min is a no-op.
|
||||
func (NilTimer) Min() int64 { return 0 }
|
||||
|
||||
// Percentile is a no-op.
|
||||
func (NilTimer) Percentile(p float64) float64 { return 0.0 }
|
||||
|
||||
// Percentiles is a no-op.
|
||||
func (NilTimer) Percentiles(ps []float64) []float64 {
|
||||
return make([]float64, len(ps))
|
||||
}
|
||||
|
||||
// Rate1 is a no-op.
|
||||
func (NilTimer) Rate1() float64 { return 0.0 }
|
||||
|
||||
// Rate5 is a no-op.
|
||||
func (NilTimer) Rate5() float64 { return 0.0 }
|
||||
|
||||
// Rate15 is a no-op.
|
||||
func (NilTimer) Rate15() float64 { return 0.0 }
|
||||
|
||||
// RateMean is a no-op.
|
||||
func (NilTimer) RateMean() float64 { return 0.0 }
|
||||
|
||||
// Snapshot is a no-op.
|
||||
func (n NilTimer) Snapshot() Metric { return n }
|
||||
|
||||
// StdDev is a no-op.
|
||||
func (NilTimer) StdDev() float64 { return 0.0 }
|
||||
|
||||
// Sum is a no-op.
|
||||
func (NilTimer) Sum() int64 { return 0 }
|
||||
|
||||
// Time is a no-op.
|
||||
func (NilTimer) Time(func()) {}
|
||||
|
||||
// Update is a no-op.
|
||||
func (NilTimer) Update(time.Duration) {}
|
||||
|
||||
// UpdateSince is a no-op.
|
||||
func (NilTimer) UpdateSince(time.Time) {}
|
||||
|
||||
// Variance is a no-op.
|
||||
func (NilTimer) Variance() float64 { return 0.0 }
|
||||
|
||||
// StandardTimer is the standard implementation of a Timer and uses a Histogram
|
||||
// and Meter.
|
||||
type StandardTimer struct {
|
||||
*MetricMeta
|
||||
histogram Histogram
|
||||
meter Meter
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// Count returns the number of events recorded.
|
||||
func (t *StandardTimer) Count() int64 {
|
||||
return t.histogram.Count()
|
||||
}
|
||||
|
||||
// Max returns the maximum value in the sample.
|
||||
func (t *StandardTimer) Max() int64 {
|
||||
return t.histogram.Max()
|
||||
}
|
||||
|
||||
// Mean returns the mean of the values in the sample.
|
||||
func (t *StandardTimer) Mean() float64 {
|
||||
return t.histogram.Mean()
|
||||
}
|
||||
|
||||
// Min returns the minimum value in the sample.
|
||||
func (t *StandardTimer) Min() int64 {
|
||||
return t.histogram.Min()
|
||||
}
|
||||
|
||||
// Percentile returns an arbitrary percentile of the values in the sample.
|
||||
func (t *StandardTimer) Percentile(p float64) float64 {
|
||||
return t.histogram.Percentile(p)
|
||||
}
|
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of the values in the
|
||||
// sample.
|
||||
func (t *StandardTimer) Percentiles(ps []float64) []float64 {
|
||||
return t.histogram.Percentiles(ps)
|
||||
}
|
||||
|
||||
// Rate1 returns the one-minute moving average rate of events per second.
|
||||
func (t *StandardTimer) Rate1() float64 {
|
||||
return t.meter.Rate1()
|
||||
}
|
||||
|
||||
// Rate5 returns the five-minute moving average rate of events per second.
|
||||
func (t *StandardTimer) Rate5() float64 {
|
||||
return t.meter.Rate5()
|
||||
}
|
||||
|
||||
// Rate15 returns the fifteen-minute moving average rate of events per second.
|
||||
func (t *StandardTimer) Rate15() float64 {
|
||||
return t.meter.Rate15()
|
||||
}
|
||||
|
||||
// RateMean returns the meter's mean rate of events per second.
|
||||
func (t *StandardTimer) RateMean() float64 {
|
||||
return t.meter.RateMean()
|
||||
}
|
||||
|
||||
// Snapshot returns a read-only copy of the timer.
|
||||
func (t *StandardTimer) Snapshot() Metric {
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
return &TimerSnapshot{
|
||||
MetricMeta: t.MetricMeta,
|
||||
histogram: t.histogram.Snapshot().(*HistogramSnapshot),
|
||||
meter: t.meter.Snapshot().(*MeterSnapshot),
|
||||
}
|
||||
}
|
||||
|
||||
// StdDev returns the standard deviation of the values in the sample.
|
||||
func (t *StandardTimer) StdDev() float64 {
|
||||
return t.histogram.StdDev()
|
||||
}
|
||||
|
||||
// Sum returns the sum in the sample.
|
||||
func (t *StandardTimer) Sum() int64 {
|
||||
return t.histogram.Sum()
|
||||
}
|
||||
|
||||
// Record the duration of the execution of the given function.
|
||||
func (t *StandardTimer) Time(f func()) {
|
||||
ts := time.Now()
|
||||
f()
|
||||
t.Update(time.Since(ts))
|
||||
}
|
||||
|
||||
// Record the duration of an event.
|
||||
func (t *StandardTimer) Update(d time.Duration) {
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
t.histogram.Update(int64(d))
|
||||
t.meter.Mark(1)
|
||||
}
|
||||
|
||||
// Record the duration of an event that started at a time and ends now.
|
||||
func (t *StandardTimer) UpdateSince(ts time.Time) {
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
sinceMs := time.Since(ts) / time.Millisecond
|
||||
t.histogram.Update(int64(sinceMs))
|
||||
t.meter.Mark(1)
|
||||
}
|
||||
|
||||
// Variance returns the variance of the values in the sample.
|
||||
func (t *StandardTimer) Variance() float64 {
|
||||
return t.histogram.Variance()
|
||||
}
|
||||
|
||||
// TimerSnapshot is a read-only copy of another Timer.
|
||||
type TimerSnapshot struct {
|
||||
*MetricMeta
|
||||
histogram *HistogramSnapshot
|
||||
meter *MeterSnapshot
|
||||
}
|
||||
|
||||
// Count returns the number of events recorded at the time the snapshot was
|
||||
// taken.
|
||||
func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
|
||||
|
||||
// Max returns the maximum value at the time the snapshot was taken.
|
||||
func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
|
||||
|
||||
// Mean returns the mean value at the time the snapshot was taken.
|
||||
func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
|
||||
|
||||
// Min returns the minimum value at the time the snapshot was taken.
|
||||
func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
|
||||
|
||||
// Percentile returns an arbitrary percentile of sampled values at the time the
|
||||
// snapshot was taken.
|
||||
func (t *TimerSnapshot) Percentile(p float64) float64 {
|
||||
return t.histogram.Percentile(p)
|
||||
}
|
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of sampled values at
|
||||
// the time the snapshot was taken.
|
||||
func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
|
||||
return t.histogram.Percentiles(ps)
|
||||
}
|
||||
|
||||
// Rate1 returns the one-minute moving average rate of events per second at the
|
||||
// time the snapshot was taken.
|
||||
func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
|
||||
|
||||
// Rate5 returns the five-minute moving average rate of events per second at
|
||||
// the time the snapshot was taken.
|
||||
func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
|
||||
|
||||
// Rate15 returns the fifteen-minute moving average rate of events per second
|
||||
// at the time the snapshot was taken.
|
||||
func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
|
||||
|
||||
// RateMean returns the meter's mean rate of events per second at the time the
|
||||
// snapshot was taken.
|
||||
func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
|
||||
|
||||
// Snapshot returns the snapshot.
|
||||
func (t *TimerSnapshot) Snapshot() Metric { return t }
|
||||
|
||||
// StdDev returns the standard deviation of the values at the time the snapshot
|
||||
// was taken.
|
||||
func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
|
||||
|
||||
// Sum returns the sum at the time the snapshot was taken.
|
||||
func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
|
||||
|
||||
// Time panics.
|
||||
func (*TimerSnapshot) Time(func()) {
|
||||
panic("Time called on a TimerSnapshot")
|
||||
}
|
||||
|
||||
// Update panics.
|
||||
func (*TimerSnapshot) Update(time.Duration) {
|
||||
panic("Update called on a TimerSnapshot")
|
||||
}
|
||||
|
||||
// UpdateSince panics.
|
||||
func (*TimerSnapshot) UpdateSince(time.Time) {
|
||||
panic("UpdateSince called on a TimerSnapshot")
|
||||
}
|
||||
|
||||
// Variance returns the variance of the values at the time the snapshot was
|
||||
// taken.
|
||||
func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
|
@ -19,8 +19,8 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/macaron.v1"
|
||||
)
|
||||
|
||||
@ -35,8 +35,8 @@ func Logger() macaron.Handler {
|
||||
timeTakenMs := time.Since(start) / time.Millisecond
|
||||
|
||||
if timer, ok := c.Data["perfmon.timer"]; ok {
|
||||
timerTyped := timer.(metrics.Timer)
|
||||
timerTyped.Update(timeTakenMs)
|
||||
timerTyped := timer.(prometheus.Summary)
|
||||
timerTyped.Observe(float64(timeTakenMs))
|
||||
}
|
||||
|
||||
status := rw.Status()
|
||||
|
@ -10,10 +10,10 @@ import (
|
||||
"github.com/grafana/grafana/pkg/components/apikeygen"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
l "github.com/grafana/grafana/pkg/login"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type Context struct {
|
||||
@ -251,7 +251,7 @@ func (ctx *Context) HasHelpFlag(flag m.HelpFlags1) bool {
|
||||
return ctx.HelpFlags1.HasFlag(flag)
|
||||
}
|
||||
|
||||
func (ctx *Context) TimeRequest(timer metrics.Timer) {
|
||||
func (ctx *Context) TimeRequest(timer prometheus.Summary) {
|
||||
ctx.Data["perfmon.timer"] = timer
|
||||
}
|
||||
|
||||
|
@ -2,19 +2,28 @@ package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
"gopkg.in/macaron.v1"
|
||||
)
|
||||
|
||||
func RequestMetrics() macaron.Handler {
|
||||
func RequestMetrics(handler string) macaron.Handler {
|
||||
return func(res http.ResponseWriter, req *http.Request, c *macaron.Context) {
|
||||
rw := res.(macaron.ResponseWriter)
|
||||
now := time.Now()
|
||||
c.Next()
|
||||
|
||||
status := rw.Status()
|
||||
|
||||
code := sanitizeCode(status)
|
||||
method := sanitizeMethod(req.Method)
|
||||
metrics.M_Http_Request_Total.WithLabelValues(handler, code, method).Inc()
|
||||
duration := time.Since(now).Nanoseconds() / int64(time.Millisecond)
|
||||
metrics.M_Http_Request_Summary.WithLabelValues(handler, code, method).Observe(float64(duration))
|
||||
|
||||
if strings.HasPrefix(req.RequestURI, "/api/datasources/proxy") {
|
||||
countProxyRequests(status)
|
||||
} else if strings.HasPrefix(req.RequestURI, "/api/") {
|
||||
@ -28,38 +37,165 @@ func RequestMetrics() macaron.Handler {
|
||||
func countApiRequests(status int) {
|
||||
switch status {
|
||||
case 200:
|
||||
metrics.M_Api_Status_200.Inc(1)
|
||||
metrics.M_Api_Status.WithLabelValues("200").Inc()
|
||||
case 404:
|
||||
metrics.M_Api_Status_404.Inc(1)
|
||||
metrics.M_Api_Status.WithLabelValues("404").Inc()
|
||||
case 500:
|
||||
metrics.M_Api_Status_500.Inc(1)
|
||||
metrics.M_Api_Status.WithLabelValues("500").Inc()
|
||||
default:
|
||||
metrics.M_Api_Status_Unknown.Inc(1)
|
||||
metrics.M_Api_Status.WithLabelValues("unknown").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func countPageRequests(status int) {
|
||||
switch status {
|
||||
case 200:
|
||||
metrics.M_Page_Status_200.Inc(1)
|
||||
metrics.M_Page_Status.WithLabelValues("200").Inc()
|
||||
case 404:
|
||||
metrics.M_Page_Status_404.Inc(1)
|
||||
metrics.M_Page_Status.WithLabelValues("404").Inc()
|
||||
case 500:
|
||||
metrics.M_Page_Status_500.Inc(1)
|
||||
metrics.M_Page_Status.WithLabelValues("500").Inc()
|
||||
default:
|
||||
metrics.M_Page_Status_Unknown.Inc(1)
|
||||
metrics.M_Page_Status.WithLabelValues("unknown").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func countProxyRequests(status int) {
|
||||
switch status {
|
||||
case 200:
|
||||
metrics.M_Proxy_Status_200.Inc(1)
|
||||
metrics.M_Proxy_Status.WithLabelValues("200").Inc()
|
||||
case 404:
|
||||
metrics.M_Proxy_Status_404.Inc(1)
|
||||
metrics.M_Proxy_Status.WithLabelValues("400").Inc()
|
||||
case 500:
|
||||
metrics.M_Proxy_Status_500.Inc(1)
|
||||
metrics.M_Proxy_Status.WithLabelValues("500").Inc()
|
||||
default:
|
||||
metrics.M_Proxy_Status_Unknown.Inc(1)
|
||||
metrics.M_Proxy_Status.WithLabelValues("unknown").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func sanitizeMethod(m string) string {
|
||||
switch m {
|
||||
case "GET", "get":
|
||||
return "get"
|
||||
case "PUT", "put":
|
||||
return "put"
|
||||
case "HEAD", "head":
|
||||
return "head"
|
||||
case "POST", "post":
|
||||
return "post"
|
||||
case "DELETE", "delete":
|
||||
return "delete"
|
||||
case "CONNECT", "connect":
|
||||
return "connect"
|
||||
case "OPTIONS", "options":
|
||||
return "options"
|
||||
case "NOTIFY", "notify":
|
||||
return "notify"
|
||||
default:
|
||||
return strings.ToLower(m)
|
||||
}
|
||||
}
|
||||
|
||||
// If the wrapped http.Handler has not set a status code, i.e. the value is
|
||||
// currently 0, santizeCode will return 200, for consistency with behavior in
|
||||
// the stdlib.
|
||||
func sanitizeCode(s int) string {
|
||||
switch s {
|
||||
case 100:
|
||||
return "100"
|
||||
case 101:
|
||||
return "101"
|
||||
|
||||
case 200, 0:
|
||||
return "200"
|
||||
case 201:
|
||||
return "201"
|
||||
case 202:
|
||||
return "202"
|
||||
case 203:
|
||||
return "203"
|
||||
case 204:
|
||||
return "204"
|
||||
case 205:
|
||||
return "205"
|
||||
case 206:
|
||||
return "206"
|
||||
|
||||
case 300:
|
||||
return "300"
|
||||
case 301:
|
||||
return "301"
|
||||
case 302:
|
||||
return "302"
|
||||
case 304:
|
||||
return "304"
|
||||
case 305:
|
||||
return "305"
|
||||
case 307:
|
||||
return "307"
|
||||
|
||||
case 400:
|
||||
return "400"
|
||||
case 401:
|
||||
return "401"
|
||||
case 402:
|
||||
return "402"
|
||||
case 403:
|
||||
return "403"
|
||||
case 404:
|
||||
return "404"
|
||||
case 405:
|
||||
return "405"
|
||||
case 406:
|
||||
return "406"
|
||||
case 407:
|
||||
return "407"
|
||||
case 408:
|
||||
return "408"
|
||||
case 409:
|
||||
return "409"
|
||||
case 410:
|
||||
return "410"
|
||||
case 411:
|
||||
return "411"
|
||||
case 412:
|
||||
return "412"
|
||||
case 413:
|
||||
return "413"
|
||||
case 414:
|
||||
return "414"
|
||||
case 415:
|
||||
return "415"
|
||||
case 416:
|
||||
return "416"
|
||||
case 417:
|
||||
return "417"
|
||||
case 418:
|
||||
return "418"
|
||||
|
||||
case 500:
|
||||
return "500"
|
||||
case 501:
|
||||
return "501"
|
||||
case 502:
|
||||
return "502"
|
||||
case 503:
|
||||
return "503"
|
||||
case 504:
|
||||
return "504"
|
||||
case 505:
|
||||
return "505"
|
||||
|
||||
case 428:
|
||||
return "428"
|
||||
case 429:
|
||||
return "429"
|
||||
case 431:
|
||||
return "431"
|
||||
case 511:
|
||||
return "511"
|
||||
|
||||
default:
|
||||
return strconv.Itoa(s)
|
||||
}
|
||||
}
|
||||
|
@ -54,19 +54,31 @@ type DataSource struct {
|
||||
}
|
||||
|
||||
var knownDatasourcePlugins map[string]bool = map[string]bool{
|
||||
DS_ES: true,
|
||||
DS_GRAPHITE: true,
|
||||
DS_INFLUXDB: true,
|
||||
DS_INFLUXDB_08: true,
|
||||
DS_KAIROSDB: true,
|
||||
DS_CLOUDWATCH: true,
|
||||
DS_PROMETHEUS: true,
|
||||
DS_OPENTSDB: true,
|
||||
"opennms": true,
|
||||
"druid": true,
|
||||
"dalmatinerdb": true,
|
||||
"gnocci": true,
|
||||
"zabbix": true,
|
||||
DS_ES: true,
|
||||
DS_GRAPHITE: true,
|
||||
DS_INFLUXDB: true,
|
||||
DS_INFLUXDB_08: true,
|
||||
DS_KAIROSDB: true,
|
||||
DS_CLOUDWATCH: true,
|
||||
DS_PROMETHEUS: true,
|
||||
DS_OPENTSDB: true,
|
||||
"opennms": true,
|
||||
"druid": true,
|
||||
"dalmatinerdb": true,
|
||||
"gnocci": true,
|
||||
"zabbix": true,
|
||||
"newrelic-app": true,
|
||||
"grafana-datadog-datasource": true,
|
||||
"grafana-simple-json": true,
|
||||
"grafana-splunk-datasource": true,
|
||||
"udoprog-heroic-datasource": true,
|
||||
"grafana-openfalcon-datasource": true,
|
||||
"opennms-datasource": true,
|
||||
"rackerlabs-blueflood-datasource": true,
|
||||
"crate-datasource": true,
|
||||
"ayoungprogrammer-finance-datasource": true,
|
||||
"monasca-datasource": true,
|
||||
"vertamedia-clickhouse-datasource": true,
|
||||
}
|
||||
|
||||
func IsKnownDataSourcePlugin(dsType string) bool {
|
||||
|
@ -63,8 +63,8 @@ func (e *DefaultEvalHandler) Eval(context *EvalContext) {
|
||||
context.EndTime = time.Now()
|
||||
context.Rule.State = e.getNewState(context)
|
||||
|
||||
elapsedTime := context.EndTime.Sub(context.StartTime) / time.Millisecond
|
||||
metrics.M_Alerting_Execution_Time.Update(elapsedTime)
|
||||
elapsedTime := context.EndTime.Sub(context.StartTime).Nanoseconds() / int64(time.Millisecond)
|
||||
metrics.M_Alerting_Execution_Time.Observe(float64(elapsedTime))
|
||||
}
|
||||
|
||||
// This should be move into evalContext once its been refactored.
|
||||
|
@ -10,6 +10,8 @@ import (
|
||||
"github.com/grafana/grafana/pkg/components/imguploader"
|
||||
"github.com/grafana/grafana/pkg/components/renderer"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
)
|
||||
|
||||
@ -66,6 +68,7 @@ func (n *notificationService) sendNotifications(context *EvalContext, notifiers
|
||||
for _, notifier := range notifiers {
|
||||
not := notifier //avoid updating scope variable in go routine
|
||||
n.log.Info("Sending notification", "type", not.GetType(), "id", not.GetNotifierId(), "isDefault", not.GetIsDefault())
|
||||
metrics.M_Alerting_Notification_Sent.WithLabelValues(not.GetType()).Inc()
|
||||
g.Go(func() error { return not.Notify(context) })
|
||||
}
|
||||
|
||||
@ -98,7 +101,7 @@ func (n *notificationService) uploadImage(context *EvalContext) (err error) {
|
||||
context.ImageOnDiskPath = imagePath
|
||||
}
|
||||
|
||||
context.ImagePublicUrl, err = uploader.Upload(context.ImageOnDiskPath)
|
||||
context.ImagePublicUrl, err = uploader.Upload(context.Ctx, context.ImageOnDiskPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -4,7 +4,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
)
|
||||
@ -47,7 +46,6 @@ type DingDingNotifier struct {
|
||||
|
||||
func (this *DingDingNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Sending dingding")
|
||||
metrics.M_Alerting_Notification_Sent_DingDing.Inc(1)
|
||||
|
||||
messageUrl, err := evalContext.GetRuleUrl()
|
||||
if err != nil {
|
||||
|
@ -6,7 +6,6 @@ import (
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
@ -61,7 +60,6 @@ func NewEmailNotifier(model *m.AlertNotification) (alerting.Notifier, error) {
|
||||
|
||||
func (this *EmailNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Sending alert notification to", "addresses", this.Addresses)
|
||||
metrics.M_Alerting_Notification_Sent_Email.Inc(1)
|
||||
|
||||
ruleUrl, err := evalContext.GetRuleUrl()
|
||||
if err != nil {
|
||||
|
@ -2,12 +2,12 @@ package notifiers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -53,7 +53,6 @@ type LineNotifier struct {
|
||||
|
||||
func (this *LineNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Executing line notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
|
||||
metrics.M_Alerting_Notification_Sent_LINE.Inc(1)
|
||||
|
||||
var err error
|
||||
switch evalContext.Rule.State {
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
)
|
||||
@ -65,7 +64,6 @@ type OpsGenieNotifier struct {
|
||||
}
|
||||
|
||||
func (this *OpsGenieNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
metrics.M_Alerting_Notification_Sent_OpsGenie.Inc(1)
|
||||
|
||||
var err error
|
||||
switch evalContext.Rule.State {
|
||||
|
@ -6,7 +6,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
)
|
||||
@ -63,7 +62,6 @@ type PagerdutyNotifier struct {
|
||||
}
|
||||
|
||||
func (this *PagerdutyNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
metrics.M_Alerting_Notification_Sent_PagerDuty.Inc(1)
|
||||
|
||||
if evalContext.Rule.State == m.AlertStateOK && !this.AutoResolve {
|
||||
this.log.Info("Not sending a trigger to Pagerduty", "state", evalContext.Rule.State, "auto resolve", this.AutoResolve)
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
)
|
||||
@ -125,7 +124,6 @@ type PushoverNotifier struct {
|
||||
}
|
||||
|
||||
func (this *PushoverNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
metrics.M_Alerting_Notification_Sent_Pushover.Inc(1)
|
||||
ruleUrl, err := evalContext.GetRuleUrl()
|
||||
if err != nil {
|
||||
this.log.Error("Failed get rule link", "error", err)
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
)
|
||||
@ -74,7 +73,6 @@ type SensuNotifier struct {
|
||||
|
||||
func (this *SensuNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Sending sensu result")
|
||||
metrics.M_Alerting_Notification_Sent_Sensu.Inc(1)
|
||||
|
||||
bodyJSON := simplejson.New()
|
||||
bodyJSON.Set("ruleId", evalContext.Rule.Id)
|
||||
|
@ -6,7 +6,6 @@ import (
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
@ -79,7 +78,6 @@ type SlackNotifier struct {
|
||||
|
||||
func (this *SlackNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Executing slack notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
|
||||
metrics.M_Alerting_Notification_Sent_Slack.Inc(1)
|
||||
|
||||
ruleUrl, err := evalContext.GetRuleUrl()
|
||||
if err != nil {
|
||||
|
@ -6,7 +6,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
)
|
||||
@ -80,7 +79,6 @@ func NewTelegramNotifier(model *m.AlertNotification) (alerting.Notifier, error)
|
||||
func (this *TelegramNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Sending alert notification to", "bot_token", this.BotToken)
|
||||
this.log.Info("Sending alert notification to", "chat_id", this.ChatID)
|
||||
metrics.M_Alerting_Notification_Sent_Telegram.Inc(1)
|
||||
|
||||
bodyJSON := simplejson.New()
|
||||
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
)
|
||||
@ -118,7 +117,6 @@ func NewThreemaNotifier(model *m.AlertNotification) (alerting.Notifier, error) {
|
||||
func (notifier *ThreemaNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
notifier.log.Info("Sending alert notification from", "threema_id", notifier.GatewayID)
|
||||
notifier.log.Info("Sending alert notification to", "threema_id", notifier.RecipientID)
|
||||
metrics.M_Alerting_Notification_Sent_Threema.Inc(1)
|
||||
|
||||
// Set up basic API request data
|
||||
data := url.Values{}
|
||||
|
@ -6,7 +6,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
@ -72,7 +71,6 @@ type VictoropsNotifier struct {
|
||||
// Notify sends notification to Victorops via POST to URL endpoint
|
||||
func (this *VictoropsNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Executing victorops notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
|
||||
metrics.M_Alerting_Notification_Sent_Victorops.Inc(1)
|
||||
|
||||
ruleUrl, err := evalContext.GetRuleUrl()
|
||||
if err != nil {
|
||||
|
@ -4,7 +4,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/metrics"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/services/alerting"
|
||||
)
|
||||
@ -68,7 +67,6 @@ type WebhookNotifier struct {
|
||||
|
||||
func (this *WebhookNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Sending webhook")
|
||||
metrics.M_Alerting_Notification_Sent_Webhook.Inc(1)
|
||||
|
||||
bodyJSON := simplejson.New()
|
||||
bodyJSON.Set("title", evalContext.GetNotificationTitle())
|
||||
|
@ -59,7 +59,7 @@ func (arr *DefaultRuleReader) Fetch() []*Rule {
|
||||
}
|
||||
}
|
||||
|
||||
metrics.M_Alerting_Active_Alerts.Update(int64(len(res)))
|
||||
metrics.M_Alerting_Active_Alerts.Set(float64(len(res)))
|
||||
return res
|
||||
}
|
||||
|
||||
|
@ -42,7 +42,7 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error {
|
||||
annotationData.Set("noData", true)
|
||||
}
|
||||
|
||||
countStateResult(evalContext.Rule.State)
|
||||
metrics.M_Alerting_Result_State.WithLabelValues(string(evalContext.Rule.State)).Inc()
|
||||
if evalContext.ShouldUpdateAlertState() {
|
||||
handler.log.Info("New state change", "alertId", evalContext.Rule.Id, "newState", evalContext.Rule.State, "prev state", evalContext.PrevAlertState)
|
||||
|
||||
@ -95,18 +95,3 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func countStateResult(state m.AlertStateType) {
|
||||
switch state {
|
||||
case m.AlertStatePending:
|
||||
metrics.M_Alerting_Result_State_Pending.Inc(1)
|
||||
case m.AlertStateAlerting:
|
||||
metrics.M_Alerting_Result_State_Alerting.Inc(1)
|
||||
case m.AlertStateOK:
|
||||
metrics.M_Alerting_Result_State_Ok.Inc(1)
|
||||
case m.AlertStatePaused:
|
||||
metrics.M_Alerting_Result_State_Paused.Inc(1)
|
||||
case m.AlertStateNoData:
|
||||
metrics.M_Alerting_Result_State_NoData.Inc(1)
|
||||
}
|
||||
}
|
||||
|
@ -81,7 +81,7 @@ func SaveDashboard(cmd *m.SaveDashboardCommand) error {
|
||||
|
||||
if dash.Id == 0 {
|
||||
dash.Version = 1
|
||||
metrics.M_Models_Dashboard_Insert.Inc(1)
|
||||
metrics.M_Api_Dashboard_Insert.Inc()
|
||||
dash.Data.Set("version", dash.Version)
|
||||
affectedRows, err = sess.Insert(dash)
|
||||
} else {
|
||||
|
@ -20,7 +20,7 @@ func init() {
|
||||
}
|
||||
|
||||
func GetDataSourceById(query *m.GetDataSourceByIdQuery) error {
|
||||
metrics.M_DB_DataSource_QueryById.Inc(1)
|
||||
metrics.M_DB_DataSource_QueryById.Inc()
|
||||
|
||||
datasource := m.DataSource{OrgId: query.OrgId, Id: query.Id}
|
||||
has, err := x.Get(&datasource)
|
||||
|
@ -13,8 +13,11 @@ import (
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
"github.com/prometheus/client_golang/api/prometheus"
|
||||
pmodel "github.com/prometheus/common/model"
|
||||
api "github.com/prometheus/client_golang/api"
|
||||
apiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
//api "github.com/prometheus/client_golang/api"
|
||||
//apiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
)
|
||||
|
||||
type PrometheusExecutor struct {
|
||||
@ -57,26 +60,26 @@ func init() {
|
||||
legendFormat = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)
|
||||
}
|
||||
|
||||
func (e *PrometheusExecutor) getClient() (prometheus.QueryAPI, error) {
|
||||
cfg := prometheus.Config{
|
||||
Address: e.DataSource.Url,
|
||||
Transport: e.Transport,
|
||||
func (e *PrometheusExecutor) getClient() (apiv1.API, error) {
|
||||
cfg := api.Config{
|
||||
Address: e.DataSource.Url,
|
||||
RoundTripper: e.Transport,
|
||||
}
|
||||
|
||||
if e.BasicAuth {
|
||||
cfg.Transport = basicAuthTransport{
|
||||
cfg.RoundTripper = basicAuthTransport{
|
||||
Transport: e.Transport,
|
||||
username: e.BasicAuthUser,
|
||||
password: e.BasicAuthPassword,
|
||||
}
|
||||
}
|
||||
|
||||
client, err := prometheus.New(cfg)
|
||||
client, err := api.NewClient(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return prometheus.NewQueryAPI(client), nil
|
||||
return apiv1.NewAPI(client), nil
|
||||
}
|
||||
|
||||
func (e *PrometheusExecutor) Execute(ctx context.Context, queries tsdb.QuerySlice, queryContext *tsdb.QueryContext) *tsdb.BatchResult {
|
||||
@ -92,7 +95,7 @@ func (e *PrometheusExecutor) Execute(ctx context.Context, queries tsdb.QuerySlic
|
||||
return result.WithError(err)
|
||||
}
|
||||
|
||||
timeRange := prometheus.Range{
|
||||
timeRange := apiv1.Range{
|
||||
Start: query.Start,
|
||||
End: query.End,
|
||||
Step: query.Step,
|
||||
@ -112,7 +115,7 @@ func (e *PrometheusExecutor) Execute(ctx context.Context, queries tsdb.QuerySlic
|
||||
return result
|
||||
}
|
||||
|
||||
func formatLegend(metric pmodel.Metric, query *PrometheusQuery) string {
|
||||
func formatLegend(metric model.Metric, query *PrometheusQuery) string {
|
||||
if query.LegendFormat == "" {
|
||||
return metric.String()
|
||||
}
|
||||
@ -121,7 +124,7 @@ func formatLegend(metric pmodel.Metric, query *PrometheusQuery) string {
|
||||
labelName := strings.Replace(string(in), "{{", "", 1)
|
||||
labelName = strings.Replace(labelName, "}}", "", 1)
|
||||
labelName = strings.TrimSpace(labelName)
|
||||
if val, exists := metric[pmodel.LabelName(labelName)]; exists {
|
||||
if val, exists := metric[model.LabelName(labelName)]; exists {
|
||||
return []byte(val)
|
||||
}
|
||||
|
||||
@ -165,11 +168,11 @@ func parseQuery(queries tsdb.QuerySlice, queryContext *tsdb.QueryContext) (*Prom
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseResponse(value pmodel.Value, query *PrometheusQuery) (map[string]*tsdb.QueryResult, error) {
|
||||
func parseResponse(value model.Value, query *PrometheusQuery) (map[string]*tsdb.QueryResult, error) {
|
||||
queryResults := make(map[string]*tsdb.QueryResult)
|
||||
queryRes := tsdb.NewQueryResult()
|
||||
|
||||
data, ok := value.(pmodel.Matrix)
|
||||
data, ok := value.(model.Matrix)
|
||||
if !ok {
|
||||
return queryResults, fmt.Errorf("Unsupported result format: %s", value.Type().String())
|
||||
}
|
||||
|
@ -389,12 +389,17 @@ function($, _) {
|
||||
return value.toExponential(decimals);
|
||||
};
|
||||
|
||||
kbn.valueFormats.locale = function(value, decimals) {
|
||||
return value.toLocaleString(undefined, {maximumFractionDigits: decimals});
|
||||
};
|
||||
|
||||
// Currencies
|
||||
kbn.valueFormats.currencyUSD = kbn.formatBuilders.currency('$');
|
||||
kbn.valueFormats.currencyGBP = kbn.formatBuilders.currency('£');
|
||||
kbn.valueFormats.currencyEUR = kbn.formatBuilders.currency('€');
|
||||
kbn.valueFormats.currencyJPY = kbn.formatBuilders.currency('¥');
|
||||
kbn.valueFormats.currencyRUB = kbn.formatBuilders.currency('₽');
|
||||
kbn.valueFormats.currencyUAH = kbn.formatBuilders.currency('₴');
|
||||
|
||||
// Data (Binary)
|
||||
kbn.valueFormats.bits = kbn.formatBuilders.binarySIPrefix('b');
|
||||
@ -708,6 +713,7 @@ function($, _) {
|
||||
{text: 'hexadecimal (0x)', value: 'hex0x' },
|
||||
{text: 'hexadecimal', value: 'hex' },
|
||||
{text: 'scientific notation', value: 'sci' },
|
||||
{text: 'locale format', value: 'locale' },
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -718,6 +724,7 @@ function($, _) {
|
||||
{text: 'Euro (€)', value: 'currencyEUR'},
|
||||
{text: 'Yen (¥)', value: 'currencyJPY'},
|
||||
{text: 'Rubles (₽)', value: 'currencyRUB'},
|
||||
{text: 'Hryvnias (₴)', value: 'currencyUAH'},
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -245,6 +245,9 @@ export class DashboardModel {
|
||||
delete newPanel.repeatIteration;
|
||||
delete newPanel.repeatPanelId;
|
||||
delete newPanel.scopedVars;
|
||||
if (newPanel.alert) {
|
||||
delete newPanel.thresholds;
|
||||
}
|
||||
delete newPanel.alert;
|
||||
|
||||
row.addPanel(newPanel);
|
||||
|
@ -184,7 +184,6 @@ export class GraphiteQueryCtrl extends QueryCtrl {
|
||||
altSegments.unshift(this.uiSegmentSrv.newSegment('*'));
|
||||
return altSegments;
|
||||
}).catch(err => {
|
||||
appEvents.emit('alert-error', ['Error', err]);
|
||||
return [];
|
||||
});
|
||||
}
|
||||
|
106
public/app/plugins/datasource/mysql/mode-sql.js
Normal file
106
public/app/plugins/datasource/mysql/mode-sql.js
Normal file
@ -0,0 +1,106 @@
|
||||
// jshint ignore: start
|
||||
// jscs: disable
|
||||
|
||||
ace.define("ace/mode/sql_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
|
||||
"use strict";
|
||||
|
||||
var oop = require("../lib/oop");
|
||||
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
|
||||
|
||||
var SqlHighlightRules = function() {
|
||||
|
||||
var keywords = (
|
||||
"select|insert|update|delete|from|where|and|or|group|by|order|limit|offset|having|as|case|" +
|
||||
"when|else|end|type|left|right|join|on|outer|desc|asc|union|create|table|primary|key|if|" +
|
||||
"foreign|not|references|default|null|inner|cross|natural|database|drop|grant"
|
||||
);
|
||||
|
||||
var builtinConstants = (
|
||||
"true|false"
|
||||
);
|
||||
|
||||
var builtinFunctions = (
|
||||
"avg|count|first|last|max|min|sum|ucase|lcase|mid|len|round|rank|now|format|" +
|
||||
"coalesce|ifnull|isnull|nvl"
|
||||
);
|
||||
|
||||
var dataTypes = (
|
||||
"int|numeric|decimal|date|varchar|char|bigint|float|double|bit|binary|text|set|timestamp|" +
|
||||
"money|real|number|integer"
|
||||
);
|
||||
|
||||
var keywordMapper = this.createKeywordMapper({
|
||||
"support.function": builtinFunctions,
|
||||
"keyword": keywords,
|
||||
"constant.language": builtinConstants,
|
||||
"storage.type": dataTypes
|
||||
}, "identifier", true);
|
||||
|
||||
this.$rules = {
|
||||
"start" : [ {
|
||||
token : "comment",
|
||||
regex : "--.*$"
|
||||
}, {
|
||||
token : "comment",
|
||||
start : "/\\*",
|
||||
end : "\\*/"
|
||||
}, {
|
||||
token : "string", // " string
|
||||
regex : '".*?"'
|
||||
}, {
|
||||
token : "string", // ' string
|
||||
regex : "'.*?'"
|
||||
}, {
|
||||
token : "string", // ` string (apache drill)
|
||||
regex : "`.*?`"
|
||||
}, {
|
||||
token : "constant.numeric", // float
|
||||
regex : "[+-]?\\d+(?:(?:\\.\\d*)?(?:[eE][+-]?\\d+)?)?\\b"
|
||||
}, {
|
||||
token : keywordMapper,
|
||||
regex : "[a-zA-Z_$][a-zA-Z0-9_$]*\\b"
|
||||
}, {
|
||||
token : "keyword.operator",
|
||||
regex : "\\+|\\-|\\/|\\/\\/|%|<@>|@>|<@|&|\\^|~|<|>|<=|=>|==|!=|<>|="
|
||||
}, {
|
||||
token : "paren.lparen",
|
||||
regex : "[\\(]"
|
||||
}, {
|
||||
token : "paren.rparen",
|
||||
regex : "[\\)]"
|
||||
}, {
|
||||
token : "text",
|
||||
regex : "\\s+"
|
||||
} ]
|
||||
};
|
||||
this.normalizeRules();
|
||||
};
|
||||
|
||||
oop.inherits(SqlHighlightRules, TextHighlightRules);
|
||||
|
||||
exports.SqlHighlightRules = SqlHighlightRules;
|
||||
});
|
||||
|
||||
ace.define("ace/mode/sql",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/sql_highlight_rules"], function(require, exports, module) {
|
||||
"use strict";
|
||||
|
||||
var oop = require("../lib/oop");
|
||||
var TextMode = require("./text").Mode;
|
||||
var SqlHighlightRules = require("./sql_highlight_rules").SqlHighlightRules;
|
||||
|
||||
var Mode = function() {
|
||||
this.HighlightRules = SqlHighlightRules;
|
||||
this.$behaviour = this.$defaultBehaviour;
|
||||
};
|
||||
oop.inherits(Mode, TextMode);
|
||||
|
||||
(function() {
|
||||
|
||||
this.lineCommentStart = "--";
|
||||
|
||||
this.$id = "ace/mode/sql";
|
||||
}).call(Mode.prototype);
|
||||
|
||||
exports.Mode = Mode;
|
||||
|
||||
});
|
@ -1,7 +1,7 @@
|
||||
<query-editor-row query-ctrl="ctrl" can-collapse="false">
|
||||
<div class="gf-form-inline">
|
||||
<div class="gf-form gf-form--grow">
|
||||
<code-editor content="ctrl.target.rawSql" on-change="ctrl.panelCtrl.refresh()" data-mode="sql">
|
||||
<code-editor content="ctrl.target.rawSql" datasource="ctrl.datasource" on-change="ctrl.panelCtrl.refresh()" data-mode="sql">
|
||||
</code-editor>
|
||||
</div>
|
||||
</div>
|
||||
|
@ -9,8 +9,8 @@ System.config({
|
||||
'eventemitter3': 'vendor/npm/eventemitter3/index.js',
|
||||
'tether-drop': 'vendor/npm/tether-drop/dist/js/drop.js',
|
||||
'moment': 'vendor/moment.js',
|
||||
"jquery": "vendor/jquery/dist/jquery.js",
|
||||
'lodash-src': 'vendor/lodash/dist/lodash.js',
|
||||
"jquery": "vendor/npm/jquery/dist/jquery.js",
|
||||
'lodash-src': 'vendor/npm/lodash/lodash.js',
|
||||
"lodash": 'app/core/lodash_extended.js',
|
||||
"angular": "vendor/angular/angular.js",
|
||||
"bootstrap": "vendor/bootstrap/bootstrap.js",
|
||||
|
@ -300,7 +300,7 @@ a.external-link {
|
||||
max-width: 100%;
|
||||
}
|
||||
|
||||
ul {
|
||||
ul, ol {
|
||||
padding-left: $spacer*1.5;
|
||||
margin-bottom: $spacer;
|
||||
}
|
||||
@ -332,7 +332,7 @@ a.external-link {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
ul:last-child {
|
||||
ul:last-child, ol:last-child {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
}
|
||||
|
@ -17,8 +17,8 @@
|
||||
'tether': 'vendor/npm/tether/dist/js/tether.js',
|
||||
'tether-drop': 'vendor/npm/tether-drop/dist/js/drop.js',
|
||||
'moment': 'vendor/moment.js',
|
||||
"jquery": "vendor/jquery/dist/jquery.js",
|
||||
'lodash-src': 'vendor/lodash/dist/lodash.js',
|
||||
"jquery": "vendor/npm/jquery/dist/jquery.js",
|
||||
'lodash-src': 'vendor/npm/lodash/lodash.js',
|
||||
"lodash": 'app/core/lodash_extended.js',
|
||||
"angular": 'vendor/angular/angular.js',
|
||||
'angular-mocks': 'vendor/angular-mocks/angular-mocks.js',
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user