Mirror of https://github.com/grafana/grafana.git
Merge remote-tracking branch 'grafana/master' into influx-db-query2

* grafana/master: (248 commits)
  add AWS/NATGateway metrics (#9202)
  docs: minor docs update
  docs: added v4.4 to docs archive
  docs: uppdated getting started with images and text, added info on query inspector to version45 and trouble shooting (#9193)
  reduce readme to be about the code
  adds note about closing #8876
  build: fixed broken build
  Optimize some wrong usage and spelling
  show only label name in label matcher (#9167)
  (prometheus) cache metric suggest query result (#9182)
  Have 'Hide series with only zeros' ignore nulls (#9179)
  docs: updated image in docs (#9184)
  fix: graph right y label is now correctly possitioned, fixes #9172
  Fix sortByName and percentileOfSeries in gfunc.js (#9169)
  docs: updated docs with v4.5 beta links
  Update CHANGELOG.md
  version bump to v4.5-beta1
  fix: fixed jshint issue
  fix: Elasticsearch and adhoc filters fix, fixes #9165
  docs: update to http api admin section
  ...

Commit: f7efa65647
.flooignore (12 lines changed)
@@ -1,12 +0,0 @@
-#*
-*.o
-*.pyc
-*.pyo
-*~
-extern/
-node_modules/
-tmp/
-data/
-vendor/
-public_gen/
-dist/
.gitignore (vendored, 2 lines changed)
@@ -39,3 +39,5 @@ profile.cov
/pkg/cmd/grafana-cli/grafana-cli
/pkg/cmd/grafana-server/grafana-server
/examples/*/dist
+/packaging/**/*.rpm
+/packaging/**/*.deb
CHANGELOG.md (70 lines changed)
@@ -1,3 +1,73 @@
# 5.0.0 (unreleased)

### WIP (in develop branch currently as it's unstable or unfinished)
- Dashboard folders
- User groups
- Dashboard permissions (on folder & dashboard level), permissions can be assigned to groups or individual users
- UX changes to nav & side menu
- New dashboard grid layout system

# 4.5.0 (unreleased)

## Enhancements

* **Shortcuts**: Adds shortcut for creating new dashboard [#8876](https://github.com/grafana/grafana/pull/8876) thx [@mtanda](https://github.com/mtanda)

# 4.5.0-beta1 (2017-09-05)

## New Features

* **Table panel**: Render cell values as links that can have a url template that uses variables from the current table row. [#3754](https://github.com/grafana/grafana/issues/3754)
* **Elasticsearch**: Add ad hoc filters directly by clicking values in table panel [#8052](https://github.com/grafana/grafana/issues/8052).
* **MySQL**: New rich query editor with syntax highlighting
* **Prometheus**: New rich query editor with syntax highlighting, metric & range auto complete and integrated function docs. [#5117](https://github.com/grafana/grafana/issues/5117)

## Enhancements

* **GitHub OAuth**: Support for GitHub organizations with 100+ teams. [#8846](https://github.com/grafana/grafana/issues/8846), thx [@skwashd](https://github.com/skwashd)
* **Graphite**: Calls to the Graphite api /metrics/find now include the panel or dashboard time range (from & until) in most cases, [#8055](https://github.com/grafana/grafana/issues/8055)
* **Graphite**: Added new graphite 1.0 functions, available if you set version to 1.0.x in data source settings. New functions: mapSeries, reduceSeries, isNonNull, groupByNodes, offsetToZero, grep, weightedAverage, removeEmptySeries, aggregateLine, averageOutsidePercentile, delay, exponentialMovingAverage, fallbackSeries, integralByInterval, interpolate, invert, linearRegression, movingMin, movingMax, movingSum, multiplySeriesWithWildcards, pow, powSeries, removeBetweenPercentile, squareRoot, timeSlice, closes [#8261](https://github.com/grafana/grafana/issues/8261)
* **Elasticsearch**: Ad-hoc filters now use query phrase match filters instead of term filters, works on non keyword/raw fields [#9095](https://github.com/grafana/grafana/issues/9095).

### Breaking change

* **InfluxDB/Elasticsearch**: The panel & data source option named "Group by time interval" is now named "Min time interval" and now always defines a lower limit for the auto group by time, without requiring the `>` prefix (that prefix still works). In theory this should have close to zero impact on existing dashboards. It does mean that if you used this setting to define a hard group by time interval of, say, "1d", the effective interval can now grow beyond "1d" when you zoom out to a wide enough time range, since the setting is always treated as a lower limit.

## Changes

* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
* **InfluxDB**: Added parentheses around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)

## Bug Fixes

* **Modals**: Maintain scroll position after opening/leaving modal [#8800](https://github.com/grafana/grafana/issues/8800)
* **Templating**: You cannot select data source variables as data source for other template variables [#7510](https://github.com/grafana/grafana/issues/7510)
* **MySQL/Postgres**: Fix for the max_idle_conn option default, which was wrongly set to zero. Zero does not mean unlimited here, it means zero idle connections, which in practice disables connection pooling. Fixes [#8513](https://github.com/grafana/grafana/issues/8513)

# 4.4.3 (2017-08-07)

## Bug Fixes

* **Search**: Fix for issue that caused search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
* **Modals**: ESC key now closes modal again, fixes [#8988](https://github.com/grafana/grafana/issues/8988), thx [@j-white](https://github.com/j-white)

# 4.4.2 (2017-08-01)

## Bug Fixes

* **GrafanaDB(mysql)**: Fix for dashboard_version.data column type, now changed to MEDIUMTEXT, fixes [#8813](https://github.com/grafana/grafana/issues/8813)
* **Dashboard(settings)**: Closing setting views using ESC key did not update url correctly, fixes [#8869](https://github.com/grafana/grafana/issues/8869)
* **InfluxDB**: Wrong username/password parameter name when using direct access, fixes [#8789](https://github.com/grafana/grafana/issues/8789)
* **Forms(TextArea)**: Bug fix for no scroll in text areas [#8797](https://github.com/grafana/grafana/issues/8797)
* **Png Render API**: Bug fix for the timeout url parameter, it now works as it should. The default value was also increased from 30 to 60 seconds [#8710](https://github.com/grafana/grafana/issues/8710) (a usage sketch follows this list)
* **Search**: Fix for not being able to close search by clicking on right side of search result container, [#8848](https://github.com/grafana/grafana/issues/8848)
* **Cloudwatch**: Fix for using variables in templating metrics() query, [#8965](https://github.com/grafana/grafana/issues/8965)
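For context on the render timeout fix above, here is a hedged way to exercise the parameter from the command line — the dashboard slug `mydash` and panel id `2` are hypothetical, and the URL shape assumes the v4-era solo-panel render endpoint:

```bash
# Request a PNG of a single panel; the timeout query parameter is in seconds
# and now defaults to 60 when omitted.
curl -o panel.png \
  "http://localhost:3000/render/dashboard-solo/db/mydash?panelId=2&width=1000&height=500&timeout=60"
```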
## Changes

* **Settings(defaults)**: allow_sign_up default changed from true to false [#8743](https://github.com/grafana/grafana/issues/8743)
* **Settings(defaults)**: allow_org_create default changed from true to false

# 4.4.1 (2017-07-05)

## Bug Fixes
CODE_OF_CONDUCT.md (new file, 46 lines)
@@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at contact@grafana.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
README.md (93 lines changed)
@@ -1,4 +1,4 @@
-[Grafana](https://grafana.com) [](https://circleci.com/gh/grafana/grafana)
+[Grafana](https://grafana.com) [](https://circleci.com/gh/grafana/grafana) [](https://goreportcard.com/report/github.com/grafana/grafana)
================
[Website](https://grafana.com) |
[Twitter](https://twitter.com/grafana) |
@@ -9,65 +9,8 @@ Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.

- [Install instructions](http://docs.grafana.org/installation/)
- [What's New in Grafana 2.0](http://docs.grafana.org/guides/whats-new-in-v2/)
- [What's New in Grafana 2.1](http://docs.grafana.org/guides/whats-new-in-v2-1/)
- [What's New in Grafana 2.5](http://docs.grafana.org/guides/whats-new-in-v2-5/)
- [What's New in Grafana 3.0](http://docs.grafana.org/guides/whats-new-in-v3/)
- [What's New in Grafana 4.0](http://docs.grafana.org/guides/whats-new-in-v4/)
- [What's New in Grafana 4.1](http://docs.grafana.org/guides/whats-new-in-v4-1/)
- [What's New in Grafana 4.2](http://docs.grafana.org/guides/whats-new-in-v4-2/)
- [What's New in Grafana 4.3](http://docs.grafana.org/guides/whats-new-in-v4-3/)
- [What's New in Grafana 4.4](http://docs.grafana.org/guides/whats-new-in-v4-4/)

## Features

### Graphing
- Fast rendering, even over large timespans
- Click and drag to zoom
- Multiple Y-axis, logarithmic scales
- Bars, Lines, Points
- Smart Y-axis formatting
- Series toggles & color selector
- Legend values, and formatting options
- Grid thresholds, axis labels
- [Annotations](http://docs.grafana.org/reference/annotations/)
- Any panel can be rendered to PNG (server side using phantomjs)

### Dashboards
- Create, edit, save & search dashboards
- Change column spans and row heights
- Drag and drop panels to rearrange
- [Templating](http://docs.grafana.org/reference/templating/)
- [Scripted dashboards](http://docs.grafana.org/reference/scripting/)
- [Dashboard playlists](http://docs.grafana.org/reference/playlist/)
- [Time range controls](http://docs.grafana.org/reference/timerange/)
- [Share snapshots publicly](http://docs.grafana.org/v2.0/reference/sharing/)

### InfluxDB
- Use InfluxDB as a metric data source, annotation source
- Query editor with field and tag typeahead, easy group by and function selection

### Graphite
- Graphite target expression parser
- Feature rich query composer
- Quickly add and edit functions & parameters
- Templated queries
- [See it in action](http://docs.grafana.org/datasources/graphite/)

### Elasticsearch, Prometheus & OpenTSDB
- Feature rich query editor UI

### Alerting
- Define alert rules using graphs & query conditions
- Schedule & evaluate alert rules, send notifications to Slack, Hipchat, Email, PagerDuty, etc.

## Requirements
There are no dependencies except an external time series data store. For dashboards and user accounts Grafana can use an embedded database (sqlite3) or you can use an external SQL database like MySQL or Postgres.

## Installation
-Head to [grafana.org](http://docs.grafana.org/installation/) and [download](https://grafana.com/get)
+Head to [docs.grafana.org](http://docs.grafana.org/installation/) and [download](https://grafana.com/get)
the latest release.

If you have any problems please read the [troubleshooting guide](http://docs.grafana.org/installation/troubleshooting/).

@@ -84,27 +27,10 @@ the latest master builds [here](https://grafana.com/grafana/download)
- Go 1.8.1
- NodeJS LTS

### Get Code

```bash
go get github.com/grafana/grafana
```

Since imports of dependencies use the absolute path `github.com/grafana/grafana` within the `$GOPATH`, you will need to put your version of the code in `$GOPATH/src/github.com/grafana/grafana` to be able to develop and build Grafana from a cloned repository. To do so, you can either clone your forked repository directly to `$GOPATH/src/github.com/grafana`, or create a symbolic link from your copy of the code to `$GOPATH/src/github.com/grafana/grafana`. The latter option makes it easy to change which grafana repository you build.

```bash
go get github.com/*your_account*/grafana
mkdir $GOPATH/src/github.com/grafana
ln -s $GOPATH/src/github.com/*your_account*/grafana $GOPATH/src/github.com/grafana/grafana
```

### Building the backend
```bash
cd $GOPATH/src/github.com/grafana/grafana
go get github.com/grafana/grafana
cd ~/go/src/github.com/grafana/grafana
go run build.go setup
go run build.go build
```
@@ -123,8 +49,7 @@ npm run build
To build the frontend assets only on changes:

```bash
sudo npm install -g grunt-cli # to do only once to install grunt command line interface
grunt watch
npm run dev
```

### Recompile backend on source change
@@ -134,11 +59,6 @@ go get github.com/Unknwon/bra
bra run
```

### Running
```bash
./bin/grafana-server
```

Open Grafana in your browser (default: `http://localhost:3000`) and log in with the admin user (default: `user/pass = admin/admin`).
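To sanity-check that the server actually came up, a minimal sketch — assuming the default port and that your build exposes the health endpoint:

```bash
# Expect HTTP 200 and a small JSON body when the server is healthy.
curl -s -i http://localhost:3000/api/health
```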
### Dev config
@@ -149,9 +69,6 @@ You only need to add the options you want to override. Config files are applied
1. grafana.ini
1. custom.ini
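As a concrete sketch of that override order — the file location follows the list above, and the `app_mode` value is just an illustrative choice:

```bash
# conf/custom.ini only needs the settings you want to override;
# everything else falls back to the values in grafana.ini.
cat > conf/custom.ini <<'EOF'
app_mode = development
EOF
```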
## Create a pull request
Before or after you create a pull request, sign the [contributor license agreement](http://docs.grafana.org/project/cla/).

## Contribute
If you have any idea for an improvement or found a bug, do not hesitate to open an issue. And if you have time, clone this repo and submit a pull request and help me make Grafana
ROADMAP.md (22 lines changed)
@@ -1,31 +1,29 @@
-# Roadmap (2017-04-23)
+# Roadmap (2017-08-29)

This roadmap is a tentative plan for the core development team. Things change constantly as PRs come in and priorities change. But it will give you an idea of our current vision and plan.

### Short term (1-4 months)

- New Heatmap Panel (Implemented and available in master)
- Support for MySQL & Postgres as data sources (Work started and an alpha version for MySQL is available in master)
- User Groups & Dashboard folders with ACLs (work started, not yet completed, https://github.com/grafana/grafana/issues/1611#issuecomment-287742633)
- Improve new user UX
- Improve docs
- Support for alerting for Elasticsearch (can be tested in [branch](https://github.com/grafana/grafana/tree/alerting-elasticsearch) but needs more work)
- Graph annotations (create from grafana, region annotations, better annotation viz)
- Improve alerting (clustering, silence rules)
- Release Grafana v4.5 with fixes and minor enhancements
- Release Grafana v5
- User groups
- Dashboard folders
- Dashboard permissions (on folders as well), permissions on groups or users
- New Dashboard layout engine
- New sidemenu & nav UX
- Elasticsearch alerting

### Long term

- Improved dashboard panel layout engine (to make it easier and enable more flexible layouts)
- Backend plugins to support more Auth options, Alerting data sources & notifications
- Universal time series transformations for any data source (meta queries)
- Reporting
- Web socket & live data streams
- Migrate to Angular2
- Migrate to Angular2 or react

### Outside contributions
We know this is being worked on right now by contributors (and we hope to merge it when it's ready).

- Dashboard revisions (be able to revert dashboard changes)
- Clustering for alert engine (load distribution)
@@ -76,8 +76,10 @@ password =
# Example: mysql://user:secret@host:port/database
url =

-# Max idle conn setting default is 2
-max_idle_conn = 2
+# Max conn setting default is 0 (means not set)
+max_idle_conn =
+max_open_conn =

# For "postgres", use either "disable", "require" or "verify-full"
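A hedged way to try these pool settings without editing the defaults: override them from custom.ini (the section name and keys are taken from the snippet above; the values are illustrative):

```bash
cat >> conf/custom.ini <<'EOF'
[database]
# keep a couple of idle connections so pooling actually takes effect
max_idle_conn = 2
max_open_conn = 100
EOF
```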
@@ -184,10 +186,10 @@ snapshot_TTL_days = 90
#################################### Users ####################################
[users]
# disable user signup / registration
-allow_sign_up = true
+allow_sign_up = false

# Allow non admin users to create organizations
-allow_org_create = true
+allow_org_create = false

# Set to true to automatically assign new users to the default organization (id 1)
auto_assign_org = true
@@ -204,6 +206,11 @@ login_hint = email or username
# Default UI theme ("dark" or "light")
default_theme = dark

+# External user management
+external_manage_link_url =
+external_manage_link_name =
+external_manage_info =

[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth
disable_login_form = false
@@ -85,8 +85,10 @@
# For "sqlite3" only, path relative to data_path setting
;path = grafana.db

-# Max idle conn setting default is 2
-;max_idle_conn = 2
+# Max conn setting default is 0 (means not set)
+;max_idle_conn =
+;max_open_conn =

@@ -191,6 +193,11 @@
# Default UI theme ("dark" or "light")
;default_theme = dark

+# External user management, these options affect the organization users view
+;external_manage_link_url =
+;external_manage_link_name =
+;external_manage_info =

[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
@@ -32,6 +32,7 @@ add ./files/my_htpasswd /etc/nginx/.htpasswd
# Add system service config
add ./files/nginx.conf /etc/nginx/nginx.conf
add ./files/supervisord.conf /etc/supervisor/conf.d/supervisord.conf

# Nginx
#
# graphite
@@ -39,6 +40,7 @@ expose 80

# Carbon line receiver port
expose 2003

# Carbon cache query port
expose 7002
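With the Carbon line receiver exposed on 2003, a quick smoke test from the host — a hedged sketch that assumes the container maps the port to localhost and that netcat is installed:

```bash
# Send one datapoint in Carbon's plaintext protocol: <metric.path> <value> <unix_timestamp>
echo "local.random.diceroll 4 $(date +%s)" | nc -w1 localhost 2003
```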
docker/blocks/graphite1/Dockerfile (new file, 93 lines)
@@ -0,0 +1,93 @@
FROM phusion/baseimage:0.9.22
MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>

RUN apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y --force-yes install vim \
  nginx \
  python-dev \
  python-flup \
  python-pip \
  python-ldap \
  expect \
  git \
  memcached \
  sqlite3 \
  libffi-dev \
  libcairo2 \
  libcairo2-dev \
  python-cairo \
  python-rrdtool \
  pkg-config \
  nodejs \
  && rm -rf /var/lib/apt/lists/*

# fix python dependencies (LTS Django and newer memcached/txAMQP)
RUN pip install django==1.8.18 \
  python-memcached==1.53 \
  txAMQP==0.6.2 \
  && pip install --upgrade pip

# install whisper
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper
WORKDIR /usr/local/src/whisper
RUN python ./setup.py install

# install carbon
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon
WORKDIR /usr/local/src/carbon
RUN pip install -r requirements.txt \
  && python ./setup.py install

# install graphite
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
WORKDIR /usr/local/src/graphite-web
RUN pip install -r requirements.txt \
  && python ./setup.py install
ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
WORKDIR /opt/graphite/webapp
RUN mkdir -p /var/log/graphite/ \
  && PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings

# install statsd
RUN git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
ADD conf/opt/statsd/config.js /opt/statsd/config.js

# config nginx
RUN rm /etc/nginx/sites-enabled/default
ADD conf/etc/nginx/nginx.conf /etc/nginx/nginx.conf
ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/graphite-statsd.conf

# init django admin
ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
RUN chmod +x /usr/local/bin/manage.sh \
  && /usr/local/bin/django_admin_init.exp

# logging support
RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
ADD conf/etc/logrotate.d/graphite-statsd /etc/logrotate.d/graphite-statsd

# daemons
ADD conf/etc/service/carbon/run /etc/service/carbon/run
ADD conf/etc/service/carbon-aggregator/run /etc/service/carbon-aggregator/run
ADD conf/etc/service/graphite/run /etc/service/graphite/run
ADD conf/etc/service/statsd/run /etc/service/statsd/run
ADD conf/etc/service/nginx/run /etc/service/nginx/run

# default conf setup
ADD conf /etc/graphite-statsd/conf
ADD conf/etc/my_init.d/01_conf_init.sh /etc/my_init.d/01_conf_init.sh

# cleanup
RUN apt-get clean \
  && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# defaults
EXPOSE 80 2003-2004 2023-2024 8125/udp 8126
VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
WORKDIR /
ENV HOME /root
CMD ["/sbin/my_init"]
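To try this block locally, a hedged sketch — the image tag and host ports are arbitrary choices; the container ports follow the EXPOSE line above:

```bash
# Build and run the graphite1 block; map the web UI, Carbon, and statsd ports.
docker build -t graphite1 docker/blocks/graphite1
docker run -d --name graphite1 \
  -p 8080:80 -p 2003:2003 -p 8125:8125/udp \
  graphite1
```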
docker/blocks/graphite1/conf/etc/logrotate.d/graphite-statsd (new file, 11 lines)
@@ -0,0 +1,11 @@
/var/log/*.log /var/log/*/*.log {
  weekly
  size 50M
  missingok
  rotate 10
  compress
  delaycompress
  notifempty
  copytruncate
  su root syslog
}
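To verify the rotation rules parse and match the intended files, a dry run inside the container is a safe check:

```bash
# -d (debug) prints what logrotate would do without actually rotating anything.
logrotate -d /etc/logrotate.d/graphite-statsd
```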
docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
#!/bin/bash

conf_dir=/etc/graphite-statsd/conf

# auto setup graphite with default configs if /opt/graphite is missing
# needed for the use case when a docker host volume is mounted at any of the following:
# - /opt/graphite
# - /opt/graphite/conf
# - /opt/graphite/webapp/graphite
graphite_dir_contents=$(find /opt/graphite -mindepth 1 -print -quit)
graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit)
graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit)
graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit)
if [[ -z $graphite_dir_contents ]]; then
  git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
  cd /usr/local/src/graphite-web && python ./setup.py install
fi
if [[ -z $graphite_storage_dir_contents ]]; then
  /usr/local/bin/django_admin_init.exp
fi
if [[ -z $graphite_conf_dir_contents ]]; then
  cp -R $conf_dir/opt/graphite/conf/*.conf /opt/graphite/conf/
fi
if [[ -z $graphite_webapp_dir_contents ]]; then
  cp $conf_dir/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
fi

# auto setup statsd with default config if /opt/statsd is missing
# needed for the use case when a docker host volume is mounted at any of the following:
# - /opt/statsd
statsd_dir_contents=$(find /opt/statsd -mindepth 1 -print -quit)
if [[ -z $statsd_dir_contents ]]; then
  git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
  cp $conf_dir/opt/statsd/config.js /opt/statsd/config.js
fi
docker/blocks/graphite1/conf/etc/nginx/nginx.conf (new file, 96 lines)
@@ -0,0 +1,96 @@
user www-data;
worker_processes 4;
pid /run/nginx.pid;
daemon off;

events {
    worker_connections 768;
    # multi_accept on;
}

http {

    ##
    # Basic Settings
    ##

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # server_tokens off;

    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # Logging Settings
    ##

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##

    gzip on;
    gzip_disable "msie6";

    # gzip_vary on;
    # gzip_proxied any;
    # gzip_comp_level 6;
    # gzip_buffers 16 8k;
    # gzip_http_version 1.1;
    # gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;

    ##
    # nginx-naxsi config
    ##
    # Uncomment it if you installed nginx-naxsi
    ##

    #include /etc/nginx/naxsi_core.rules;

    ##
    # nginx-passenger config
    ##
    # Uncomment it if you installed nginx-passenger
    ##

    #passenger_root /usr;
    #passenger_ruby /usr/bin/ruby;

    ##
    # Virtual Host Configs
    ##

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}


#mail {
#    # See sample authentication script at:
#    # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
#    # auth_http localhost/auth.php;
#    # pop3_capabilities "TOP" "USER";
#    # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
#    server {
#        listen localhost:110;
#        protocol pop3;
#        proxy on;
#    }
#
#    server {
#        listen localhost:143;
#        protocol imap;
#        proxy on;
#    }
#}
@@ -0,0 +1,31 @@
server {
    listen 80;
    root /opt/graphite/static;
    index index.html;

    location /media {
        # django admin static files
        alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/media/;
    }

    location /admin/auth/admin {
        alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
    }

    location /admin/auth/user/admin {
        alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
    }

    location / {
        proxy_pass http://localhost:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

        add_header 'Access-Control-Allow-Origin' '*';
        add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
        add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type';
        add_header 'Access-Control-Allow-Credentials' 'true';
    }

}
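A hedged way to confirm the CORS headers above are actually served — this assumes the host-port mapping from the earlier run sketch (8080 -> 80) and uses Graphite's /render path:

```bash
# -I fetches response headers only; look for the Access-Control-* lines added above.
curl -sI http://localhost:8080/render | grep -i '^access-control'
```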
docker/blocks/graphite1/conf/etc/service/carbon-aggregator/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-aggregator-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-aggregator.py start --debug >> /var/log/carbon-aggregator.log 2>&1
docker/blocks/graphite1/conf/etc/service/carbon/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-cache-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-cache.py start --debug >> /var/log/carbon.log 2>&1
docker/blocks/graphite1/conf/etc/service/graphite/run (new executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/bash

export PYTHONPATH=/opt/graphite/webapp && exec /usr/local/bin/gunicorn wsgi --workers=4 --bind=127.0.0.1:8080 --log-file=/var/log/gunicorn.log --preload --pythonpath=/opt/graphite/webapp/graphite
docker/blocks/graphite1/conf/etc/service/nginx/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

mkdir -p /var/log/nginx
exec /usr/sbin/nginx -c /etc/nginx/nginx.conf
docker/blocks/graphite1/conf/etc/service/statsd/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

exec /usr/bin/nodejs /opt/statsd/stats.js /opt/statsd/config.js >> /var/log/statsd.log 2>&1
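Once statsd is running, a hedged smoke test over UDP — the bucket name is arbitrary, and 8125/udp is the port exposed by the Dockerfile above:

```bash
# statsd line protocol: <bucket>:<value>|<type>; "c" marks a counter.
echo "deploys.test.myservice:1|c" | nc -u -w1 localhost 8125
```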
@@ -0,0 +1,35 @@
# The form of each line in this file should be as follows:
#
# output_template (frequency) = method input_pattern
#
# This will capture any received metrics that match 'input_pattern'
# for calculating an aggregate metric. The calculation will occur
# every 'frequency' seconds and the 'method' can specify 'sum' or
# 'avg'. The name of the aggregate metric will be derived from
# 'output_template' filling in any captured fields from 'input_pattern'.
#
# For example, if your metric naming scheme is:
#
#   <env>.applications.<app>.<server>.<metric>
#
# You could configure some aggregations like so:
#
#   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
#   <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
#
# As an example, if the following metrics are received:
#
#   prod.applications.apache.www01.requests
#   prod.applications.apache.www01.requests
#
# They would all go into the same aggregation buffer and after 60 seconds the
# aggregate metric 'prod.applications.apache.all.requests' would be calculated
# by summing their values.
#
# Template components such as <env> will match everything up to the next dot.
# To match multiple metric components including the dots, use <<metric>> in the
# input template:
#
#   <env>.applications.<app>.all.<app_metric> (60) = sum <env>.applications.<app>.*.<<app_metric>>
#
# Note that any time this file is modified, it will be re-read automatically.
@@ -0,0 +1,5 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, any metrics received which
# match one of these expressions will be dropped
# This file is reloaded automatically when changes are made
^some\.noisy\.metric\.prefix\..*
@@ -0,0 +1,75 @@
# This is a configuration file with AMQP enabled

[cache]
LOCAL_DATA_DIR =

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries get more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

# Enable AMQP if you want to receive metrics using your amqp broker
ENABLE_AMQP = True

# Verbose means a line will be logged for every metric received
# useful for testing
AMQP_VERBOSE = True

# your credentials for the amqp server
# AMQP_USER = guest
# AMQP_PASSWORD = guest

# the network settings for the amqp server
# AMQP_HOST = localhost
# AMQP_PORT = 5672

# if you want to include the metric name as part of the message body
# instead of as the routing key, set this to True
# AMQP_METRIC_NAME_IN_BODY = False

# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration, you have to specify distinct
# interfaces and ports for the listeners.

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf (new file, 359 lines)
@@ -0,0 +1,359 @@
[cache]
# Configure carbon directories.
#
# OS environment variables can be used to tell carbon where graphite is
# installed, where to read configuration from and where to write data.
#
#   GRAPHITE_ROOT        - Root directory of the graphite installation.
#                          Defaults to ../
#   GRAPHITE_CONF_DIR    - Configuration directory (where this file lives).
#                          Defaults to $GRAPHITE_ROOT/conf/
#   GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
#                          Defaults to $GRAPHITE_ROOT/storage/
#
# To change other directory paths, add settings to this file. The following
# configuration variables are available with these default values:
#
#   STORAGE_DIR    = $GRAPHITE_STORAGE_DIR
#   LOCAL_DATA_DIR = STORAGE_DIR/whisper/
#   WHITELISTS_DIR = STORAGE_DIR/lists/
#   CONF_DIR       = STORAGE_DIR/conf/
#   LOG_DIR        = STORAGE_DIR/log/
#   PID_DIR        = STORAGE_DIR/
#
# For FHS style directory structures, use:
#
#   STORAGE_DIR = /var/lib/carbon/
#   CONF_DIR    = /etc/carbon/
#   LOG_DIR     = /var/log/carbon/
#   PID_DIR     = /var/run/
#
#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/

# Enable daily log rotation. If disabled, a kill -HUP can be used after a manual rotate
ENABLE_LOGROTATION = True

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =
#
# NOTE: The above settings must be set under [relay] and [aggregator]
#       to take effect for those daemons as well

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries get more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 500

# If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a
# stop/shutdown is initiated. This helps when MAX_UPDATES_PER_SECOND is
# relatively low and carbon has cached a lot of updates; it enables the carbon
# daemon to shutdown more quickly.
# MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = 50

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

# Set this to True to enable the UDP listener. By default this is off
# because it is very common to run multiple carbon daemons and managing
# another (rarely used) port for every carbon instance is not fun.
ENABLE_UDP_LISTENER = False
UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# Per security concerns outlined in Bug #817247 the pickle receiver
# will use a more secure and slightly less efficient unpickler.
# Set this to True to revert to the old-fashioned insecure unpickler.
USE_INSECURE_UNPICKLER = False

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

# Set this to False to drop datapoints received after the cache
# reaches MAX_CACHE_SIZE. If this is True (the default) then sockets
# over which metrics are received will temporarily stop accepting
# data until the cache size falls below 95% MAX_CACHE_SIZE.
USE_FLOW_CONTROL = True

# By default, carbon-cache will log every whisper update and cache hit. This can be excessive and
# degrade performance if logging on the same volume as the whisper data is stored.
LOG_UPDATES = False
LOG_CACHE_HITS = False
LOG_CACHE_QUEUE_SORTS = True

# The thread that writes metrics to disk can use one of the following strategies
# for determining the order in which metrics are removed from cache and flushed to
# disk. The default option preserves the same behavior as has been historically
# available in version 0.9.10.
#
# sorted - All metrics in the cache will be counted and an ordered list of
# them will be sorted according to the number of datapoints in the cache at the
# moment of the list's creation. Metrics will then be flushed from the cache to
# disk in that order.
#
# max - The writer thread will always pop and flush the metric from cache
# that has the most datapoints. This will give a strong flush preference to
# frequently updated metrics and will also reduce random file-io. Infrequently
# updated metrics may only ever be persisted to disk at daemon shutdown if
# there are a large number of metrics which receive very frequent updates OR if
# disk i/o is very slow.
#
# naive - Metrics will be flushed from the cache to disk in an unordered
# fashion. This strategy may be desirable in situations where the storage for
# whisper files is solid state, CPU resources are very limited or deference to
# the OS's i/o scheduler is expected to compensate for the random write
# pattern.
#
CACHE_WRITE_STRATEGY = sorted

# On some systems it is desirable for whisper to write synchronously.
# Set this option to True if you'd like to try this. Basically it will
# shift the onus of buffering writes from the kernel into carbon's cache.
WHISPER_AUTOFLUSH = False

# By default new Whisper files are created pre-allocated with the data region
# filled with zeros to prevent fragmentation and speed up contiguous reads and
# writes (which are common). Enabling this option will cause Whisper to create
# the file sparsely instead. Enabling this option may allow a large increase of
# MAX_CREATES_PER_MINUTE but may have longer term performance implications
# depending on the underlying storage configuration.
# WHISPER_SPARSE_CREATE = False

# Only beneficial on linux filesystems that support the fallocate system call.
# It maintains the benefits of contiguous reads/writes, but with a potentially
# much faster creation speed, by allowing the kernel to handle the block
# allocation and zero-ing. Enabling this option may allow a large increase of
# MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported
# this option will gracefully fallback to standard POSIX file access methods.
WHISPER_FALLOCATE_CREATE = True

# Enabling this option will cause Whisper to lock each Whisper file it writes
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files
# WHISPER_LOCK_WRITES = False

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60

# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False

# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False

# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite
# AMQP_METRIC_NAME_IN_BODY = False

# The manhole interface allows you to SSH into the carbon daemon
# and get a python interpreter. BE CAREFUL WITH THIS! If you do
# something like time.sleep() in the interpreter, the whole process
# will sleep! This is *extremely* helpful in debugging, assuming
# you are familiar with the code. If you are not, please don't
# mess with this, you are asking for trouble :)
#
# ENABLE_MANHOLE = False
# MANHOLE_INTERFACE = 127.0.0.1
# MANHOLE_PORT = 7222
# MANHOLE_USER = admin
# MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE=

# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #

# To configure special settings for the carbon-cache instance 'b', uncomment this:
#[cache:b]
#LINE_RECEIVER_PORT = 2103
#PICKLE_RECEIVER_PORT = 2104
#CACHE_QUERY_PORT = 7102
# and any other settings you want to customize, defaults are inherited
# from [carbon] section.
# You can then specify the --instance=b option to manage this instance


[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2013
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2014

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# Carbon-relay has several options for metric routing controlled by RELAY_METHOD
#
# Use relay-rules.conf to route metrics to destinations based on pattern rules
#RELAY_METHOD = rules
#
# Use consistent-hashing for even distribution of metrics between destinations
#RELAY_METHOD = consistent-hashing
#
# Use consistent-hashing but take into account an aggregation-rules.conf shared
# by downstream carbon-aggregator daemons. This will ensure that all metrics
# that map to a given aggregation rule are sent to the same carbon-aggregator
# instance.
# Enable this for carbon-relays that send to a group of carbon-aggregators
#RELAY_METHOD = aggregated-consistent-hashing
RELAY_METHOD = rules

# If you use consistent-hashing you can add redundancy by replicating every
# datapoint to more than one machine.
REPLICATION_FACTOR = 1

# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The general form is IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
#
# If using RELAY_METHOD = rules, all destinations used in relay-rules.conf
# must be defined in this list
DESTINATIONS = 127.0.0.1:2004

# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500
MAX_QUEUE_SIZE = 10000

# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60


[aggregator]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2023

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2024

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# If set true, metric received will be forwarded to DESTINATIONS in addition to
# the output of the aggregation rules. If set false the carbon-aggregator will
# only ever send the output of aggregation.
FORWARD_ALL = True

# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
DESTINATIONS = 127.0.0.1:2004

# If you want to add redundancy to your data by replicating every
# datapoint to more than one machine, increase this.
REPLICATION_FACTOR = 1

# This is the maximum number of datapoints that can be queued up
# for a single destination. Once this limit is hit, we will
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
# we will drop any subsequently received datapoints.
MAX_QUEUE_SIZE = 10000

# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True

# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500

# This defines how many datapoints the aggregator remembers for
# each metric. Aggregation only happens for datapoints that fall in
# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
MAX_AGGREGATION_INTERVALS = 5

# By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back
# aggregated data points once every rule.frequency seconds, on a per-rule basis.
# Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points
# every N seconds, independent of rule frequency. This is useful, for example,
# to be able to query partially aggregated metrics from carbon-cache without
# having to first wait rule.frequency seconds.
# WRITE_BACK_FREQUENCY = 0

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
@@ -0,0 +1,57 @@
# This configuration file controls the behavior of the Dashboard UI, available
# at http://my-graphite-server/dashboard/.
#
# This file must contain a [ui] section that defines values for all of the
# following settings.
[ui]
default_graph_width = 400
default_graph_height = 250
automatic_variants = true
refresh_interval = 60
autocomplete_delay = 375
merge_hover_delay = 750

# You can set this to 'default', 'white', or a custom theme name.
# To create a custom theme, copy the dashboard-default.css file
# to dashboard-myThemeName.css in the content/css directory and
# modify it to your liking.
theme = default

[keyboard-shortcuts]
toggle_toolbar = ctrl-z
toggle_metrics_panel = ctrl-space
erase_all_graphs = alt-x
save_dashboard = alt-s
completer_add_metrics = alt-enter
completer_del_metrics = alt-backspace
give_completer_focus = shift-space

# These settings apply to the UI as a whole, all other sections in this file
# pertain only to specific metric types.
#
# The dashboard presents only metrics that fall into specified naming schemes
# defined in this file. This creates a simpler, more targeted view of the
# data. The general form for defining a naming scheme is as follows:
#
#[Metric Type]
#scheme = basis.path.<field1>.<field2>.<fieldN>
#field1.label = Foo
#field2.label = Bar
#
#
# Where each <field> will be displayed as a dropdown box
# in the UI and the remaining portion of the namespace
# shown in the Metric Selector panel. The .label options set the labels
# displayed for each dropdown.
#
# For example:
#
#[Sales]
#scheme = sales.<channel>.<type>.<brand>
#channel.label = Channel
#type.label = Product Type
#brand.label = Brand
#
# This defines a 'Sales' metric type that uses 3 dropdowns in the Context Selector
# (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc)
# will be available in the Metric Selector (upper-right panel).
@ -0,0 +1,38 @@
[default]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False

[noc]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,yellow,purple,brown,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False

[plain]
background = white
foreground = black
minorLine = grey
majorLine = rose

[summary]
background = black
lineColors = #6666ff, #66ff66, #ff6666

[alphas]
background = white
foreground = black
majorLine = grey
minorLine = rose
lineColors = 00ff00aa,ff000077,00337799
@ -0,0 +1,21 @@
# Relay destination rules for carbon-relay. Entries are scanned in order,
# and the first pattern a metric matches will cause processing to cease after sending
# unless `continue` is set to true
#
# [name]
# pattern = <regex>
# destinations = <list of destination addresses>
# continue = <boolean> # default: False
#
# name: Arbitrary unique name to identify the rule
# pattern: Regex pattern to match against the metric name
# destinations: Comma-separated list of destinations.
# ex: 127.0.0.1, 10.1.2.3:2004, 10.1.2.4:2004:a, myserver.mydomain.com
# continue: Continue processing rules if this rule matches (default: False)

# You must have exactly one section with 'default = true'
# Note that all destinations listed must also exist in carbon.conf
# in the DESTINATIONS setting in the [relay] section
[default]
default = true
destinations = 127.0.0.1:2004:a, 127.0.0.1:2104:b
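
# Example of an additional rule (illustrative only, not part of the stock
# file): route carbon's own metrics to instance a and stop matching there,
# since `continue` defaults to False.
#
# [carbon]
# pattern = ^carbon\.
# destinations = 127.0.0.1:2004:a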
@ -0,0 +1,18 @@
# This file defines regular expression patterns that can be used to
# rewrite metric names in a search & replace fashion. It consists of two
# sections, [pre] and [post]. The rules in the pre section are applied to
# metric names as soon as they are received. The post rules are applied
# after aggregation has taken place.
#
# The general form of each rule is as follows:
#
# regex-pattern = replacement-text
#
# For example:
#
# [post]
# _sum$ =
# _avg$ =
#
# These rules would strip off a suffix of _sum or _avg from any metric names
# after aggregation.
@ -0,0 +1,43 @@
# Aggregation methods for whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds
#
# [name]
# pattern = <regex>
# xFilesFactor = <float between 0 and 1>
# aggregationMethod = <average|sum|last|max|min>
#
# name: Arbitrary unique name for the rule
# pattern: Regex pattern to match against the metric name
# xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
# aggregationMethod: function to apply to data points for aggregation
#
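# Example (illustrative note): rolling 10-second points up into 1-minute
# points means 6 source points per aggregate; with xFilesFactor = 0.1 at
# least 1 of those 6 (10%) must be non-null for the aggregate to be written.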
[min]
pattern = \.lower$
xFilesFactor = 0.1
aggregationMethod = min

[max]
pattern = \.upper(_\d+)?$
xFilesFactor = 0.1
aggregationMethod = max

[sum]
pattern = \.sum$
xFilesFactor = 0
aggregationMethod = sum

[count]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum

[count_legacy]
pattern = ^stats_counts.*
xFilesFactor = 0
aggregationMethod = sum

[default_average]
pattern = .*
xFilesFactor = 0.3
aggregationMethod = average
@ -0,0 +1,17 @@
# Schema definitions for Whisper files. Entries are scanned in order, and first match wins.
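# Retentions are frequency:history pairs (explanatory note): e.g. 10s:1d
# keeps 10-second datapoints for 1 day, and 1m:7d rolls them up into
# 1-minute datapoints kept for 7 days.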
[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y

[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d

[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y

[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y
@ -0,0 +1,6 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, only metrics received which
# match one of these expressions will be persisted. If this file is empty or
# missing, all metrics will pass through.
# This file is reloaded automatically when changes are made
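# The single pattern below matches every metric name, so this default
# whitelist is effectively wide open: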
.*
@ -0,0 +1,94 @@
"""Copyright 2008 Orbitz WorldWide

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""

# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
from os.path import dirname, join, abspath


#Django settings below, do not touch!
APPEND_SLASH = False
TEMPLATE_DEBUG = False

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(dirname(abspath(__file__)), 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'

# Absolute path to the directory that holds media.
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''

MIDDLEWARE_CLASSES = (
    'graphite.middleware.LogExceptionsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.gzip.GZipMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'graphite.urls'

INSTALLED_APPS = (
    'graphite.metrics',
    'graphite.render',
    'graphite.browser',
    'graphite.composer',
    'graphite.account',
    'graphite.dashboard',
    'graphite.whitelist',
    'graphite.events',
    'graphite.url_shortener',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.staticfiles',
    'tagging',
)

AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']

GRAPHITE_WEB_APP_SETTINGS_LOADED = True

STATIC_URL = '/static/'

STATIC_ROOT = '/opt/graphite/static/'
@ -0,0 +1,215 @@
## Graphite local_settings.py
# Edit this file to customize the default Graphite webapp settings
#
# Additional customizations to Django settings can be added to this file as well

#####################################
# General Configuration #
#####################################
# Set this to a long, random unique string to use as a secret key for this
# install. This key is used for salting of hashes used in auth tokens,
# CSRF middleware, cookie storage, etc. This should be set identically among
# instances if used behind a load balancer.
#SECRET_KEY = 'UNSAFE_DEFAULT'

# In Django 1.5+ set this to the list of hosts your graphite instance is
# accessible as. See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
#ALLOWED_HOSTS = [ '*' ]

# Set your local timezone (Django's default is America/Chicago)
# If your graphs appear to be offset by a couple hours then this probably
# needs to be explicitly set to your local timezone.
#TIME_ZONE = 'America/Los_Angeles'

# Override this to provide documentation specific to your Graphite deployment
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"

# Logging
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True
#LOG_METRIC_ACCESS = True

# Enable full debug page display on exceptions (Internal Server Error pages)
#DEBUG = True

# If using RRD files and rrdcached, set to the address or socket of the daemon
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'

# This lists the memcached servers that will be used by this webapp.
# If you have a cluster of webapps you should ensure all of them
# have the *exact* same value for this setting. That will maximize cache
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
# memcached entirely.
#
# You should not use the loopback address (127.0.0.1) here if using clustering
# as every webapp in the cluster should use the exact same values to prevent
# unneeded cache misses. Set to [] to disable caching of images and fetched data
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute


#####################################
# Filesystem Paths #
#####################################
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
# to somewhere else
#GRAPHITE_ROOT = '/opt/graphite'

# Most installs done outside of a separate tree such as /opt/graphite will only
# need to change these three settings. Note that the default settings for each
# of these is relative to GRAPHITE_ROOT
#CONF_DIR = '/opt/graphite/conf'
#STORAGE_DIR = '/opt/graphite/storage'
#CONTENT_DIR = '/opt/graphite/webapp/content'

# To further or fully customize the paths, modify the following. Note that the
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
#
## Webapp config files
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'

## Data directories
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
#WHISPER_DIR = '/opt/graphite/storage/whisper'
#RRD_DIR = '/opt/graphite/storage/rrd'
#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
#LOG_DIR = '/opt/graphite/storage/log/webapp'
#INDEX_FILE = '/opt/graphite/storage/index' # Search index file


#####################################
# Email Configuration #
#####################################
# This is used for emailing rendered Graphs
# Default backend is SMTP
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
#EMAIL_USE_TLS = False
# To drop emails on the floor, enable the Dummy backend:
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'


#####################################
# Authentication Configuration #
#####################################
## LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
# OR
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
#
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
# See http://www.python-ldap.org/ for further details on these options.

## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
#USE_REMOTE_USER_AUTHENTICATION = True

# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'


##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to setup an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
#
# The following built-in database engines are available:
# django.db.backends.postgresql # Removed in Django 1.4
# django.db.backends.postgresql_psycopg2
# django.db.backends.mysql
# django.db.backends.sqlite3
# django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
#DATABASES = {
# 'default': {
# 'NAME': '/opt/graphite/storage/graphite.db',
# 'ENGINE': 'django.db.backends.sqlite3',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': ''
# }
#}
#


#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]

## These are timeout values (in seconds) for requests to remote webapps
#REMOTE_STORE_FETCH_TIMEOUT = 6 # Timeout to fetch series data
#REMOTE_STORE_FIND_TIMEOUT = 2.5 # Timeout for metric find requests
#REMOTE_STORE_RETRY_DELAY = 60 # Time before retrying a failed remote webapp
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results

## Remote rendering settings
# Set to True to enable rendering of Graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0

# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use 127.0.0.1 here in most cases
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
#CARBONLINK_TIMEOUT = 1.0

#####################################
# Additional Django Settings #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *

import os

LOG_DIR = '/var/log/graphite'
SECRET_KEY = '$(date +%s | sha256sum | base64 | head -c 64)'
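
# Optional overrides read from the environment (docker-friendly):
# MEMCACHE_HOST is a comma-separated host list and DEFAULT_CACHE_DURATION
# a number of seconds.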
if os.getenv("MEMCACHE_HOST") is not None:
    MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOST").split(",")

if os.getenv("DEFAULT_CACHE_DURATION") is not None:
    DEFAULT_CACHE_DURATION = int(os.getenv("DEFAULT_CACHE_DURATION"))
6
docker/blocks/graphite1/conf/opt/statsd/config.js
Normal file
@ -0,0 +1,6 @@
{
  "graphiteHost": "127.0.0.1",
  "graphitePort": 2003,
  "port": 8125,
  "flushInterval": 10000
}
26
docker/blocks/graphite1/conf/usr/local/bin/django_admin_init.exp
Executable file
@ -0,0 +1,26 @@
#!/usr/bin/env expect
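# Drives the interactive superuser prompts of manage.sh (django-admin syncdb)
# non-interactively; root/root are the throwaway dev credentials sent below.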

set timeout -1
spawn /usr/local/bin/manage.sh

expect "Would you like to create one now" {
    send "yes\r"
}

expect "Username" {
    send "root\r"
}

expect "Email address:" {
    send "root.graphite@mailinator.com\r"
}

expect "Password:" {
    send "root\r"
}

expect "Password *:" {
    send "root\r"
}

expect "Superuser created successfully"
3
docker/blocks/graphite1/conf/usr/local/bin/manage.sh
Normal file
@ -0,0 +1,3 @@
#!/bin/bash
PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings
PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
16
docker/blocks/graphite1/fig
Normal file
@ -0,0 +1,16 @@
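# Host port 8080 maps to nginx (container port 80) and host port 2003 to
# carbon's plaintext line receiver (see files/nginx.conf and files/carbon.conf).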
graphite:
  build: blocks/graphite1
  ports:
    - "8080:80"
    - "2003:2003"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro

fake-graphite-data:
  image: grafana/fake-data-gen
  net: bridge
  environment:
    FD_DATASOURCE: graphite
    FD_PORT: 2003
76
docker/blocks/graphite1/files/carbon.conf
Normal file
@ -0,0 +1,76 @@
[cache]
LOCAL_DATA_DIR = /opt/graphite/storage/whisper/

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries get more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade-off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

LOG_UPDATES = False

# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False

# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False

# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite

# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #

# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration; you have to specify distinct
# interfaces and ports for the listeners.

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
102
docker/blocks/graphite1/files/events_views.py
Normal file
@ -0,0 +1,102 @@
import datetime
import time

from django.utils.timezone import get_current_timezone
from django.core.urlresolvers import get_script_prefix
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from pytz import timezone

from graphite.util import json
from graphite.events import models
from graphite.render.attime import parseATTime


def to_timestamp(dt):
    return time.mktime(dt.timetuple())


class EventEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return to_timestamp(obj)
        return json.JSONEncoder.default(self, obj)


def view_events(request):
    if request.method == "GET":
        context = {'events': fetch(request),
                   'slash': get_script_prefix()}
        return render_to_response("events.html", context)
    else:
        return post_event(request)


def detail(request, event_id):
    e = get_object_or_404(models.Event, pk=event_id)
    context = {'event': e,
               'slash': get_script_prefix()}
    return render_to_response("event.html", context)


def post_event(request):
    if request.method == 'POST':
        event = json.loads(request.body)
        assert isinstance(event, dict)

        values = {}
        values["what"] = event["what"]
        values["tags"] = event.get("tags", None)
        values["when"] = datetime.datetime.fromtimestamp(
            event.get("when", time.time()))
        if "data" in event:
            values["data"] = event["data"]

        e = models.Event(**values)
        e.save()

        return HttpResponse(status=200)
    else:
        return HttpResponse(status=405)


def get_data(request):
    if 'jsonp' in request.REQUEST:
        response = HttpResponse(
            "%s(%s)" % (request.REQUEST.get('jsonp'),
                        json.dumps(fetch(request), cls=EventEncoder)),
            mimetype='text/javascript')
    else:
        response = HttpResponse(
            json.dumps(fetch(request), cls=EventEncoder),
            mimetype="application/json")
    return response


def fetch(request):
    #XXX we need to move to USE_TZ=True to get rid of naive-time conversions
    def make_naive(dt):
        if 'tz' in request.GET:
            tz = timezone(request.GET['tz'])
        else:
            tz = get_current_timezone()
        local_dt = dt.astimezone(tz)
        if hasattr(local_dt, 'normalize'):
            local_dt = local_dt.normalize()
        return local_dt.replace(tzinfo=None)

    if request.GET.get("from", None) is not None:
        time_from = make_naive(parseATTime(request.GET["from"]))
    else:
        time_from = datetime.datetime.fromtimestamp(0)

    if request.GET.get("until", None) is not None:
        time_until = make_naive(parseATTime(request.GET["until"]))
    else:
        time_until = datetime.datetime.now()

    tags = request.GET.get("tags", None)
    if tags is not None:
        tags = request.GET.get("tags").split(" ")

    return [x.as_dict() for x in
            models.Event.find_events(time_from, time_until, tags=tags)]
20
docker/blocks/graphite1/files/initial_data.json
Normal file
@ -0,0 +1,20 @@
[
  {
    "pk": 1,
    "model": "auth.user",
    "fields": {
      "username": "admin",
      "first_name": "",
      "last_name": "",
      "is_active": true,
      "is_superuser": true,
      "is_staff": true,
      "last_login": "2011-09-20 17:02:14",
      "groups": [],
      "user_permissions": [],
      "password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236",
      "email": "root@example.com",
      "date_joined": "2011-09-20 17:02:14"
    }
  }
]
42
docker/blocks/graphite1/files/local_settings.py
Normal file
@ -0,0 +1,42 @@
# Edit this file to override the default graphite settings, do not edit settings.py

# Turn on debugging and restart apache if you ever see an "Internal Server Error" page
#DEBUG = True

# Set your local timezone (django will try to figure this out automatically)
TIME_ZONE = 'UTC'

# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely
#MEMCACHE_HOSTS = ['127.0.0.1:11211']

# Sometimes you need to do a lot of rendering work but cannot share your storage mount
#REMOTE_RENDERING = True
#RENDERING_HOSTS = ['fastserver01','fastserver02']
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True

# If you've got more than one backend server they should all be listed here
#CLUSTER_SERVERS = []

# Override this if you need to provide documentation specific to your graphite deployment
#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite"

# Enable email-related features
#SMTP_SERVER = "mail.mycompany.com"

# LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"

# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!)
#DATABASE_ENGINE = 'mysql' # or 'postgres'
#DATABASE_NAME = 'graphite'
#DATABASE_USER = 'graphite'
#DATABASE_PASSWORD = 'graphite-is-awesome'
#DATABASE_HOST = 'mysql.mycompany.com'
#DATABASE_PORT = '3306'
1
docker/blocks/graphite1/files/my_htpasswd
Normal file
@ -0,0 +1 @@
grafana:$apr1$4R/20xhC$8t37jPP5dbcLr48btdkU//
70
docker/blocks/graphite1/files/nginx.conf
Normal file
@ -0,0 +1,70 @@
daemon off;
user www-data;
worker_processes 1;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    server_tokens off;

    server_names_hash_bucket_size 32;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    gzip on;
    gzip_disable "msie6";

    server {
        listen 80 default_server;
        server_name _;

        open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m;

        location / {
            proxy_pass http://127.0.0.1:8000;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Server $host;
            proxy_set_header X-Forwarded-Host $host;
            proxy_set_header Host $host;

            client_max_body_size 10m;
            client_body_buffer_size 128k;

            proxy_connect_timeout 90;
            proxy_send_timeout 90;
            proxy_read_timeout 90;

            proxy_buffer_size 4k;
            proxy_buffers 4 32k;
            proxy_busy_buffers_size 64k;
            proxy_temp_file_write_size 64k;
        }

        add_header Access-Control-Allow-Origin "*";
        add_header Access-Control-Allow-Methods "GET, OPTIONS";
        add_header Access-Control-Allow-Headers "origin, authorization, accept";

        location /content {
            alias /opt/graphite/webapp/content;
        }

        location /media {
            alias /usr/share/pyshared/django/contrib/admin/media;
        }
    }
}
8
docker/blocks/graphite1/files/statsd_config.js
Normal file
@ -0,0 +1,8 @@
{
  graphitePort: 2003,
  graphiteHost: "127.0.0.1",
  port: 8125,
  mgmt_port: 8126,
  backends: ['./backends/graphite'],
  debug: true
}
19
docker/blocks/graphite1/files/storage-aggregation.conf
Normal file
@ -0,0 +1,19 @@
[min]
pattern = \.min$
xFilesFactor = 0.1
aggregationMethod = min

[max]
pattern = \.max$
xFilesFactor = 0.1
aggregationMethod = max

[sum]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum

[default_average]
pattern = .*
xFilesFactor = 0.5
aggregationMethod = average
16
docker/blocks/graphite1/files/storage-schemas.conf
Normal file
@ -0,0 +1,16 @@
[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y

[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d

[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y

[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y
26
docker/blocks/graphite1/files/supervisord.conf
Normal file
@ -0,0 +1,26 @@
[supervisord]
nodaemon = true
environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf'

[program:nginx]
command = /usr/sbin/nginx
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true

[program:carbon-cache]
;user = www-data
command = /opt/graphite/bin/carbon-cache.py --debug start
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true

[program:graphite-webapp]
;user = www-data
directory = /opt/graphite/webapp
environment = PYTHONPATH='/opt/graphite/webapp'
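; explanatory comment: gunicorn below binds the webapp to 127.0.0.1:8000,
; the upstream that files/nginx.conf proxies to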
command = /usr/bin/gunicorn_django -b127.0.0.1:8000 -w2 graphite/settings.py
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true
@ -6,3 +6,4 @@ postgrestest:
POSTGRES_DATABASE: grafana
ports:
- "5432:5432"
command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql
@ -1 +1 @@
v4.2
v4.3
@ -22,7 +22,7 @@ to add and configure a `notification` channel (can be email, Pagerduty or other

{{< imgbox max-width="40%" img="/img/docs/v43/alert_notifications_menu.png" caption="Alerting Notification Channels" >}}

On the Notification Channels page hit the `New Channel` button to go the the page where you
On the Notification Channels page hit the `New Channel` button to go to the page where you
can configure and setup a new Notification Channel.

You specify name and type, and type specific options. You can also test the notification to make
@ -134,7 +134,7 @@ Grafana also supports the following Notification Channels:

# Enable images in notifications {#external-image-store}

Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessable (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessible (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
Amazon S3 and Webdav for this. So to set that up you need to configure the [external image uploader](/installation/configuration/#external-image-storage) in your grafana-server ini config file.

Currently only the Email Channel attaches images if no external image store is specified. To include images in alert notifications for other channels you need to set up an external image store.

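For illustration, a minimal S3 setup in the grafana-server ini file might look like the following (the bucket URL and keys are placeholders, not defaults):

```ini
[external_image_storage]
provider = s3

[external_image_storage.s3]
bucket_url = https://my-grafana-images.s3.amazonaws.com/
access_key = <your access key>
secret_key = <your secret key>
```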
@ -27,14 +27,12 @@ and the conditions that need to be met for the alert to change state and trigger
## Execution

The alert rules are evaluated in the Grafana backend in a scheduler and query execution engine that is part
of core Grafana. Only some data soures are supported right now. They include `Graphite`, `Prometheus`,
of core Grafana. Only some data sources are supported right now. They include `Graphite`, `Prometheus`,
`InfluxDB` and `OpenTSDB`.

### Clustering

We have not implemented clustering yet. So if you run multiple instances of grafana-server
you have to make sure [execute_alerts]({{< relref "installation/configuration.md#alerting" >}})
is true on only one instance or otherwise you will get duplicated notifications.
Currently alerting supports a limited form of high availability. Since v4.2.0 of Grafana, alert notifications are deduped when running multiple servers. This means all alerts are executed on every server but no duplicate alert notifications are sent due to the deduping logic. Proper load balancing of alerts will be introduced in the future.

<div class="clearfix"></div>

@ -61,8 +59,8 @@ specify a query letter, time range and an aggregation function.
avg() OF query(A, 5m, now) IS BELOW 14
```

- `avg()` Controls how the values for **each** serie should be reduced to a value that can be compared against the threshold. Click on the function to change it to another aggregation function.
- `query(A, 5m, now)` The letter defines what query to execute from the **Metrics** tab. The second two parameters defines the time range, `5m, now` means 5 minutes from now to now. You can also do `10m, now-2m` to define a time range that will be 10 minutes from now to 2 minutes from now. This is useful if you want to ignore the last 2 minutes of data.
- `avg()` Controls how the values for **each** series should be reduced to a value that can be compared against the threshold. Click on the function to change it to another aggregation function.
- `query(A, 5m, now)` The letter defines what query to execute from the **Metrics** tab. The second two parameters define the time range, `5m, now` means 5 minutes from now to now. You can also do `10m, now-2m` to define a time range that will be 10 minutes from now to 2 minutes from now. This is useful if you want to ignore the last 2 minutes of data.
- `IS BELOW 14` Defines the type of threshold and the threshold value. You can click on `IS BELOW` to change the type of threshold.

The query used in an alert rule cannot contain any template variables. Currently we only support `AND` and `OR` operators between conditions and they are executed serially.
@ -76,7 +74,7 @@ of another alert in your conditions, and `Time Of Day`.
#### Multiple Series

If a query returns multiple series then the aggregation function and threshold check will be evaluated for each series.
What Grafana does not do currently is track alert rule state **per series**. This has implications that is exemplified
What Grafana does not do currently is track alert rule state **per series**. This has implications that are detailed
in the scenario below.

- Alert condition with query that returns 2 series: **server1** and **server2**
@ -91,8 +89,7 @@ we plan to track state **per series** in a future release.

### No Data / Null values

Below you condition you can configure how the rule evaluation engine should handle queries that return no data or only null valued
data.
Below your conditions you can configure how the rule evaluation engine should handle queries that return no data or only null values.

No Data Option | Description
------------ | -------------
@ -102,23 +99,23 @@ Keep Last State | Keep the current alert rule state, what ever it is.

### Execution errors or timeouts

The last option is how to handle execution or timeout errors.
The last option tells how to handle execution or timeout errors.

Error or timeout option | Description
------------ | -------------
Alerting | Set alert rule state to `Alerting`
Keep Last State | Keep the current alert rule state, whatever it is.

If you an unreliable time series store that where queries sometime timeout or fail randomly you can set this option
t `Keep Last State` to basically ignore them.
If you have an unreliable time series store from which queries sometime timeout or fail randomly you can set this option
to `Keep Last State` in order to basically ignore them.

## Notifications

In the alert tab you can also specify alert rule notifications along with a detailed message about the alert rule.
The message can contain anything, information about how you might solve the issue, link to runbook etc.
The message can contain anything, information about how you might solve the issue, link to runbook, etc.

The actual notifications are configured and shared between multiple alerts. Read the
[Notifications]({{< relref "notifications.md" >}}) guide for how to configure and setup notifications.
[notifications]({{< relref "notifications.md" >}}) guide for how to configure and setup notifications.

## Alert State History & Annotations

@ -131,7 +128,7 @@ submenu in the alert tab to view & clear state history.
{{< imgbox max-width="40%" img="/img/docs/v4/alert_test_rule.png" caption="Test Rule" >}}

The first level of troubleshooting you can do is to hit the **Test Rule** button. You will get a result back that you can expand
to the point where you can see the raw data that was returned form your query.
to the point where you can see the raw data that was returned from your query.

Further troubleshooting can also be done by inspecting the grafana-server log. If it's not an error or for some reason
the log does not say anything you can enable debug logging for some relevant components. This is done

@ -13,6 +13,10 @@ Here you can find links to older versions of the documentation that might be bet
of Grafana.

- [Latest](http://docs.grafana.org)
- [Version 4.4](http://docs.grafana.org/v4.4)
- [Version 4.3](http://docs.grafana.org/v4.3)
- [Version 4.2](http://docs.grafana.org/v4.2)
- [Version 4.1](http://docs.grafana.org/v4.1)
- [Version 4.0](http://docs.grafana.org/v4.0)
- [Version 3.1](http://docs.grafana.org/v3.1)
- [Version 3.0](http://docs.grafana.org/v3.0)
@ -84,8 +84,8 @@ Name | Description
*metrics(namespace, [region])* | Returns a list of metrics in the namespace. (specify region for custom metrics)
*dimension_keys(namespace)* | Returns a list of dimension keys in the namespace.
*dimension_values(region, namespace, metric, dimension_key)* | Returns a list of dimension values matching the specified `region`, `namespace`, `metric` and `dimension_key`.
*ebs_volume_ids(region, instance_id)* | Returns a list of volume id matching the specified `region`, `instance_id`.
*ec2_instance_attribute(region, attribute_name, filters)* | Returns a list of attribute matching the specified `region`, `attribute_name`, `filters`.
*ebs_volume_ids(region, instance_id)* | Returns a list of volume ids matching the specified `region`, `instance_id`.
*ec2_instance_attribute(region, attribute_name, filters)* | Returns a list of attributes matching the specified `region`, `attribute_name`, `filters`.

For details about the metrics CloudWatch provides, please refer to the [CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).

@ -101,10 +101,13 @@ Query | Service
*dimension_values(us-east-1,AWS/RDS,CPUUtilization,DBInstanceIdentifier)* | RDS
*dimension_values(us-east-1,AWS/S3,BucketSizeBytes,BucketName)* | S3

#### ec2_instance_attribute JSON filters
## ec2_instance_attribute examples

The `ec2_instance_attribute` query take `filters` in JSON format.
### JSON filters

The `ec2_instance_attribute` query takes `filters` in JSON format.
You can specify [pre-defined filters of ec2:DescribeInstances](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html).
Note that the actual filtering takes place on Amazon's servers, not in Grafana.

Filters syntax:

@ -116,6 +119,45 @@ Example `ec2_instance_attribute()` query

    ec2_instance_attribute(us-east-1, InstanceId, { "tag:Environment": [ "production" ] })

### Selecting Attributes

Only 1 attribute per instance can be returned. Any flat attribute can be selected (i.e. if the attribute has a single value and isn't an object or array). Below is a list of available flat attributes:

* `AmiLaunchIndex`
* `Architecture`
* `ClientToken`
* `EbsOptimized`
* `EnaSupport`
* `Hypervisor`
* `IamInstanceProfile`
* `ImageId`
* `InstanceId`
* `InstanceLifecycle`
* `InstanceType`
* `KernelId`
* `KeyName`
* `LaunchTime`
* `Platform`
* `PrivateDnsName`
* `PrivateIpAddress`
* `PublicDnsName`
* `PublicIpAddress`
* `RamdiskId`
* `RootDeviceName`
* `RootDeviceType`
* `SourceDestCheck`
* `SpotInstanceRequestId`
* `SriovNetSupport`
* `SubnetId`
* `VirtualizationType`
* `VpcId`

Tags can be selected by prepending the tag name with `Tags.`

Example `ec2_instance_attribute()` query

    ec2_instance_attribute(us-east-1, Tags.Name, { "tag:Team": [ "sysops" ] })

## Cost

Amazon provides 1 million CloudWatch API requests each month at no additional charge. Past this,

@ -36,7 +36,7 @@ Name | Description
### Proxy vs Direct access

Proxy access means that the Grafana backend will proxy all requests from the browser. So requests to InfluxDB will be channeled through
`grafana-server`. This means that the URL you specify needs to be accessable from the server you are running Grafana on. Proxy access
`grafana-server`. This means that the URL you specify needs to be accessible from the server you are running Grafana on. Proxy access
mode is also more secure as the username & password will never reach the browser.

## Query Editor

@ -88,7 +88,7 @@ You can switch to raw query mode by clicking hamburger icon and then `Switch edi
- $m = replaced with measurement name
- $measurement = replaced with measurement name
- $col = replaced with column name
- $tag_exampletag = replaced with the value of the `exampletag` tag. To use your tag as an alias in the ALIAS BY field then the tag must be used to group by in the query.
- $tag_exampletag = replaced with the value of the `exampletag` tag. The syntax is `$tag_yourTagName` (must start with `$tag_`). To use your tag as an alias in the ALIAS BY field then the tag must be used to group by in the query.
- You can also use [[tag_hostname]] pattern replacement syntax. For example, in the ALIAS BY field using this text `Host: [[tag_hostname]]` would substitute in the `hostname` tag value for each legend value and an example legend value would be: `Host: server1`.

### Table query / raw data

@ -132,7 +132,7 @@ You can fetch key names for a given measurement.
SHOW TAG KEYS [FROM <measurement_name>]
```

If you have a variable with key names you can use this variable in a group by clause. This will allow you to change group by using the variable dropdown a the top
If you have a variable with key names you can use this variable in a group by clause. This will allow you to change group by using the variable dropdown at the top
of the dashboard.

### Using variables in queries

@ -29,8 +29,7 @@ data from a MySQL compatible database.
The database user you specify when you add the data source should only be granted SELECT permissions on
the specified database & tables you want to query. Grafana does not validate that the query is safe. The query
could include any SQL statement. For example, statements like `USE otherdb;` and `DROP TABLE user;` would be
executed. To protect against this we **Highly** recommmend you create a specific mysql user with
restricted permissions.
executed. To protect against this we **Highly** recommend you create a specific mysql user with restricted permissions.

Example:

@ -49,11 +48,9 @@ Macro example | Description
------------ | -------------
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn > FROM_UNIXTIME(1494410783) AND dateColumn < FROM_UNIXTIME(1494497183)*

We plan to add many more macros. If you have suggestions for what macros you would like to see, please
[open an issue](https://github.com/grafana/grafana) in our GitHub repo.
We plan to add many more macros. If you have suggestions for what macros you would like to see, please [open an issue](https://github.com/grafana/grafana) in our GitHub repo.

The query editor has a link named `Generated SQL` that show up after a query as been executed, while in panel edit mode. Click
on it and it will expand and show the raw interpolated SQL string that was executed.
The query editor has a link named `Generated SQL` that shows up after a query has been executed, while in panel edit mode. Click on it and it will expand and show the raw interpolated SQL string that was executed.

## Table queries

@ -109,8 +106,71 @@ This is something we plan to add.

## Templating

You can use variables in your queries but there are currently no support for defining `Query` variables
that target a MySQL data source.
This feature is currently available in the nightly builds and will be included in the 5.0.0 release.

Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place. Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data being displayed in your dashboard.

Check out the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different types of template variables.

### Query Variable

If you add a template variable of the type `Query`, you can write a MySQL query that can
return things like measurement names, key names or key values that are shown as a dropdown select box.

For example, you can have a variable that contains all values for the `hostname` column in a table if you specify a query like this in the templating variable *Query* setting.

```sql
SELECT hostname FROM my_host
```

A query can return multiple columns and Grafana will automatically create a list from them. For example, the query below will return a list with values from `hostname` and `hostname2`.

```sql
SELECT my_host.hostname, my_other_host.hostname2 FROM my_host JOIN my_other_host ON my_host.city = my_other_host.city
```

Another option is a query that can create a key/value variable. The query should return two columns that are named `__text` and `__value`. The `__text` column value should be unique (if it is not unique then the first value is used). The options in the dropdown will have a text and value that allows you to have a friendly name as text and an id as the value. An example query with `hostname` as the text and `id` as the value:

```sql
SELECT hostname AS __text, id AS __value FROM my_host
```

You can also create nested variables. For example, if you had another variable named `region`, you could have
the hosts variable only show hosts from the currently selected region with a query like this (if `region` is a multi-value variable then use the `IN` comparison operator rather than `=` to match against multiple values):

```sql
SELECT hostname FROM my_host WHERE region IN($region)
```

### Using Variables in Queries

Template variables are quoted automatically, so if it is a string value do not wrap it in quotes in WHERE clauses. If the variable is a multi-value variable then use the `IN` comparison operator rather than `=` to match against multiple values.

There are two syntaxes:

`$<varname>` Example with a template variable named `hostname`:

```sql
SELECT
  UNIX_TIMESTAMP(atimestamp) as time_sec,
  aint as value,
  avarchar as metric
FROM my_table
WHERE $__timeFilter(atimestamp) and hostname in($hostname)
ORDER BY atimestamp ASC
```

`[[varname]]` Example with a template variable named `hostname`:

```sql
SELECT
  UNIX_TIMESTAMP(atimestamp) as time_sec,
  aint as value,
  avarchar as metric
FROM my_table
WHERE $__timeFilter(atimestamp) and hostname in([[hostname]])
ORDER BY atimestamp ASC
```

## Alerting

@ -78,7 +78,7 @@ For details of *metric names*, *label names* and *label values* are please refer
|
||||
There are two syntaxes:
|
||||
|
||||
- `$<varname>` Example: rate(http_requests_total{job=~"$job"}[5m])
|
||||
- `[[varname]]` Example: rate(http_requests_total{job="my[[job]]"}[5m])
|
||||
- `[[varname]]` Example: rate(http_requests_total{job=~"[[job]]"}[5m])
|
||||
|
||||
Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. When the *Multi-value* or *Include all value*
|
||||
options are enabled, Grafana converts the labels from plain text to a regex compatible string. Which means you have to use `=~` instead of `=`.
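A rough sketch of that conversion, assuming the selected values come in as a slice (this is an illustration of the idea, not Grafana's actual code): the values are regex-escaped and joined with `|`, which is why the label matcher must be `=~`.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// toRegexAlternation converts a multi-value selection into a single
// regex-compatible string, e.g. ["node", "mysql"] -> "node|mysql".
func toRegexAlternation(values []string) string {
	escaped := make([]string, len(values))
	for i, v := range values {
		escaped[i] = regexp.QuoteMeta(v) // protect regex metacharacters in label values
	}
	return strings.Join(escaped, "|")
}

func main() {
	jobs := []string{"node", "mysql"}
	expr := fmt.Sprintf(`rate(http_requests_total{job=~"%s"}[5m])`, toRegexAlternation(jobs))
	fmt.Println(expr)
	// Output: rate(http_requests_total{job=~"node|mysql"}[5m])
}
```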

@@ -13,7 +13,7 @@ weight = 20

The purpose of this data source is to make it easier to create fake data for any panel.
Using `Grafana TestData` you can build your own time series and have any panel render it.
This makes it much easier to verify functionality since the data can be shared very
This makes it much easier to verify functionality since the data can be shared very easily.

## Enable

@@ -34,7 +34,7 @@ The singlestat panel has a normal query editor to allow you to define your exact metric

* `delta` - The total incremental increase (of a counter) in the series. An attempt is made to account for counter resets, but this will only be accurate for single instance metrics. Used to show total counter increase in time series. (A sketch of this calculation follows after this list.)
* `diff` - The difference between 'current' (last value) and 'first'.
* `range` - The difference between 'min' and 'max'. Useful to show the range of change for a gauge.
4. `Postfixes`: The Postfix fields let you define a custom label and font-size (as a %) to appear *after* the value
4. `Prefix/Postfix`: The Prefix/Postfix fields let you define a custom label and font-size (as a %) to appear *before/after* the value. The `$__name` variable can be used here to use the series name or alias from the metric query.
5. `Units`: Units are appended to the Singlestat within the panel, and will respect the color and threshold settings for the value.
6. `Decimals`: The Decimal field allows you to override the automatic decimal precision, and set it explicitly.
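The `delta` description above is dense, so here is a minimal sketch of how a reset-aware delta over a counter series could be computed. This illustrates the idea only — it is not Grafana's implementation — and it assumes the series is an ordered slice of float samples:

```go
package main

import "fmt"

// counterDelta sums the increases in a counter series. When a sample drops
// below its predecessor we assume the counter was reset and count the new
// sample itself as the increase, which is only exact for single-instance metrics.
func counterDelta(samples []float64) float64 {
	total := 0.0
	for i := 1; i < len(samples); i++ {
		if samples[i] >= samples[i-1] {
			total += samples[i] - samples[i-1]
		} else {
			total += samples[i] // counter reset: best-effort guess
		}
	}
	return total
}

func main() {
	// 40 -> 5 looks like a reset; the delta counts 10+20+5+10 = 45.
	fmt.Println(counterDelta([]float64{10, 20, 40, 5, 15}))
}
```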

@@ -24,9 +24,9 @@ Read the [Basic Concepts](/guides/basic_concepts) document to get a crash course

### Top header

Let's start with creating a new Dashboard. You can find the new Dashboard link at the bottom of the Dashboard picker. You now have a blank Dashboard.
Let's start with creating a new Dashboard. You can find the new Dashboard link on the right side of the Dashboard picker. You now have a blank Dashboard.

<img class="no-shadow" src="/img/docs/v2/v2_top_nav_annotated.png">
<img class="no-shadow" src="/img/docs/v45/top_nav_annotated.png">

The image above shows you the top header for a Dashboard.

@@ -41,19 +41,7 @@ The image above shows you the top header for a Dashboard.

Dashboards are at the core of what Grafana is all about. Dashboards are composed of individual Panels arranged on a number of Rows. Grafana ships with a variety of Panels. Grafana makes it easy to construct the right queries, and customize the display properties so that you can create the perfect Dashboard for your need. Each Panel can interact with data from any configured Grafana Data Source (currently InfluxDB, Graphite, OpenTSDB, Prometheus and Cloudwatch). The [Basic Concepts](/guides/basic_concepts) guide explores these key ideas in detail.

## Adding & Editing Graphs and Panels



1. You add panels via row menu. The row menu is the green icon to the left of each row.
2. To edit the graph you click on the graph title to open the panel menu, then `Edit`.
3. This should take you to the `Metrics` tab. In this tab you should see the editor for your default data source.

When you click the `Metrics` tab, you are presented with a Query Editor that is specific to the Panel Data Source. Use the Query Editor to build your queries and Grafana will visualize them in real time.

<img src="/img/docs/v2/dashboard_annotated.png" class="no-shadow">
<img src="/img/docs/v45/dashboard_annotated.png" class="no-shadow">

1. Zoom out time range
2. Time picker dropdown. Here you can access relative time range options, auto refresh options and set custom absolute time ranges.

@@ -62,6 +50,17 @@ When you click the `Metrics` tab, you are presented with a Query Editor that is

5. Dashboard panel. You edit panels by clicking the panel title.
6. Graph legend. You can change series colors, y-axis and series visibility directly from the legend.

## Adding & Editing Graphs and Panels



1. You add panels via row menu. The row menu is the icon to the left of each row.
2. To edit the graph you click on the graph title to open the panel menu, then `Edit`.
3. This should take you to the `Metrics` tab. In this tab you should see the editor for your default data source.

When you click the `Metrics` tab, you are presented with a Query Editor that is specific to the Panel Data Source. Use the Query Editor to build your queries and Grafana will visualize them in real time.

## Drag-and-Drop panels

You can Drag-and-Drop Panels within and between Rows. Click and hold the Panel title, and drag it to its new location. You can also easily resize panels by clicking the (-) and (+) icons.
docs/sources/guides/whats-new-in-v4-5.md (new file, 74 lines)
@@ -0,0 +1,74 @@

+++
title = "What's New in Grafana v4.5"
description = "Feature & improvement highlights for Grafana v4.5"
keywords = ["grafana", "new", "documentation", "4.5"]
type = "docs"
[menu.docs]
name = "Version 4.5"
identifier = "v4.5"
parent = "whatsnew"
weight = -4
+++

# What's New in Grafana v4.5

## Highlights

### New Prometheus query editor

The new query editor has full syntax highlighting, as well as auto-complete for metrics, functions, and range vectors.



There are also integrated function docs right in the query editor!



### Elasticsearch: Add ad-hoc filters from the table panel


### Table cell links!
Create column styles that turn cells into links that use the value in the cell (or other row values) to generate a URL to another dashboard or system:


### Query Inspector
Query Inspector is a new feature that shows query requests and responses. This can be helpful if a graph is not shown or shows something very different from what you expected.
More information [here](https://community.grafana.com/t/using-grafanas-query-inspector-to-troubleshoot-issues/2630).


## Changelog

### New Features

* **Table panel**: Render cell values as links that can have a URL template that uses variables from the current table row. [#3754](https://github.com/grafana/grafana/issues/3754)
* **Elasticsearch**: Add ad hoc filters directly by clicking values in the table panel [#8052](https://github.com/grafana/grafana/issues/8052).
* **MySQL**: New rich query editor with syntax highlighting
* **Prometheus**: New rich query editor with syntax highlighting, metric & range auto-complete and integrated function docs. [#5117](https://github.com/grafana/grafana/issues/5117)

### Enhancements

* **GitHub OAuth**: Support for GitHub organizations with 100+ teams. [#8846](https://github.com/grafana/grafana/issues/8846), thx [@skwashd](https://github.com/skwashd)
* **Graphite**: Calls to the Graphite API /metrics/find now include the panel or dashboard time range (from & until) in most cases, [#8055](https://github.com/grafana/grafana/issues/8055)
* **Graphite**: Added new Graphite 1.0 functions, available if you set the version to 1.0.x in data source settings. New functions: mapSeries, reduceSeries, isNonNull, groupByNodes, offsetToZero, grep, weightedAverage, removeEmptySeries, aggregateLine, averageOutsidePercentile, delay, exponentialMovingAverage, fallbackSeries, integralByInterval, interpolate, invert, linearRegression, movingMin, movingMax, movingSum, multiplySeriesWithWildcards, pow, powSeries, removeBetweenPercentile, squareRoot, timeSlice, closes [#8261](https://github.com/grafana/grafana/issues/8261)
* **Elasticsearch**: Ad-hoc filters now use query phrase match filters instead of term filters, works on non keyword/raw fields [#9095](https://github.com/grafana/grafana/issues/9095).

### Breaking change

* **InfluxDB/Elasticsearch**: The panel & data source option named "Group by time interval" is now named "Min time interval" and now always defines a lower limit for the auto group-by time, without requiring the `>` prefix (that prefix still works). This should in theory have close to zero actual impact on existing dashboards. It does mean that if you used this setting to define a hard group-by time interval of, say, "1d", then if you zoomed to a wide enough time range the interval could increase above "1d", as the setting is now always considered a lower limit.

This option is now renamed (and moved to the Options sub-section above your queries):


Data source selection, options & help are now above your metric queries.


### Minor Changes

* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
* **InfluxDB**: Added parentheses around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)

## Bug Fixes

* **Modals**: Maintain scroll position after opening/leaving modal [#8800](https://github.com/grafana/grafana/issues/8800)
* **Templating**: You cannot select data source variables as data source for other template variables [#7510](https://github.com/grafana/grafana/issues/7510)
@@ -11,14 +11,16 @@ parent = "http_api"

# Admin API

The admin http API does not currently work with an api token. Api Token's are currently only linked to an organization and organization role. They cannot given
the permission of server admin, only user's can be given that permission. So in order to use these API calls you will have to use basic auth and Grafana user
with Grafana admin permission.
The Admin HTTP API does not currently work with an API Token. API Tokens are currently only linked to an organization and an organization role. They cannot be given
the permission of server admin, only users can be given that permission. So in order to use these API calls you will have to use Basic Auth and the Grafana user
must have the Grafana Admin permission. (The default admin user is called `admin` and has permission to use this API.)
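As a concrete illustration of the Basic Auth requirement, here is a small Go sketch that calls the settings endpoint documented below with the default `admin` user. The host and credentials are placeholders for a local test instance:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Hypothetical local instance; adjust host and credentials to your setup.
	req, err := http.NewRequest("GET", "http://localhost:3000/api/admin/settings", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("admin", "admin") // API tokens will not work for admin endpoints

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```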

## Settings

`GET /api/admin/settings`

Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.

**Example Request**:

    GET /api/admin/settings

@@ -176,6 +178,8 @@ with Grafana admin permission.

`GET /api/admin/stats`

Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.

**Example Request**:

    GET /api/admin/stats

@@ -203,7 +207,7 @@ with Grafana admin permission.

`POST /api/admin/users`

Create new user
Create new user. Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.

**Example Request**:

@@ -229,7 +233,8 @@ Create new user

`PUT /api/admin/users/:id/password`

Change password for specific user
Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.
Change password for a specific user.

**Example Request**:

@@ -250,6 +255,8 @@ Change password for specific user

`PUT /api/admin/users/:id/permissions`

Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.

**Example Request**:

    PUT /api/admin/users/2/permissions HTTP/1.1

@@ -269,6 +276,8 @@ Change password for specific user

`DELETE /api/admin/users/:id`

Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.

**Example Request**:

    DELETE /api/admin/users/2 HTTP/1.1

@@ -286,6 +295,8 @@ Change password for specific user

`POST /api/admin/pause-all-alerts`

Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.

**Example Request**:

    POST /api/admin/pause-all-alerts HTTP/1.1

@@ -240,7 +240,7 @@ Get all tags of dashboards

`GET /api/search/`

Status Codes:
Query parameters:

- **query** – Search Query
- **tag** – Tag to use

@@ -269,9 +269,3 @@ Status Codes:

        "isStarred":false
      }
    ]

        "email":"admin@mygraf.com",
        "login":"admin",
        "role":"Admin"
      }
    ]

@@ -137,7 +137,7 @@ parent = "http_api"

`POST /api/datasources`

**Example Request**:
**Example Graphite Request**:

    POST /api/datasources HTTP/1.1
    Accept: application/json

@@ -152,6 +152,28 @@ parent = "http_api"

      "basicAuth":false
    }

**Example CloudWatch Request**:

```
POST /api/datasources HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk

{
  "name": "test_datasource",
  "type": "cloudwatch",
  "url": "http://monitoring.us-west-1.amazonaws.com",
  "access": "proxy",
  "jsonData": {
    "authType": "keys",
    "defaultRegion": "us-west-1"
  },
  "secureJsonData": {
    "accessKey": "Ol4pIDpeKSA6XikgOl4p",
    "secretKey": "dGVzdCBrZXkgYmxlYXNlIGRvbid0IHN0ZWFs"
  }
}
```

**Example Response**:

@@ -20,9 +20,9 @@ parent = "http_api"

    GET /api/users HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`.
Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`. Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@@ -55,10 +55,12 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

    GET /api/users/search?perpage=10&page=1&query=mygraf HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`. The `totalCount` field in the response can be used for pagination of the user list, e.g. if `totalCount` is equal to 100 users and the `perpage` parameter is set to 10 then there are 10 pages of users. The `query` parameter is optional and it will return results where the query value is contained in one of the `name`, `login` or `email` fields. Query values with spaces need to be URL encoded, e.g. `query=Jane%20Doe`.

Requires basic authentication and that the authenticated user is a Grafana Admin.
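The `totalCount` arithmetic above generalizes to a ceiling division; a small sketch:

```go
package main

import "fmt"

// pageCount returns how many pages are needed to list totalCount users
// when each page holds perPage entries (ceiling division).
func pageCount(totalCount, perPage int) int {
	if perPage <= 0 {
		return 0
	}
	return (totalCount + perPage - 1) / perPage
}

func main() {
	fmt.Println(pageCount(100, 10)) // 10 pages, matching the example above
	fmt.Println(pageCount(101, 10)) // 11 pages: the last page holds one user
}
```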

**Example Response**:

    HTTP/1.1 200

@@ -94,7 +96,9 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

    GET /api/users/1 HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@@ -126,7 +130,9 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

    GET /api/users/lookup?loginOrEmail=admin HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@@ -152,7 +158,7 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

    PUT /api/users/2 HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

    {
      "email":"user@mygraf.com",

@@ -161,6 +167,8 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

      "theme":"light"
    }

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

    HTTP/1.1 200

@@ -178,7 +186,9 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

    GET /api/users/1/orgs HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@@ -246,11 +256,29 @@ Changes the password for the user

    {"message":"User password changed"}

## Switch user context
## Switch user context for a specified user

`POST /api/user/using/:organisationId`
`POST /api/users/:userId/using/:organizationId`

Switch user context to the given organisation.
Switch user context to the given organization. Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Request**:

    POST /api/users/7/using/2 HTTP/1.1
    Authorization: Basic YWRtaW46YWRtaW4=

**Example Response**:

    HTTP/1.1 200
    Content-Type: application/json

    {"message":"Active organization changed"}

## Switch user context for signed in user

`POST /api/user/using/:organizationId`

Switch user context to the given organization.

**Example Request**:

@@ -15,6 +15,12 @@ weight = 1

The Grafana back-end has a number of configuration options that can be
specified in a `.ini` configuration file or specified using environment variables.

## Comments In .ini Files

Semicolons (the `;` char) are the standard way to comment out lines in a `.ini` file.

A common problem is forgetting to uncomment a line in the `custom.ini` (or `grafana.ini`) file, which causes the configuration option to be ignored. For example, `;http_port = 8080` is a comment; remove the leading semicolon for the setting to take effect.

## Config file locations

- Default configuration from `$WORKING_DIR/conf/defaults.ini`

@@ -206,7 +212,7 @@ For MySQL, use either `true`, `false`, or `skip-verify`.

### max_idle_conn
The maximum number of connections in the idle connection pool.

### max_open_conn
### max_open_conn
The maximum number of open connections to the database.

<hr />
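The two settings above correspond to the standard connection-pool knobs in Go's `database/sql` package, which is what the Grafana back-end sits on top of; the sketch below shows the equivalent calls in a plain Go program (the driver choice and DSN are placeholder assumptions):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // example driver; any database/sql driver works
)

func main() {
	// Placeholder DSN; the point here is the pool configuration below.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/grafana")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.SetMaxIdleConns(2) // corresponds to max_idle_conn
	db.SetMaxOpenConns(0) // corresponds to max_open_conn; 0 means unlimited
}
```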

@@ -246,13 +252,13 @@ Define a white list of allowed ips/domains to use in data sources. Format: `ip_o

### allow_sign_up

Set to `false` to prohibit users from being able to sign up / create
user accounts. Defaults to `true`. The admin user can still create
user accounts. Defaults to `false`. The admin user can still create
users from the [Grafana Admin Pages](../../reference/admin)

### allow_org_create

Set to `false` to prohibit users from creating new organizations.
Defaults to `true`.
Defaults to `false`.

### auto_assign_org

@@ -15,7 +15,8 @@ weight = 1

Description | Download
------------ | -------------
Stable for Debian-based Linux | [grafana_4.4.1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.1_amd64.deb)
Stable for Debian-based Linux | [grafana_4.4.3_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb)
Beta for Debian-based Linux | [grafana_4.5.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.

@@ -23,20 +24,18 @@ installation.

## Install Stable

```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.1_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_4.4.1_amd64.deb
sudo dpkg -i grafana_4.4.3_amd64.deb
```

<!--
## Install Beta
## Install Latest Beta

```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.3.0-beta1_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_4.3.0-beta1_amd64.deb
sudo dpkg -i grafana_4.5.0-beta1_amd64.deb
```
-->

## APT Repository

@@ -14,7 +14,7 @@ weight = 4

Grafana is very easy to install and run using the official Docker container.

    $ docker run -i -p 3000:3000 grafana/grafana
    $ docker run -d -p 3000:3000 grafana/grafana

All Grafana configuration settings can be defined using environment
variables; this is especially useful when using the above container (for example, setting `GF_SECURITY_ADMIN_PASSWORD=secret` overrides the admin password).
@@ -15,7 +15,8 @@ weight = 2

Description | Download
------------ | -------------
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.4.1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.1-1.x86_64.rpm)
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.4.3 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm)
Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-beta1.x86_64.rpm)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.

@@ -24,19 +25,19 @@ installation.

You can install Grafana using Yum directly.

    $ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.1-1.x86_64.rpm
    $ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm

Or install manually using `rpm`.

#### On CentOS / Fedora / Redhat:

    $ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.1-1.x86_64.rpm
    $ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm
    $ sudo yum install initscripts fontconfig
    $ sudo rpm -Uvh grafana-4.4.1-1.x86_64.rpm
    $ sudo rpm -Uvh grafana-4.4.3-1.x86_64.rpm

#### On OpenSuse:

    $ sudo rpm -i --nodeps grafana-4.4.1-1.x86_64.rpm
    $ sudo rpm -i --nodeps grafana-4.4.3-1.x86_64.rpm

## Install via YUM Repository

@@ -11,13 +11,18 @@ weight = 8

# Troubleshooting

## visualization & query issues
## Visualization & Query issues

{{< imgbox max-width="40%" img="/img/docs/v45/query_inspector.png" caption="Query Inspector" >}}

The most common problems are related to the query & response from your data source. Even if it looks
like a bug or visualization issue in Grafana, it is 99% of the time a problem with the data source query or
the data source response.

So make sure to check the query sent and the raw response, learn how in this guide: [How to troubleshoot metric query issues](https://community.grafana.com/t/how-to-troubleshoot-metric-query-issues/50)
To check this you should use the Query Inspector (new in Grafana v4.5). The Query Inspector shows query requests and responses.

For more on the Query Inspector read [this guide here](https://community.grafana.com/t/using-grafanas-query-inspector-to-troubleshoot-issues/2630). For
older versions of Grafana read the [how to troubleshoot metric query issues](https://community.grafana.com/t/how-to-troubleshoot-metric-query-issues/50/2) article.

## Logging

@@ -13,7 +13,7 @@ weight = 3

Description | Download
------------ | -------------
Latest stable package for Windows | [grafana.4.4.1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.1.windows-x64.zip)
Latest stable package for Windows | [grafana.4.4.3.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3.windows-x64.zip)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@@ -17,7 +17,7 @@ you can get title, tags, and text information for the event.

## Queries

Annotatation events are fetched via annotation queries. To add a new annotation query to a dashboard
Annotation events are fetched via annotation queries. To add a new annotation query to a dashboard
open the dashboard settings menu, then select `Annotations`. This will open the dashboard annotations
settings view. To create a new annotation query hit the `New` button.

@@ -88,7 +88,7 @@ The query expressions are different for each data source.

- [Elasticsearch templating queries]({{< relref "features/datasources/elasticsearch.md#templating" >}})
- [InfluxDB templating queries]({{< relref "features/datasources/influxdb.md#templating" >}})
- [Prometheus templating queries]({{< relref "features/datasources/prometheus.md#templating" >}})
- [OpenTSDB templating queries]({{< relref "features/datasources/prometheus.md#templating" >}})
- [OpenTSDB templating queries]({{< relref "features/datasources/opentsdb.md#templating" >}})

One thing to note is that query expressions can contain references to other variables and in effect create linked variables.
Grafana will detect this and automatically refresh a variable when one of its containing variables changes.

@@ -97,7 +97,7 @@ Grafana will detect this and automatically refresh a variable when one of its c

Option | Description
------- | --------
*Mulit-value* | If enabled, the variable will support the selection of multiple options at the same time.
*Multi-value* | If enabled, the variable will support the selection of multiple options at the same time.
*Include All option* | Add a special `All` option whose value includes all options.
*Custom all value* | By default the `All` value will include all options in a combined expression. This can become very long and can have performance problems. Many times it can be better to specify a custom all value, like a wildcard regex. To make it possible to have custom regex, globs or lucene syntax in the **Custom all value** option it is never escaped, so you will have to think about what is a valid value for your data source.

@@ -177,6 +177,10 @@ This is used in the WHERE clause for the InfluxDB data source. Grafana adds it a

The `$__timeFilter` is used in the MySQL data source.

### The $__name Variable

This variable is only available in the Singlestat panel and can be used in the prefix or suffix fields on the Options tab. The variable will be replaced with the series name or alias.

## Repeating Panels

Template variables can be very useful to dynamically change your queries across a whole dashboard. If you want
@@ -1,4 +1,4 @@

{
  "stable": "4.2.0",
  "testing": "4.2.0"
  "stable": "4.4.1",
  "testing": "4.4.1"
}

package.json (10 lines changed)
@@ -1,10 +1,10 @@

{
  "author": {
    "name": "Torkel Ödegaard",
    "company": "Coding Instinct AB"
    "company": "Grafana Labs"
  },
  "name": "grafana",
  "version": "5.0.0-pre1",
  "version": "4.5.0-beta1",
  "repository": {
    "type": "git",
    "url": "http://github.com/grafana/grafana.git"

@@ -33,7 +33,7 @@

    "grunt-ng-annotate": "^3.0.0",
    "grunt-notify": "^0.4.5",
    "grunt-postcss": "^0.8.0",
    "grunt-sass": "^1.2.1",
    "grunt-sass": "^2.0.0",
    "grunt-string-replace": "~1.3.1",
    "grunt-systemjs-builder": "^0.2.7",
    "grunt-usemin": "3.1.1",

@@ -59,10 +59,12 @@

  },
  "scripts": {
    "build": "./node_modules/grunt-cli/bin/grunt",
    "test": "./node_modules/grunt-cli/bin/grunt test"
    "test": "./node_modules/grunt-cli/bin/grunt test",
    "dev": "./node_modules/grunt-cli/bin/grunt && ./node_modules/grunt-cli/bin/grunt watch"
  },
  "license": "Apache-2.0",
  "dependencies": {
    "ace-builds": "^1.2.8",
    "eventemitter3": "^2.0.2",
    "gaze": "^1.1.2",
    "grunt-jscs": "3.0.1",
@@ -6,6 +6,7 @@ set -e

IS_UPGRADE=false

case "$1" in
  configure)
    [ -z "$GRAFANA_USER" ] && GRAFANA_USER="grafana"

@@ -17,3 +17,6 @@ CONF_FILE=/etc/grafana/grafana.ini

RESTART_ON_UPGRADE=true

PLUGINS_DIR=/var/lib/grafana/plugins

# Only used on systemd systems
PID_FILE_DIR=/var/run/grafana

@@ -12,11 +12,13 @@ Group=grafana

Type=simple
Restart=on-failure
WorkingDirectory=/usr/share/grafana
ExecStart=/usr/sbin/grafana-server \
  --config=${CONF_FILE} \
  --pidfile=${PID_FILE} \
  cfg:default.paths.logs=${LOG_DIR} \
  cfg:default.paths.data=${DATA_DIR} \
RuntimeDirectory=grafana
RuntimeDirectoryMode=0750
ExecStart=/usr/sbin/grafana-server \
  --config=${CONF_FILE} \
  --pidfile=${PID_FILE_DIR}/grafana-server.pid \
  cfg:default.paths.logs=${LOG_DIR} \
  cfg:default.paths.data=${DATA_DIR} \
  cfg:default.paths.plugins=${PLUGINS_DIR}
LimitNOFILE=10000
TimeoutStopSec=20

@@ -1,13 +1,15 @@

#! /usr/bin/env bash
version=4.4.1
version=4.4.2

wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb

package_cloud push grafana/stable/debian/jessie grafana_${version}_amd64.deb
package_cloud push grafana/stable/debian/wheezy grafana_${version}_amd64.deb
package_cloud push grafana/stable/debian/stretch grafana_${version}_amd64.deb

package_cloud push grafana/testing/debian/jessie grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/wheezy grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/stretch grafana_${version}_amd64.deb

wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-${version}-1.x86_64.rpm

@@ -16,3 +18,5 @@ package_cloud push grafana/testing/el/7 grafana-${version}-1.x86_64.rpm

package_cloud push grafana/stable/el/7 grafana-${version}-1.x86_64.rpm
package_cloud push grafana/stable/el/6 grafana-${version}-1.x86_64.rpm

rm grafana*.{deb,rpm}

@@ -1,14 +1,16 @@

#! /usr/bin/env bash
deb_ver=4.3.0-beta1
rpm_ver=4.3.0-beta1
deb_ver=4.5.0-beta1
rpm_ver=4.5.0-beta1

wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${deb_ver}_amd64.deb
# wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${deb_ver}_amd64.deb

package_cloud push grafana/testing/debian/jessie grafana_${deb_ver}_amd64.deb
# package_cloud push grafana/testing/debian/jessie grafana_${deb_ver}_amd64.deb
package_cloud push grafana/testing/debian/wheezy grafana_${deb_ver}_amd64.deb
package_cloud push grafana/testing/debian/stretch grafana_${deb_ver}_amd64.deb

wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-${rpm_ver}.x86_64.rpm

package_cloud push grafana/testing/el/6 grafana-${rpm_ver}.x86_64.rpm
package_cloud push grafana/testing/el/7 grafana-${rpm_ver}.x86_64.rpm

rm grafana*.{deb,rpm}

@@ -25,6 +25,7 @@ stopGrafana() {

  fi
}

# Initial installation: $1 == 1
# Upgrade: $1 == 2, and configured to restart on upgrade
if [ $1 -eq 1 ] ; then

@@ -17,3 +17,6 @@ CONF_FILE=/etc/grafana/grafana.ini

RESTART_ON_UPGRADE=true

PLUGINS_DIR=/var/lib/grafana/plugins

# Only used on systemd systems
PID_FILE_DIR=/var/run/grafana

@@ -12,11 +12,13 @@ Group=grafana

Type=simple
Restart=on-failure
WorkingDirectory=/usr/share/grafana
ExecStart=/usr/sbin/grafana-server \
  --config=${CONF_FILE} \
  --pidfile=${PID_FILE} \
  cfg:default.paths.logs=${LOG_DIR} \
  cfg:default.paths.data=${DATA_DIR} \
RuntimeDirectory=grafana
RuntimeDirectoryMode=0750
ExecStart=/usr/sbin/grafana-server \
  --config=${CONF_FILE} \
  --pidfile=${PID_FILE_DIR}/grafana-server.pid \
  cfg:default.paths.logs=${LOG_DIR} \
  cfg:default.paths.data=${DATA_DIR} \
  cfg:default.paths.plugins=${PLUGINS_DIR}
LimitNOFILE=10000
TimeoutStopSec=20

@@ -209,7 +209,7 @@ func (hs *HttpServer) registerRoutes() {

  r.Get("/plugins", wrap(GetPluginList))
  r.Get("/plugins/:pluginId/settings", wrap(GetPluginSettingById))
  r.Get("/plugins/:pluginId/readme", wrap(GetPluginReadme))
  r.Get("/plugins/:pluginId/markdown/:name", wrap(GetPluginMarkdown))

  r.Group("/plugins", func() {
    r.Get("/:pluginId/dashboards/", wrap(GetPluginDashboards))

@@ -217,12 +217,13 @@ func (hs *HttpServer) registerRoutes() {

  }, reqOrgAdmin)

  r.Get("/frontend/settings/", GetFrontendSettings)
  r.Any("/datasources/proxy/:id/*", reqSignedIn, ProxyDataSourceRequest)
  r.Any("/datasources/proxy/:id", reqSignedIn, ProxyDataSourceRequest)
  r.Any("/datasources/proxy/:id/*", reqSignedIn, hs.ProxyDataSourceRequest)
  r.Any("/datasources/proxy/:id", reqSignedIn, hs.ProxyDataSourceRequest)

  // Dashboard
  r.Group("/dashboards", func() {
    r.Combo("/db/:slug").Get(GetDashboard).Delete(DeleteDashboard)
    r.Get("/db/:slug", GetDashboard)
    r.Delete("/db/:slug", reqEditorRole, DeleteDashboard)

    r.Get("/id/:dashboardId/versions", wrap(GetDashboardVersions))
    r.Get("/id/:dashboardId/versions/:id", wrap(GetDashboardVersion))

@@ -32,8 +32,7 @@ func InitAppPluginRoutes(r *macaron.Macaron) {

  url := util.JoinUrlFragments("/api/plugin-proxy/"+plugin.Id, route.Path)
  handlers := make([]macaron.Handler, 0)
  handlers = append(handlers, middleware.Auth(&middleware.AuthOptions{
    ReqSignedIn:     true,
    ReqGrafanaAdmin: route.ReqGrafanaAdmin,
    ReqSignedIn: true,
  }))

  if route.ReqRole != "" {

@@ -217,7 +217,10 @@ func (this *thunderTask) Fetch() {

  this.Done()
}

var client = &http.Client{}
var client *http.Client = &http.Client{
  Timeout:   time.Second * 2,
  Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
}

func (this *thunderTask) fetch() error {
  this.Avatar.timestamp = time.Now()

@@ -39,6 +39,7 @@ type cwRequest struct {

type datasourceInfo struct {
  Profile       string
  Region        string
  AuthType      string
  AssumeRoleArn string
  Namespace     string

@@ -47,6 +48,7 @@ type datasourceInfo struct {

}

func (req *cwRequest) GetDatasourceInfo() *datasourceInfo {
  authType := req.DataSource.JsonData.Get("authType").MustString()
  assumeRoleArn := req.DataSource.JsonData.Get("assumeRoleArn").MustString()
  accessKey := ""
  secretKey := ""

@@ -61,6 +63,7 @@ func (req *cwRequest) GetDatasourceInfo() *datasourceInfo {

  }

  return &datasourceInfo{
    AuthType:      authType,
    AssumeRoleArn: assumeRoleArn,
    Region:        req.Region,
    Profile:       req.DataSource.Database,

@@ -110,7 +113,7 @@ func getCredentials(dsInfo *datasourceInfo) (*credentials.Credentials, error) {

  sessionToken := ""
  var expiration *time.Time
  expiration = nil
  if strings.Index(dsInfo.AssumeRoleArn, "arn:aws:iam:") == 0 {
  if dsInfo.AuthType == "arn" && strings.Index(dsInfo.AssumeRoleArn, "arn:aws:iam:") == 0 {
    params := &sts.AssumeRoleInput{
      RoleArn:         aws.String(dsInfo.AssumeRoleArn),
      RoleSessionName: aws.String("GrafanaSession"),

@@ -166,7 +169,7 @@ func getCredentials(dsInfo *datasourceInfo) (*credentials.Credentials, error) {

      SecretAccessKey: dsInfo.SecretKey,
    }},
    &credentials.SharedCredentialsProvider{Filename: "", Profile: dsInfo.Profile},
    &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute},
    remoteCredProvider(sess),
  })

  credentialCacheLock.Lock()

@@ -42,7 +42,7 @@ func init() {

  "AWS/EC2Spot": {"AvailableInstancePoolsCount", "BidsSubmittedForCapacity", "EligibleInstancePoolCount", "FulfilledCapacity", "MaxPercentCapacityAllocation", "PendingCapacity", "PercentCapacityAllocation", "TargetCapacity", "TerminatingCapacity"},
  "AWS/ECS": {"CPUReservation", "MemoryReservation", "CPUUtilization", "MemoryUtilization"},
  "AWS/EFS": {"BurstCreditBalance", "ClientConnections", "DataReadIOBytes", "DataWriteIOBytes", "MetadataIOBytes", "TotalIOBytes", "PermittedThroughput", "PercentIOLimit"},
  "AWS/ELB": {"HealthyHostCount", "UnHealthyHostCount", "RequestCount", "Latency", "HTTPCode_ELB_4XX", "HTTPCode_ELB_5XX", "HTTPCode_Backend_2XX", "HTTPCode_Backend_3XX", "HTTPCode_Backend_4XX", "HTTPCode_Backend_5XX", "BackendConnectionErrors", "SurgeQueueLength", "SpilloverCount"},
  "AWS/ELB": {"HealthyHostCount", "UnHealthyHostCount", "RequestCount", "Latency", "HTTPCode_ELB_4XX", "HTTPCode_ELB_5XX", "HTTPCode_Backend_2XX", "HTTPCode_Backend_3XX", "HTTPCode_Backend_4XX", "HTTPCode_Backend_5XX", "BackendConnectionErrors", "SurgeQueueLength", "SpilloverCount", "EstimatedALBActiveConnectionCount", "EstimatedALBConsumedLCUs", "EstimatedALBNewConnectionCount", "EstimatedProcessedBytes"},
  "AWS/ElastiCache": {
    "CPUUtilization", "FreeableMemory", "NetworkBytesIn", "NetworkBytesOut", "SwapUsage",
    "BytesUsedForCacheItems", "BytesReadIntoMemcached", "BytesWrittenOutFromMemcached", "CasBadval", "CasHits", "CasMisses", "CmdFlush", "CmdGet", "CmdSet", "CurrConnections", "CurrItems", "DecrHits", "DecrMisses", "DeleteHits", "DeleteMisses", "Evictions", "GetHits", "GetMisses", "IncrHits", "IncrMisses", "Reclaimed",

@@ -78,6 +78,7 @@ func init() {

  "AWS/Lambda": {"Invocations", "Errors", "Duration", "Throttles", "IteratorAge"},
  "AWS/Logs": {"IncomingBytes", "IncomingLogEvents", "ForwardedBytes", "ForwardedLogEvents", "DeliveryErrors", "DeliveryThrottling"},
  "AWS/ML": {"PredictCount", "PredictFailureCount"},
  "AWS/NATGateway": {"PacketsOutToDestination", "PacketsOutToSource", "PacketsInFromSource", "PacketsInFromDestination", "BytesOutToDestination", "BytesOutToSource", "BytesInFromSource", "BytesInFromDestination", "ErrorPortAllocation", "ActiveConnectionCount", "ConnectionAttemptCount", "ConnectionEstablishedCount", "IdleTimeoutCount", "PacketsDropCount"},
  "AWS/OpsWorks": {"cpu_idle", "cpu_nice", "cpu_system", "cpu_user", "cpu_waitio", "load_1", "load_5", "load_15", "memory_buffers", "memory_cached", "memory_free", "memory_swap", "memory_total", "memory_used", "procs"},
  "AWS/Redshift": {"CPUUtilization", "DatabaseConnections", "HealthStatus", "MaintenanceMode", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "PercentageDiskSpaceUsed", "ReadIOPS", "ReadLatency", "ReadThroughput", "WriteIOPS", "WriteLatency", "WriteThroughput"},
  "AWS/RDS": {"ActiveTransactions", "AuroraBinlogReplicaLag", "AuroraReplicaLag", "AuroraReplicaLagMaximum", "AuroraReplicaLagMinimum", "BinLogDiskUsage", "BlockedTransactions", "BufferCacheHitRatio", "CommitLatency", "CommitThroughput", "BinLogDiskUsage", "CPUCreditBalance", "CPUCreditUsage", "CPUUtilization", "DatabaseConnections", "DDLLatency", "DDLThroughput", "Deadlocks", "DeleteLatency", "DeleteThroughput", "DiskQueueDepth", "DMLLatency", "DMLThroughput", "EngineUptime", "FailedSqlStatements", "FreeableMemory", "FreeLocalStorage", "FreeStorageSpace", "InsertLatency", "InsertThroughput", "LoginFailures", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "NetworkThroughput", "Queries", "ReadIOPS", "ReadLatency", "ReadThroughput", "ReplicaLag", "ResultSetCacheHitRatio", "SelectLatency", "SelectThroughput", "SwapUsage", "TotalConnections", "UpdateLatency", "UpdateThroughput", "VolumeBytesUsed", "VolumeReadIOPS", "VolumeWriteIOPS", "WriteIOPS", "WriteLatency", "WriteThroughput"},

@@ -91,7 +92,7 @@ func init() {

  "AWS/SWF": {"DecisionTaskScheduleToStartTime", "DecisionTaskStartToCloseTime", "DecisionTasksCompleted", "StartedDecisionTasksTimedOutOnClose", "WorkflowStartToCloseTime", "WorkflowsCanceled", "WorkflowsCompleted", "WorkflowsContinuedAsNew", "WorkflowsFailed", "WorkflowsTerminated", "WorkflowsTimedOut",
    "ActivityTaskScheduleToCloseTime", "ActivityTaskScheduleToStartTime", "ActivityTaskStartToCloseTime", "ActivityTasksCanceled", "ActivityTasksCompleted", "ActivityTasksFailed", "ScheduledActivityTasksTimedOutOnClose", "ScheduledActivityTasksTimedOutOnStart", "StartedActivityTasksTimedOutOnClose", "StartedActivityTasksTimedOutOnHeartbeat"},
  "AWS/VPN": {"TunnelState", "TunnelDataIn", "TunnelDataOut"},
  "AWS/WAF": {"AllowedRequests", "BlockedRequests", "CountedRequests"},
  "WAF": {"AllowedRequests", "BlockedRequests", "CountedRequests"},
  "AWS/WorkSpaces": {"Available", "Unhealthy", "ConnectionAttempt", "ConnectionSuccess", "ConnectionFailure", "SessionLaunchTime", "InSessionLatency", "SessionDisconnect"},
  "KMS": {"SecondsUntilKeyMaterialExpiration"},
}

@@ -122,6 +123,7 @@ func init() {

  "AWS/Lambda": {"FunctionName", "Resource", "Version", "Alias"},
  "AWS/Logs": {"LogGroupName", "DestinationType", "FilterName"},
  "AWS/ML": {"MLModelId", "RequestMode"},
  "AWS/NATGateway": {"NatGatewayId"},
  "AWS/OpsWorks": {"StackId", "LayerId", "InstanceId"},
  "AWS/Redshift": {"NodeID", "ClusterIdentifier"},
  "AWS/RDS": {"DBInstanceIdentifier", "DBClusterIdentifier", "DatabaseClass", "EngineName", "Role"},

@@ -133,7 +135,7 @@ func init() {

  "AWS/StorageGateway": {"GatewayId", "GatewayName", "VolumeId"},
  "AWS/SWF": {"Domain", "WorkflowTypeName", "WorkflowTypeVersion", "ActivityTypeName", "ActivityTypeVersion"},
  "AWS/VPN": {"VpnId", "TunnelIpAddress"},
  "AWS/WAF": {"Rule", "WebACL"},
  "WAF": {"Rule", "WebACL"},
  "AWS/WorkSpaces": {"DirectoryId", "WorkspaceId"},
  "KMS": {"KeyId"},
}

@@ -166,9 +168,7 @@ func handleGetNamespaces(req *cwRequest, c *middleware.Context) {

  customNamespaces := req.DataSource.JsonData.Get("customMetricsNamespaces").MustString()
  if customNamespaces != "" {
    for _, key := range strings.Split(customNamespaces, ",") {
      keys = append(keys, key)
    }
    keys = append(keys, strings.Split(customNamespaces, ",")...)
  }

  sort.Sort(sort.StringSlice(keys))

@@ -292,11 +292,6 @@ func getAllMetrics(cwData *datasourceInfo) (cloudwatch.ListMetricsOutput, error)

var metricsCacheLock sync.Mutex

func getMetricsForCustomMetrics(dsInfo *datasourceInfo, getAllMetrics func(*datasourceInfo) (cloudwatch.ListMetricsOutput, error)) ([]string, error) {
  result, err := getAllMetrics(dsInfo)
  if err != nil {
    return []string{}, err
  }

  metricsCacheLock.Lock()
  defer metricsCacheLock.Unlock()

@@ -314,6 +309,10 @@ func getMetricsForCustomMetrics(dsInfo *datasourceInfo, getAllMetrics func(*data

  if customMetricsMetricsMap[dsInfo.Profile][dsInfo.Region][dsInfo.Namespace].Expire.After(time.Now()) {
    return customMetricsMetricsMap[dsInfo.Profile][dsInfo.Region][dsInfo.Namespace].Cache, nil
  }
  result, err := getAllMetrics(dsInfo)
  if err != nil {
    return []string{}, err
  }
  customMetricsMetricsMap[dsInfo.Profile][dsInfo.Region][dsInfo.Namespace].Cache = make([]string, 0)
  customMetricsMetricsMap[dsInfo.Profile][dsInfo.Region][dsInfo.Namespace].Expire = time.Now().Add(5 * time.Minute)

@@ -330,11 +329,6 @@ func getMetricsForCustomMetrics(dsInfo *datasourceInfo, getAllMetrics func(*data

var dimensionsCacheLock sync.Mutex

func getDimensionsForCustomMetrics(dsInfo *datasourceInfo, getAllMetrics func(*datasourceInfo) (cloudwatch.ListMetricsOutput, error)) ([]string, error) {
  result, err := getAllMetrics(dsInfo)
  if err != nil {
    return []string{}, err
  }

  dimensionsCacheLock.Lock()
  defer dimensionsCacheLock.Unlock()

@@ -352,6 +346,10 @@ func getDimensionsForCustomMetrics(dsInfo *datasourceInfo, getAllMetrics func(*d

  if customMetricsDimensionsMap[dsInfo.Profile][dsInfo.Region][dsInfo.Namespace].Expire.After(time.Now()) {
    return customMetricsDimensionsMap[dsInfo.Profile][dsInfo.Region][dsInfo.Namespace].Cache, nil
  }
  result, err := getAllMetrics(dsInfo)
  if err != nil {
    return []string{}, err
  }
  customMetricsDimensionsMap[dsInfo.Profile][dsInfo.Region][dsInfo.Namespace].Cache = make([]string, 0)
  customMetricsDimensionsMap[dsInfo.Profile][dsInfo.Region][dsInfo.Namespace].Expire = time.Now().Add(5 * time.Minute)
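The two hunks above move the (potentially slow) `getAllMetrics` call behind the cache-expiry check, so a fresh cache hit no longer pays for a network round trip to CloudWatch. Stripped of the CloudWatch specifics, the pattern looks like this sketch (all names are illustrative, not the actual Grafana code):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type cachedList struct {
	values []string
	expire time.Time
}

var (
	cacheLock sync.Mutex
	cache     = map[string]*cachedList{}
)

// getWithCache returns the cached list for key while it is fresh and only
// invokes the expensive fetch function after the entry has expired.
func getWithCache(key string, fetch func() ([]string, error)) ([]string, error) {
	cacheLock.Lock()
	defer cacheLock.Unlock()

	if entry, ok := cache[key]; ok && entry.expire.After(time.Now()) {
		return entry.values, nil // fresh hit: no fetch at all
	}

	values, err := fetch() // only reached on a miss or after expiry
	if err != nil {
		return nil, err
	}
	cache[key] = &cachedList{values: values, expire: time.Now().Add(5 * time.Minute)}
	return values, nil
}

func main() {
	fetch := func() ([]string, error) { return []string{"CustomMetric1"}, nil }
	v, _ := getWithCache("profile/region/namespace", fetch)
	fmt.Println(v)
}
```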

@@ -1,197 +1,60 @@

package api

import (
  "bytes"
  "io/ioutil"
  "net"
  "net/http"
  "net/http/httputil"
  "net/url"
  "strings"
  "fmt"
  "time"

  "github.com/grafana/grafana/pkg/api/cloudwatch"
  "github.com/grafana/grafana/pkg/api/pluginproxy"
  "github.com/grafana/grafana/pkg/bus"
  "github.com/grafana/grafana/pkg/log"
  "github.com/grafana/grafana/pkg/metrics"
  "github.com/grafana/grafana/pkg/middleware"
  m "github.com/grafana/grafana/pkg/models"
  "github.com/grafana/grafana/pkg/setting"
  "github.com/grafana/grafana/pkg/util"
  "github.com/grafana/grafana/pkg/plugins"
)

var (
  dataproxyLogger log.Logger = log.New("data-proxy-log")
)
const HeaderNameNoBackendCache = "X-Grafana-NoCache"

func NewReverseProxy(ds *m.DataSource, proxyPath string, targetUrl *url.URL) *httputil.ReverseProxy {
  director := func(req *http.Request) {
    req.URL.Scheme = targetUrl.Scheme
    req.URL.Host = targetUrl.Host
    req.Host = targetUrl.Host
func (hs *HttpServer) getDatasourceById(id int64, orgId int64, nocache bool) (*m.DataSource, error) {
  cacheKey := fmt.Sprintf("ds-%d", id)

    reqQueryVals := req.URL.Query()

    if ds.Type == m.DS_INFLUXDB_08 {
      req.URL.Path = util.JoinUrlFragments(targetUrl.Path, "db/"+ds.Database+"/"+proxyPath)
      reqQueryVals.Add("u", ds.User)
      reqQueryVals.Add("p", ds.Password)
      req.URL.RawQuery = reqQueryVals.Encode()
    } else if ds.Type == m.DS_INFLUXDB {
      req.URL.Path = util.JoinUrlFragments(targetUrl.Path, proxyPath)
      req.URL.RawQuery = reqQueryVals.Encode()
      if !ds.BasicAuth {
        req.Header.Del("Authorization")
        req.Header.Add("Authorization", util.GetBasicAuthHeader(ds.User, ds.Password))
      }
    } else {
      req.URL.Path = util.JoinUrlFragments(targetUrl.Path, proxyPath)
    }

    if ds.BasicAuth {
      req.Header.Del("Authorization")
      req.Header.Add("Authorization", util.GetBasicAuthHeader(ds.BasicAuthUser, ds.BasicAuthPassword))
    }

    dsAuth := req.Header.Get("X-DS-Authorization")
    if len(dsAuth) > 0 {
      req.Header.Del("X-DS-Authorization")
      req.Header.Del("Authorization")
      req.Header.Add("Authorization", dsAuth)
    }

    // clear cookie headers
    req.Header.Del("Cookie")
    req.Header.Del("Set-Cookie")

    // clear X-Forwarded Host/Port/Proto headers
    req.Header.Del("X-Forwarded-Host")
    req.Header.Del("X-Forwarded-Port")
    req.Header.Del("X-Forwarded-Proto")

    // set X-Forwarded-For header
    if req.RemoteAddr != "" {
      remoteAddr, _, err := net.SplitHostPort(req.RemoteAddr)
      if err != nil {
        remoteAddr = req.RemoteAddr
      }
      if req.Header.Get("X-Forwarded-For") != "" {
        req.Header.Set("X-Forwarded-For", req.Header.Get("X-Forwarded-For")+", "+remoteAddr)
      } else {
        req.Header.Set("X-Forwarded-For", remoteAddr)
  if !nocache {
    if cached, found := hs.cache.Get(cacheKey); found {
      ds := cached.(*m.DataSource)
      if ds.OrgId == orgId {
        return ds, nil
      }
    }

    // reqBytes, _ := httputil.DumpRequestOut(req, true);
    // log.Trace("Proxying datasource request: %s", string(reqBytes))
  }

  return &httputil.ReverseProxy{Director: director, FlushInterval: time.Millisecond * 200}
}

func getDatasource(id int64, orgId int64) (*m.DataSource, error) {
  query := m.GetDataSourceByIdQuery{Id: id, OrgId: orgId}
  if err := bus.Dispatch(&query); err != nil {
    return nil, err
  }

  hs.cache.Set(cacheKey, query.Result, time.Second*5)
  return query.Result, nil
}

func ProxyDataSourceRequest(c *middleware.Context) {
func (hs *HttpServer) ProxyDataSourceRequest(c *middleware.Context) {
  c.TimeRequest(metrics.M_DataSource_ProxyReq_Timer)

  ds, err := getDatasource(c.ParamsInt64(":id"), c.OrgId)
  nocache := c.Req.Header.Get(HeaderNameNoBackendCache) == "true"

  ds, err := hs.getDatasourceById(c.ParamsInt64(":id"), c.OrgId, nocache)

  if err != nil {
    c.JsonApiErr(500, "Unable to load datasource meta data", err)
    return
  }

  if ds.Type == m.DS_INFLUXDB {
    if c.Query("db") != ds.Database {
      c.JsonApiErr(403, "Datasource is not configured to allow this database", nil)
      return
    }
  }

  if ds.Type == m.DS_CLOUDWATCH {
    cloudwatch.HandleRequest(c, ds)
    return
  }

  targetUrl, _ := url.Parse(ds.Url)
  if !checkWhiteList(c, targetUrl.Host) {
  // find plugin
  plugin, ok := plugins.DataSources[ds.Type]
  if !ok {
    c.JsonApiErr(500, "Unable to find datasource plugin", err)
    return
  }

  proxyPath := c.Params("*")

  if ds.Type == m.DS_PROMETHEUS {
    if c.Req.Request.Method != http.MethodGet || !strings.HasPrefix(proxyPath, "api/") {
      c.JsonApiErr(403, "GET is only allowed on proxied Prometheus datasource", nil)
      return
    }
  }

  if ds.Type == m.DS_ES {
    if c.Req.Request.Method == "DELETE" {
      c.JsonApiErr(403, "Deletes not allowed on proxied Elasticsearch datasource", nil)
      return
    }
    if c.Req.Request.Method == "PUT" {
      c.JsonApiErr(403, "Puts not allowed on proxied Elasticsearch datasource", nil)
      return
    }
    if c.Req.Request.Method == "POST" && proxyPath != "_msearch" {
      c.JsonApiErr(403, "Posts not allowed on proxied Elasticsearch datasource except on /_msearch", nil)
      return
    }
  }

  proxy := NewReverseProxy(ds, proxyPath, targetUrl)
  proxy.Transport, err = ds.GetHttpTransport()
  if err != nil {
    c.JsonApiErr(400, "Unable to load TLS certificate", err)
    return
  }

  logProxyRequest(ds.Type, c)
  proxy.ServeHTTP(c.Resp, c.Req.Request)
  c.Resp.Header().Del("Set-Cookie")
}

func logProxyRequest(dataSourceType string, c *middleware.Context) {
  if !setting.DataProxyLogging {
    return
  }

  var body string
  if c.Req.Request.Body != nil {
    buffer, err := ioutil.ReadAll(c.Req.Request.Body)
    if err == nil {
      c.Req.Request.Body = ioutil.NopCloser(bytes.NewBuffer(buffer))
      body = string(buffer)
    }
  }

  dataproxyLogger.Info("Proxying incoming request",
    "userid", c.UserId,
    "orgid", c.OrgId,
    "username", c.Login,
    "datasource", dataSourceType,
    "uri", c.Req.RequestURI,
    "method", c.Req.Request.Method,
    "body", body)
}

func checkWhiteList(c *middleware.Context, host string) bool {
  if host != "" && len(setting.DataProxyWhiteList) > 0 {
    if _, exists := setting.DataProxyWhiteList[host]; !exists {
      c.JsonApiErr(403, "Data proxy hostname and ip are not included in whitelist", nil)
      return false
    }
  }

  return true
  proxy := pluginproxy.NewDataSourceProxy(ds, plugin, c, proxyPath)
  proxy.HandleRequest()
}
|
||||
|
@@ -1,63 +0,0 @@
package api

import (
	"net/http"
	"net/url"
	"testing"

	. "github.com/smartystreets/goconvey/convey"

	m "github.com/grafana/grafana/pkg/models"
)

func TestDataSourceProxy(t *testing.T) {
	Convey("When getting graphite datasource proxy", t, func() {
		ds := m.DataSource{Url: "htttp://graphite:8080", Type: m.DS_GRAPHITE}
		targetUrl, err := url.Parse(ds.Url)
		proxy := NewReverseProxy(&ds, "/render", targetUrl)
		proxy.Transport, err = ds.GetHttpTransport()
		So(err, ShouldBeNil)

		transport, ok := proxy.Transport.(*http.Transport)
		So(ok, ShouldBeTrue)
		So(transport.TLSClientConfig.InsecureSkipVerify, ShouldBeTrue)

		requestUrl, _ := url.Parse("http://grafana.com/sub")
		req := http.Request{URL: requestUrl}

		proxy.Director(&req)

		Convey("Can translate request url and path", func() {
			So(req.URL.Host, ShouldEqual, "graphite:8080")
			So(req.URL.Path, ShouldEqual, "/render")
		})
	})

	Convey("When getting influxdb datasource proxy", t, func() {
		ds := m.DataSource{
			Type:     m.DS_INFLUXDB_08,
			Url:      "http://influxdb:8083",
			Database: "site",
			User:     "user",
			Password: "password",
		}

		targetUrl, _ := url.Parse(ds.Url)
		proxy := NewReverseProxy(&ds, "", targetUrl)

		requestUrl, _ := url.Parse("http://grafana.com/sub")
		req := http.Request{URL: requestUrl}

		proxy.Director(&req)

		Convey("Should add db to url", func() {
			So(req.URL.Path, ShouldEqual, "/db/site/")
		})

		Convey("Should add username and password", func() {
			queryVals := req.URL.Query()
			So(queryVals["u"][0], ShouldEqual, "user")
			So(queryVals["p"][0], ShouldEqual, "password")
		})
	})
}
@@ -31,7 +31,7 @@ type AdminUpdateUserPasswordForm struct {
}

type AdminUpdateUserPermissionsForm struct {
	IsGrafanaAdmin bool `json:"isGrafanaAdmin" binding:"Required"`
	IsGrafanaAdmin bool `json:"isGrafanaAdmin"`
}

type AdminUserListItem struct {
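One plausible reading of the change above: with a Required binding rule, a boolean field cannot be reliably submitted as false, because false is Go's zero value and looks identical to a missing field. A tiny illustrative sketch of that ambiguity; the helper is hypothetical, not the binding library's API:

package main

import (
	"fmt"
	"reflect"
)

// looksMissing is a zero-value check, similar in spirit to what "Required"
// validators do: false, 0 and "" are all indistinguishable from "not provided".
func looksMissing(v interface{}) bool {
	return reflect.DeepEqual(v, reflect.Zero(reflect.TypeOf(v)).Interface())
}

func main() {
	fmt.Println(looksMissing(false)) // true: a legitimate false would be rejected
	fmt.Println(looksMissing(true))  // false
}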
@@ -131,17 +131,20 @@ func getFrontendSettingsMap(c *middleware.Context) (map[string]interface{}, error) {
	}

	jsonObj := map[string]interface{}{
		"defaultDatasource":  defaultDatasource,
		"datasources":        datasources,
		"panels":             panels,
		"appSubUrl":          setting.AppSubUrl,
		"allowOrgCreate":     (setting.AllowUserOrgCreate && c.IsSignedIn) || c.IsGrafanaAdmin,
		"authProxyEnabled":   setting.AuthProxyEnabled,
		"ldapEnabled":        setting.LdapEnabled,
		"alertingEnabled":    setting.AlertingEnabled,
		"googleAnalyticsId":  setting.GoogleAnalyticsId,
		"disableLoginForm":   setting.DisableLoginForm,
		"disableSignoutMenu": setting.DisableSignoutMenu,
		"defaultDatasource":       defaultDatasource,
		"datasources":             datasources,
		"panels":                  panels,
		"appSubUrl":               setting.AppSubUrl,
		"allowOrgCreate":          (setting.AllowUserOrgCreate && c.IsSignedIn) || c.IsGrafanaAdmin,
		"authProxyEnabled":        setting.AuthProxyEnabled,
		"ldapEnabled":             setting.LdapEnabled,
		"alertingEnabled":         setting.AlertingEnabled,
		"googleAnalyticsId":       setting.GoogleAnalyticsId,
		"disableLoginForm":        setting.DisableLoginForm,
		"disableSignoutMenu":      setting.DisableSignoutMenu,
		"externalUserMngInfo":     setting.ExternalUserMngInfo,
		"externalUserMngLinkUrl":  setting.ExternalUserMngLinkUrl,
		"externalUserMngLinkName": setting.ExternalUserMngLinkName,
		"buildInfo": map[string]interface{}{
			"version": setting.BuildVersion,
			"commit":  setting.BuildCommit,
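These settings are ultimately serialized to JSON for the frontend. A minimal sketch of what an illustrative subset of the map above looks like on the wire; the values are made up:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative subset of the settings map built in getFrontendSettingsMap.
	jsonObj := map[string]interface{}{
		"appSubUrl":               "/grafana",
		"ldapEnabled":             true,
		"externalUserMngLinkName": "Manage users",
	}
	out, err := json.MarshalIndent(jsonObj, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}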
@@ -9,7 +9,9 @@ import (
	"net/http"
	"os"
	"path"
	"time"

	gocache "github.com/patrickmn/go-cache"
	macaron "gopkg.in/macaron.v1"

	"github.com/grafana/grafana/pkg/api/live"
@@ -29,13 +31,15 @@ type HttpServer struct {
	macaron       *macaron.Macaron
	context       context.Context
	streamManager *live.StreamManager
	cache         *gocache.Cache

	httpSrv *http.Server
}

func NewHttpServer() *HttpServer {
	return &HttpServer{
		log: log.New("http.server"),
		log:   log.New("http.server"),
		cache: gocache.New(5*time.Minute, 10*time.Minute),
	}
}
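The new cache field uses github.com/patrickmn/go-cache with the same constructor arguments as above: a 5-minute default TTL and a 10-minute cleanup sweep. A minimal usage sketch; the key and value are illustrative:

package main

import (
	"fmt"
	"time"

	gocache "github.com/patrickmn/go-cache"
)

func main() {
	// Same arguments as NewHttpServer above: entries expire after
	// 5 minutes by default and expired items are swept every 10 minutes.
	c := gocache.New(5*time.Minute, 10*time.Minute)

	// Store with the default 5-minute expiration.
	c.Set("metric-suggestions", []string{"up", "node_load1"}, gocache.DefaultExpiration)

	if v, found := c.Get("metric-suggestions"); found {
		fmt.Println(v.([]string))
	}
}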
@@ -137,12 +137,15 @@ func loginUserWithUser(user *m.User, c *middleware.Context) {
		log.Error(3, "User login with nil user")
	}

	c.Resp.Header().Del("Set-Cookie")

	days := 86400 * setting.LogInRememberDays
	if days > 0 {
		c.SetCookie(setting.CookieUserName, user.Login, days, setting.AppSubUrl+"/")
		c.SetSuperSecureCookie(user.Rands+user.Password, setting.CookieRememberName, user.Login, days, setting.AppSubUrl+"/")
	}

	c.Session.RegenerateId(c)
	c.Session.Set(middleware.SESS_KEY_USERID, user.Id)
}
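Note that despite its name, days above holds the cookie lifetime in seconds (86400 seconds per day), which is the unit a cookie max-age expects; a quick check:

package main

import "fmt"

func main() {
	logInRememberDays := 7
	maxAgeSeconds := 86400 * logInRememberDays // 86400 seconds in a day
	fmt.Println(maxAgeSeconds)                 // 604800
}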
pkg/api/pluginproxy/ds_proxy.go (new file, 349 lines)
@@ -0,0 +1,349 @@
package pluginproxy

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strconv"
	"strings"
	"text/template"
	"time"

	"github.com/grafana/grafana/pkg/api/cloudwatch"
	"github.com/grafana/grafana/pkg/log"
	"github.com/grafana/grafana/pkg/middleware"
	m "github.com/grafana/grafana/pkg/models"
	"github.com/grafana/grafana/pkg/plugins"
	"github.com/grafana/grafana/pkg/setting"
	"github.com/grafana/grafana/pkg/util"
)

var (
	logger log.Logger   = log.New("data-proxy-log")
	client *http.Client = &http.Client{
		Timeout:   time.Second * 30,
		Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
	}
	tokenCache = map[int64]*jwtToken{}
)

type jwtToken struct {
	ExpiresOn       time.Time `json:"-"`
	ExpiresOnString string    `json:"expires_on"`
	AccessToken     string    `json:"access_token"`
}

type DataSourceProxy struct {
	ds        *m.DataSource
	ctx       *middleware.Context
	targetUrl *url.URL
	proxyPath string
	route     *plugins.AppPluginRoute
	plugin    *plugins.DataSourcePlugin
}

func NewDataSourceProxy(ds *m.DataSource, plugin *plugins.DataSourcePlugin, ctx *middleware.Context, proxyPath string) *DataSourceProxy {
	targetUrl, _ := url.Parse(ds.Url)

	return &DataSourceProxy{
		ds:        ds,
		plugin:    plugin,
		ctx:       ctx,
		proxyPath: proxyPath,
		targetUrl: targetUrl,
	}
}

func (proxy *DataSourceProxy) HandleRequest() {
	if proxy.ds.Type == m.DS_CLOUDWATCH {
		cloudwatch.HandleRequest(proxy.ctx, proxy.ds)
		return
	}

	if err := proxy.validateRequest(); err != nil {
		proxy.ctx.JsonApiErr(403, err.Error(), nil)
		return
	}

	reverseProxy := &httputil.ReverseProxy{
		Director:      proxy.getDirector(),
		FlushInterval: time.Millisecond * 200,
	}

	var err error
	reverseProxy.Transport, err = proxy.ds.GetHttpTransport()
	if err != nil {
		proxy.ctx.JsonApiErr(400, "Unable to load TLS certificate", err)
		return
	}

	proxy.logRequest()

	reverseProxy.ServeHTTP(proxy.ctx.Resp, proxy.ctx.Req.Request)
	proxy.ctx.Resp.Header().Del("Set-Cookie")
}

func (proxy *DataSourceProxy) getDirector() func(req *http.Request) {
	return func(req *http.Request) {
		req.URL.Scheme = proxy.targetUrl.Scheme
		req.URL.Host = proxy.targetUrl.Host
		req.Host = proxy.targetUrl.Host

		reqQueryVals := req.URL.Query()

		if proxy.ds.Type == m.DS_INFLUXDB_08 {
			req.URL.Path = util.JoinUrlFragments(proxy.targetUrl.Path, "db/"+proxy.ds.Database+"/"+proxy.proxyPath)
			reqQueryVals.Add("u", proxy.ds.User)
			reqQueryVals.Add("p", proxy.ds.Password)
			req.URL.RawQuery = reqQueryVals.Encode()
		} else if proxy.ds.Type == m.DS_INFLUXDB {
			req.URL.Path = util.JoinUrlFragments(proxy.targetUrl.Path, proxy.proxyPath)
			req.URL.RawQuery = reqQueryVals.Encode()
			if !proxy.ds.BasicAuth {
				req.Header.Del("Authorization")
				req.Header.Add("Authorization", util.GetBasicAuthHeader(proxy.ds.User, proxy.ds.Password))
			}
		} else {
			req.URL.Path = util.JoinUrlFragments(proxy.targetUrl.Path, proxy.proxyPath)
		}

		if proxy.ds.BasicAuth {
			req.Header.Del("Authorization")
			req.Header.Add("Authorization", util.GetBasicAuthHeader(proxy.ds.BasicAuthUser, proxy.ds.BasicAuthPassword))
		}

		dsAuth := req.Header.Get("X-DS-Authorization")
		if len(dsAuth) > 0 {
			req.Header.Del("X-DS-Authorization")
			req.Header.Del("Authorization")
			req.Header.Add("Authorization", dsAuth)
		}

		// clear cookie headers
		req.Header.Del("Cookie")
		req.Header.Del("Set-Cookie")

		// clear X-Forwarded Host/Port/Proto headers
		req.Header.Del("X-Forwarded-Host")
		req.Header.Del("X-Forwarded-Port")
		req.Header.Del("X-Forwarded-Proto")

		// set X-Forwarded-For header
		if req.RemoteAddr != "" {
			remoteAddr, _, err := net.SplitHostPort(req.RemoteAddr)
			if err != nil {
				remoteAddr = req.RemoteAddr
			}
			if req.Header.Get("X-Forwarded-For") != "" {
				req.Header.Set("X-Forwarded-For", req.Header.Get("X-Forwarded-For")+", "+remoteAddr)
			} else {
				req.Header.Set("X-Forwarded-For", remoteAddr)
			}
		}

		if proxy.route != nil {
			proxy.applyRoute(req)
		}
	}
}
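// validateRequest, defined below, is the gatekeeper for everything the
// director proxies: it checks that InfluxDB queries target the configured
// database, that the target host passes the optional whitelist, that
// Prometheus only receives GETs under api/, that Elasticsearch never
// receives DELETE or PUT and only POSTs to _msearch, and that any matched
// plugin route is permitted for the user's org role.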
func (proxy *DataSourceProxy) validateRequest() error {
	if proxy.ds.Type == m.DS_INFLUXDB {
		if proxy.ctx.Query("db") != proxy.ds.Database {
			return errors.New("Datasource is not configured to allow this database")
		}
	}

	if !checkWhiteList(proxy.ctx, proxy.targetUrl.Host) {
		return errors.New("Target url is not a valid target")
	}

	if proxy.ds.Type == m.DS_PROMETHEUS {
		if proxy.ctx.Req.Request.Method != http.MethodGet || !strings.HasPrefix(proxy.proxyPath, "api/") {
			return errors.New("GET is only allowed on proxied Prometheus datasource")
		}
	}

	if proxy.ds.Type == m.DS_ES {
		if proxy.ctx.Req.Request.Method == "DELETE" {
			return errors.New("Deletes not allowed on proxied Elasticsearch datasource")
		}
		if proxy.ctx.Req.Request.Method == "PUT" {
			return errors.New("Puts not allowed on proxied Elasticsearch datasource")
		}
		if proxy.ctx.Req.Request.Method == "POST" && proxy.proxyPath != "_msearch" {
			return errors.New("Posts not allowed on proxied Elasticsearch datasource except on /_msearch")
		}
	}

	// find a matching route, if there are any
	if len(proxy.plugin.Routes) > 0 {
		for _, route := range proxy.plugin.Routes {
			// method match
			if route.Method != "" && route.Method != "*" && route.Method != proxy.ctx.Req.Method {
				continue
			}

			if route.ReqRole.IsValid() {
				if !proxy.ctx.HasUserRole(route.ReqRole) {
					return errors.New("Plugin proxy route access denied")
				}
			}

			if strings.HasPrefix(proxy.proxyPath, route.Path) {
				proxy.route = route
				break
			}
		}
	}

	return nil
}

func (proxy *DataSourceProxy) logRequest() {
	if !setting.DataProxyLogging {
		return
	}

	var body string
	if proxy.ctx.Req.Request.Body != nil {
		buffer, err := ioutil.ReadAll(proxy.ctx.Req.Request.Body)
		if err == nil {
			proxy.ctx.Req.Request.Body = ioutil.NopCloser(bytes.NewBuffer(buffer))
			body = string(buffer)
		}
	}

	logger.Info("Proxying incoming request",
		"userid", proxy.ctx.UserId,
		"orgid", proxy.ctx.OrgId,
		"username", proxy.ctx.Login,
		"datasource", proxy.ds.Type,
		"uri", proxy.ctx.Req.RequestURI,
		"method", proxy.ctx.Req.Request.Method,
		"body", body)
}

func checkWhiteList(c *middleware.Context, host string) bool {
	if host != "" && len(setting.DataProxyWhiteList) > 0 {
		if _, exists := setting.DataProxyWhiteList[host]; !exists {
			c.JsonApiErr(403, "Data proxy hostname and ip are not included in whitelist", nil)
			return false
		}
	}

	return true
}

func (proxy *DataSourceProxy) applyRoute(req *http.Request) {
	proxy.proxyPath = strings.TrimPrefix(proxy.proxyPath, proxy.route.Path)

	data := templateData{
		JsonData:       proxy.ds.JsonData.Interface().(map[string]interface{}),
		SecureJsonData: proxy.ds.SecureJsonData.Decrypt(),
	}

	routeUrl, err := url.Parse(proxy.route.Url)
	if err != nil {
		logger.Error("Error parsing plugin route url")
		return
	}

	req.URL.Scheme = routeUrl.Scheme
	req.URL.Host = routeUrl.Host
	req.Host = routeUrl.Host
	req.URL.Path = util.JoinUrlFragments(routeUrl.Path, proxy.proxyPath)

	if err := addHeaders(&req.Header, proxy.route, data); err != nil {
		logger.Error("Failed to render plugin headers", "error", err)
	}

	if proxy.route.TokenAuth != nil {
		if token, err := proxy.getAccessToken(data); err != nil {
			logger.Error("Failed to get access token", "error", err)
		} else {
			req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
		}
	}

	logger.Info("Requesting", "url", req.URL.String())
}

func (proxy *DataSourceProxy) getAccessToken(data templateData) (string, error) {
	if cachedToken, found := tokenCache[proxy.ds.Id]; found {
		if cachedToken.ExpiresOn.After(time.Now().Add(time.Second * 10)) {
			logger.Info("Using token from cache")
			return cachedToken.AccessToken, nil
		}
	}

	urlInterpolated, err := interpolateString(proxy.route.TokenAuth.Url, data)
	if err != nil {
		return "", err
	}

	params := make(url.Values)
	for key, value := range proxy.route.TokenAuth.Params {
		if interpolatedParam, err := interpolateString(value, data); err != nil {
			return "", err
		} else {
			params.Add(key, interpolatedParam)
		}
	}

	getTokenReq, _ := http.NewRequest("POST", urlInterpolated, bytes.NewBufferString(params.Encode()))
	getTokenReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	getTokenReq.Header.Add("Content-Length", strconv.Itoa(len(params.Encode())))

	resp, err := client.Do(getTokenReq)
	if err != nil {
		return "", err
	}

	defer resp.Body.Close()

	var token jwtToken
	if err := json.NewDecoder(resp.Body).Decode(&token); err != nil {
		return "", err
	}

	expiresOnEpoch, _ := strconv.ParseInt(token.ExpiresOnString, 10, 64)
	token.ExpiresOn = time.Unix(expiresOnEpoch, 0)
	tokenCache[proxy.ds.Id] = &token

	logger.Info("Got new access token", "ExpiresOn", token.ExpiresOn)
	return token.AccessToken, nil
}

func interpolateString(text string, data templateData) (string, error) {
	t, err := template.New("content").Parse(text)
	if err != nil {
		return "", errors.New(fmt.Sprintf("Could not parse template %s.", text))
	}

	var contentBuf bytes.Buffer
	err = t.Execute(&contentBuf, data)
	if err != nil {
		return "", errors.New(fmt.Sprintf("Failed to execute template %s.", text))
	}

	return contentBuf.String(), nil
}

func addHeaders(reqHeaders *http.Header, route *plugins.AppPluginRoute, data templateData) error {
	for _, header := range route.Headers {
		interpolated, err := interpolateString(header.Content, data)
		if err != nil {
			return err
		}
		reqHeaders.Add(header.Name, interpolated)
	}

	return nil
}
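The applyRoute/addHeaders pair above renders plugin-route headers through text/template with JsonData and decrypted SecureJsonData in scope. A self-contained sketch of the same mechanism; the token value is made up:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

type templateData struct {
	JsonData       map[string]interface{}
	SecureJsonData map[string]string
}

func main() {
	data := templateData{
		SecureJsonData: map[string]string{"key": "s3cr3t"},
	}

	// Same pattern as interpolateString above: parse, execute, read the buffer.
	t, err := template.New("content").Parse("Bearer {{.SecureJsonData.key}}")
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if err := t.Execute(&buf, data); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // Bearer s3cr3t
}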
pkg/api/pluginproxy/ds_proxy_test.go (new file, 165 lines)
@@ -0,0 +1,165 @@
package pluginproxy

import (
	"net/http"
	"net/url"
	"testing"

	macaron "gopkg.in/macaron.v1"

	"github.com/grafana/grafana/pkg/components/simplejson"
	"github.com/grafana/grafana/pkg/middleware"
	m "github.com/grafana/grafana/pkg/models"
	"github.com/grafana/grafana/pkg/plugins"
	"github.com/grafana/grafana/pkg/setting"
	"github.com/grafana/grafana/pkg/util"
	. "github.com/smartystreets/goconvey/convey"
)

func TestDSRouteRule(t *testing.T) {

	Convey("DataSourceProxy", t, func() {
		Convey("Plugin with routes", func() {
			plugin := &plugins.DataSourcePlugin{
				Routes: []*plugins.AppPluginRoute{
					{
						Path:    "api/v4/",
						Url:     "https://www.google.com",
						ReqRole: m.ROLE_EDITOR,
						Headers: []plugins.AppPluginRouteHeader{
							{Name: "x-header", Content: "my secret {{.SecureJsonData.key}}"},
						},
					},
					{
						Path:    "api/admin",
						Url:     "https://www.google.com",
						ReqRole: m.ROLE_ADMIN,
						Headers: []plugins.AppPluginRouteHeader{
							{Name: "x-header", Content: "my secret {{.SecureJsonData.key}}"},
						},
					},
					{
						Path: "api/anon",
						Url:  "https://www.google.com",
						Headers: []plugins.AppPluginRouteHeader{
							{Name: "x-header", Content: "my secret {{.SecureJsonData.key}}"},
						},
					},
				},
			}

			setting.SecretKey = "password"
			key, _ := util.Encrypt([]byte("123"), "password")

			ds := &m.DataSource{
				JsonData: simplejson.NewFromAny(map[string]interface{}{
					"clientId": "asd",
				}),
				SecureJsonData: map[string][]byte{
					"key": key,
				},
			}

			req, _ := http.NewRequest("GET", "http://localhost/asd", nil)
			ctx := &middleware.Context{
				Context: &macaron.Context{
					Req: macaron.Request{Request: req},
				},
				SignedInUser: &m.SignedInUser{OrgRole: m.ROLE_EDITOR},
			}

			Convey("When matching route path", func() {
				proxy := NewDataSourceProxy(ds, plugin, ctx, "api/v4/some/method")
				proxy.route = plugin.Routes[0]
				proxy.applyRoute(req)

				Convey("should add headers and update url", func() {
					So(req.URL.String(), ShouldEqual, "https://www.google.com/some/method")
					So(req.Header.Get("x-header"), ShouldEqual, "my secret 123")
				})
			})

			Convey("Validating request", func() {
				Convey("plugin route with valid role", func() {
					proxy := NewDataSourceProxy(ds, plugin, ctx, "api/v4/some/method")
					err := proxy.validateRequest()
					So(err, ShouldBeNil)
				})

				Convey("plugin route with admin role and user is editor", func() {
					proxy := NewDataSourceProxy(ds, plugin, ctx, "api/admin")
					err := proxy.validateRequest()
					So(err, ShouldNotBeNil)
				})

				Convey("plugin route with admin role and user is admin", func() {
					ctx.SignedInUser.OrgRole = m.ROLE_ADMIN
					proxy := NewDataSourceProxy(ds, plugin, ctx, "api/admin")
					err := proxy.validateRequest()
					So(err, ShouldBeNil)
				})
			})
		})

		Convey("When proxying graphite", func() {
			plugin := &plugins.DataSourcePlugin{}
			ds := &m.DataSource{Url: "http://graphite:8080", Type: m.DS_GRAPHITE}
			ctx := &middleware.Context{}

			proxy := NewDataSourceProxy(ds, plugin, ctx, "/render")

			requestUrl, _ := url.Parse("http://grafana.com/sub")
			req := http.Request{URL: requestUrl}

			proxy.getDirector()(&req)

			Convey("Can translate request url and path", func() {
				So(req.URL.Host, ShouldEqual, "graphite:8080")
				So(req.URL.Path, ShouldEqual, "/render")
			})
		})

		Convey("When proxying InfluxDB", func() {
			plugin := &plugins.DataSourcePlugin{}

			ds := &m.DataSource{
				Type:     m.DS_INFLUXDB_08,
				Url:      "http://influxdb:8083",
				Database: "site",
				User:     "user",
				Password: "password",
			}

			ctx := &middleware.Context{}
			proxy := NewDataSourceProxy(ds, plugin, ctx, "")

			requestUrl, _ := url.Parse("http://grafana.com/sub")
			req := http.Request{URL: requestUrl}

			proxy.getDirector()(&req)

			Convey("Should add db to url", func() {
				So(req.URL.Path, ShouldEqual, "/db/site/")
			})

			Convey("Should add username and password", func() {
				queryVals := req.URL.Query()
				So(queryVals["u"][0], ShouldEqual, "user")
				So(queryVals["p"][0], ShouldEqual, "password")
			})
		})

		Convey("When interpolating string", func() {
			data := templateData{
				SecureJsonData: map[string]string{
					"Test": "0asd+asd",
				},
			}

			interpolated, err := interpolateString("{{.SecureJsonData.Test}}", data)
			So(err, ShouldBeNil)
			So(interpolated, ShouldEqual, "0asd+asd")
		})

	})
}
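One detail worth calling out from ds_proxy.go above: cached access tokens are reused only while they have at least ten seconds of life left. A standalone sketch of that freshness check; the struct mirrors jwtToken above, and the values are made up:

package main

import (
	"fmt"
	"time"
)

type jwtToken struct {
	ExpiresOn   time.Time
	AccessToken string
}

// fresh mirrors the guard in getAccessToken: a cached token is only reused
// if it will still be valid ten seconds from now.
func fresh(t *jwtToken) bool {
	return t.ExpiresOn.After(time.Now().Add(10 * time.Second))
}

func main() {
	tok := &jwtToken{ExpiresOn: time.Now().Add(time.Minute), AccessToken: "abc"}
	fmt.Println(fresh(tok)) // true

	stale := &jwtToken{ExpiresOn: time.Now().Add(5 * time.Second)}
	fmt.Println(fresh(stale)) // false
}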
@@ -1,15 +1,11 @@
package pluginproxy

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"net/http"
	"net/http/httputil"
	"net/url"
	"text/template"

	"github.com/grafana/grafana/pkg/bus"
	"github.com/grafana/grafana/pkg/log"
@@ -38,23 +34,8 @@ func getHeaders(route *plugins.AppPluginRoute, orgId int64, appId string) (http.Header, error) {
		SecureJsonData: query.Result.SecureJsonData.Decrypt(),
	}

	for _, header := range route.Headers {
		var contentBuf bytes.Buffer
		t, err := template.New("content").Parse(header.Content)
		if err != nil {
			return nil, errors.New(fmt.Sprintf("could not parse header content template for header %s.", header.Name))
		}

		err = t.Execute(&contentBuf, data)
		if err != nil {
			return nil, errors.New(fmt.Sprintf("failed to execute header content template for header %s.", header.Name))
		}

		log.Trace("Adding header to proxy request. %s: %s", header.Name, contentBuf.String())
		result.Add(header.Name, contentBuf.String())
	}

	return result, nil
	err := addHeaders(&result, route, data)
	return result, err
}

func NewApiPluginProxy(ctx *middleware.Context, proxyPath string, route *plugins.AppPluginRoute, appId string) *httputil.ReverseProxy {
@@ -147,15 +147,16 @@ func GetPluginDashboards(c *middleware.Context) Response {
		}
	}

func GetPluginReadme(c *middleware.Context) Response {
func GetPluginMarkdown(c *middleware.Context) Response {
	pluginId := c.Params(":pluginId")
	name := c.Params(":name")

	if content, err := plugins.GetPluginReadme(pluginId); err != nil {
	if content, err := plugins.GetPluginMarkdown(pluginId, name); err != nil {
		if notfound, ok := err.(plugins.PluginNotFoundError); ok {
			return ApiError(404, notfound.Error(), nil)
		}

		return ApiError(500, "Could not get readme", err)
		return ApiError(500, "Could not get markdown file", err)
	} else {
		return Respond(200, content)
	}
@@ -18,14 +18,18 @@ func RenderToPng(c *middleware.Context) {
		Width:    queryReader.Get("width", "800"),
		Height:   queryReader.Get("height", "400"),
		OrgId:    c.OrgId,
		Timeout: queryReader.Get("timeout", "30"),
		Timeout:  queryReader.Get("timeout", "60"),
		Timezone: queryReader.Get("tz", ""),
	}

	pngPath, err := renderer.RenderToPng(renderOpts)

	if err != nil {
		c.Handle(500, "Failed to render to png", err)
		if err == renderer.ErrTimeout {
			c.Handle(500, err.Error(), err)
			return // don't fall through and write a second error response
		}

		c.Handle(500, "Rendering failed.", err)
		return
	}
@@ -3,10 +3,8 @@ package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"runtime/trace"
	"strconv"
@@ -16,7 +14,6 @@ import (
	"net/http"
	_ "net/http/pprof"

	"github.com/grafana/grafana/pkg/log"
	"github.com/grafana/grafana/pkg/models"
	"github.com/grafana/grafana/pkg/services/sqlstore"
	"github.com/grafana/grafana/pkg/setting"
@@ -43,7 +40,6 @@ var pidFile = flag.String("pidfile", "", "path to pid file")
var exitChan = make(chan int)

func init() {
	runtime.GOMAXPROCS(runtime.NumCPU())
}

func main() {
@@ -88,46 +84,11 @@ func main() {
	server.Start()
}

func initRuntime() {
	err := setting.NewConfigContext(&setting.CommandLineArgs{
		Config:   *configFile,
		HomePath: *homePath,
		Args:     flag.Args(),
	})

	if err != nil {
		log.Fatal(3, err.Error())
	}

	logger := log.New("main")
	logger.Info("Starting Grafana", "version", version, "commit", commit, "compiled", time.Unix(setting.BuildStamp, 0))

	setting.LogConfigurationInfo()
}

func initSql() {
	sqlstore.NewEngine()
	sqlstore.EnsureAdminUser()
}

func writePIDFile() {
	if *pidFile == "" {
		return
	}

	// Ensure the required directory structure exists.
	err := os.MkdirAll(filepath.Dir(*pidFile), 0700)
	if err != nil {
		log.Fatal(3, "Failed to verify pid directory", err)
	}

	// Retrieve the PID and write it.
	pid := strconv.Itoa(os.Getpid())
	if err := ioutil.WriteFile(*pidFile, []byte(pid), 0644); err != nil {
		log.Fatal(3, "Failed to write pidfile", err)
	}
}

func listenToSystemSignals(server models.GrafanaServer) {
	signalChan := make(chan os.Signal, 1)
	ignoreChan := make(chan os.Signal, 1)
@@ -2,7 +2,12 @@ package main

import (
	"context"
	"flag"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"golang.org/x/sync/errgroup"

@@ -45,8 +50,9 @@ type GrafanaServerImpl struct {
func (g *GrafanaServerImpl) Start() {
	go listenToSystemSignals(g)

	writePIDFile()
	initRuntime()
	g.initLogging()
	g.writePIDFile()

	initSql()
	metrics.Init()
	search.Init()
@@ -74,6 +80,22 @@ func (g *GrafanaServerImpl) Start() {
	g.startHttpServer()
}

func (g *GrafanaServerImpl) initLogging() {
	err := setting.NewConfigContext(&setting.CommandLineArgs{
		Config:   *configFile,
		HomePath: *homePath,
		Args:     flag.Args(),
	})

	if err != nil {
		g.log.Error(err.Error())
		os.Exit(1)
	}

	g.log.Info("Starting Grafana", "version", version, "commit", commit, "compiled", time.Unix(setting.BuildStamp, 0))
	setting.LogConfigurationInfo()
}

func (g *GrafanaServerImpl) startHttpServer() {
	g.httpServer = api.NewHttpServer()

@@ -101,3 +123,25 @@ func (g *GrafanaServerImpl) Shutdown(code int, reason string) {
	log.Close()
	os.Exit(code)
}

func (g *GrafanaServerImpl) writePIDFile() {
	if *pidFile == "" {
		return
	}

	// Ensure the required directory structure exists.
	err := os.MkdirAll(filepath.Dir(*pidFile), 0700)
	if err != nil {
		g.log.Error("Failed to verify pid directory", "error", err)
		os.Exit(1)
	}

	// Retrieve the PID and write it.
	pid := strconv.Itoa(os.Getpid())
	if err := ioutil.WriteFile(*pidFile, []byte(pid), 0644); err != nil {
		g.log.Error("Failed to write pidfile", "error", err)
		os.Exit(1)
	}

	g.log.Info("Writing PID file", "path", *pidFile, "pid", pid)
}
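A minimal sketch of reading back a PID file written by writePIDFile above; the path is an assumed example (grafana-server writes wherever -pidfile points), and the file is plain text containing only the PID:

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

func main() {
	// Assumed path for illustration only.
	raw, err := ioutil.ReadFile("/var/run/grafana/grafana-server.pid")
	if err != nil {
		panic(err)
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(raw)))
	if err != nil {
		panic(err)
	}
	fmt.Println("grafana-server pid:", pid)
}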
Some files were not shown because too many files have changed in this diff