Merge branch 'develop' into develop-color-tweaks
This commit is contained in: commit c94331cd20
2  .github/ISSUE_TEMPLATE.md  (vendored)
@@ -4,8 +4,6 @@ Read before posting:
- Checkout FAQ: https://community.grafana.com/c/howto/faq
- Checkout How to troubleshoot metric query issues: https://community.grafana.com/t/how-to-troubleshoot-metric-query-issues/50

Please prefix your title with [Bug] or [Feature request].

Please include this information:
- What Grafana version are you using?
- What datasource are you using?
2  .gitignore  (vendored)
@@ -38,12 +38,14 @@ public/css/*.min.css
conf/custom.ini
fig.yml
docker-compose.yml
docker-compose.yaml
profile.cov
/grafana
.notouch
/pkg/cmd/grafana-cli/grafana-cli
/pkg/cmd/grafana-server/grafana-server
/pkg/cmd/grafana-server/debug
debug.test
/examples/*/dist
/packaging/**/*.rpm
/packaging/**/*.deb
38  CHANGELOG.md
@@ -11,7 +11,19 @@

## New Features
* **Data Source Proxy**: Add support for whitelisting specified cookies that will be passed through to the data source when proxying data source requests [#5457](https://github.com/grafana/grafana/issues/5457), thanks [@robingustafsson](https://github.com/robingustafsson)
* **Postgres/MySQL**: add __timeGroup macro for mysql [#9596](https://github.com/grafana/grafana/pull/9596), thanks [@svenklemm](https://github.com/svenklemm)
* **Text**: Text panel are now edited in the ace editor. [#9698](https://github.com/grafana/grafana/pull/9698), thx [@mtanda](https://github.com/mtanda)
* **Teams**: Add Microsoft Teams notifier as [#8523](https://github.com/grafana/grafana/issues/8523), thx [@anthu](https://github.com/anthu)
* **Datasources**: Its now possible to configure datasources with config files [#1789](https://github.com/grafana/grafana/issues/1789)
* **Graphite**: Query editor updated to support new query by tag features [#9230](https://github.com/grafana/grafana/issues/9230)
* **Dashboard history**: New config file option versions_to_keep sets how many versions per dashboard to store, [#9671](https://github.com/grafana/grafana/issues/9671)

## Minor
* **Alert panel**: Adds placeholder text when no alerts are within the time range [#9624](https://github.com/grafana/grafana/issues/9624), thx [@straend](https://github.com/straend)
* **Mysql**: MySQL enable MaxOpenCon and MaxIdleCon regards how constring is configured. [#9784](https://github.com/grafana/grafana/issues/9784), thx [@dfredell](https://github.com/dfredell)
* **Cloudwatch**: Fixes broken query inspector for cloudwatch [#9661](https://github.com/grafana/grafana/issues/9661), thx [@mtanda](https://github.com/mtanda)
* **Dashboard**: Make it possible to start dashboards from search and dashboard list panel [#1871](https://github.com/grafana/grafana/issues/1871)
* **Annotations**: Posting annotations now return the id of the annotation [#9798](https://github.com/grafana/grafana/issues/9798)

## Tech
* **RabbitMq**: Remove support for publishing events to RabbitMQ [#9645](https://github.com/grafana/grafana/issues/9645)
@@ -21,6 +33,32 @@
* **Singlestat**: suppress error when result contains no datapoints [#9636](https://github.com/grafana/grafana/issues/9636), thx [@utkarshcmu](https://github.com/utkarshcmu)
* **Postgres/MySQL**: Control quoting in SQL-queries when using template variables [#9030](https://github.com/grafana/grafana/issues/9030), thanks [@svenklemm](https://github.com/svenklemm)

# 4.6.3 (unreleased)

## Fixes
* **Gzip**: Fixes bug gravatar images when gzip was enabled [#5952](https://github.com/grafana/grafana/issues/5952)
* **Alert list**: Now shows alert state changes even after adding manual annotations on dashboard [#9951](https://github.com/grafana/grafana/issues/9951)

# 4.6.2 (2017-11-16)

## Important
* **Prometheus**: Fixes bug with new prometheus alerts in Grafana. Make sure to download this version if your using Prometheus for alerting. More details in the issue. [#9777](https://github.com/grafana/grafana/issues/9777)

## Fixes
* **Color picker**: Bug after using textbox input field to change/paste color string [#9769](https://github.com/grafana/grafana/issues/9769)
* **Cloudwatch**: Fix for cloudwatch templating query `ec2_instance_attribute` [#9667](https://github.com/grafana/grafana/issues/9667), thanks [@mtanda](https://github.com/mtanda)
* **Heatmap**: Fixed tooltip for "time series buckets" mode [#9332](https://github.com/grafana/grafana/issues/9332)
* **InfluxDB**: Fixed query editor issue when using `>` or `<` operators in WHERE clause [#9871](https://github.com/grafana/grafana/issues/9871)


# 4.6.1 (2017-11-01)

* **Singlestat**: Lost thresholds when using save dashboard as [#9681](https://github.com/grafana/grafana/issues/9681)
* **Graph**: Fix for series override color picker [#9715](https://github.com/grafana/grafana/issues/9715)
* **Go**: build using golang 1.9.2 [#9713](https://github.com/grafana/grafana/issues/9713)
* **Plugins**: Fixed problem with loading plugin js files behind auth proxy [#9509](https://github.com/grafana/grafana/issues/9509)
* **Graphite**: Annotation tooltip should render empty string when undefined [#9707](https://github.com/grafana/grafana/issues/9707)

# 4.6.0 (2017-10-26)

## Fixes
109  Dockerfile  (new file)
@@ -0,0 +1,109 @@
FROM phusion/baseimage:0.9.22
MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>

RUN apt-get -y update \
&& apt-get -y upgrade \
&& apt-get -y install vim \
nginx \
python-dev \
python-flup \
python-pip \
python-ldap \
expect \
git \
memcached \
sqlite3 \
libffi-dev \
libcairo2 \
libcairo2-dev \
python-cairo \
python-rrdtool \
pkg-config \
nodejs \
&& rm -rf /var/lib/apt/lists/*

# choose a timezone at build-time
# use `--build-arg CONTAINER_TIMEZONE=Europe/Brussels` in `docker build`
ARG CONTAINER_TIMEZONE
ENV DEBIAN_FRONTEND noninteractive

RUN if [ ! -z "${CONTAINER_TIMEZONE}" ]; \
then ln -sf /usr/share/zoneinfo/$CONTAINER_TIMEZONE /etc/localtime && \
dpkg-reconfigure -f noninteractive tzdata; \
fi

# fix python dependencies (LTS Django and newer memcached/txAMQP)
RUN pip install --upgrade pip && \
pip install django==1.8.18 \
python-memcached==1.53 \
txAMQP==0.6.2

ARG version=1.0.2
ARG whisper_version=${version}
ARG carbon_version=${version}
ARG graphite_version=${version}

ARG statsd_version=v0.7.2

# install whisper
RUN git clone -b ${whisper_version} --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper
WORKDIR /usr/local/src/whisper
RUN python ./setup.py install

# install carbon
RUN git clone -b ${carbon_version} --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon
WORKDIR /usr/local/src/carbon
RUN pip install -r requirements.txt \
&& python ./setup.py install

# install graphite
RUN git clone -b ${graphite_version} --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
WORKDIR /usr/local/src/graphite-web
RUN pip install -r requirements.txt \
&& python ./setup.py install
ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
# ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
WORKDIR /opt/graphite/webapp
RUN mkdir -p /var/log/graphite/ \
&& PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings

# install statsd
RUN git clone -b ${statsd_version} https://github.com/etsy/statsd.git /opt/statsd
ADD conf/opt/statsd/config.js /opt/statsd/config.js

# config nginx
RUN rm /etc/nginx/sites-enabled/default
ADD conf/etc/nginx/nginx.conf /etc/nginx/nginx.conf
ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/graphite-statsd.conf

# init django admin
ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
RUN chmod +x /usr/local/bin/manage.sh && /usr/local/bin/django_admin_init.exp

# logging support
RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
ADD conf/etc/logrotate.d/graphite-statsd /etc/logrotate.d/graphite-statsd

# daemons
ADD conf/etc/service/carbon/run /etc/service/carbon/run
ADD conf/etc/service/carbon-aggregator/run /etc/service/carbon-aggregator/run
ADD conf/etc/service/graphite/run /etc/service/graphite/run
ADD conf/etc/service/statsd/run /etc/service/statsd/run
ADD conf/etc/service/nginx/run /etc/service/nginx/run

# default conf setup
ADD conf /etc/graphite-statsd/conf
ADD conf/etc/my_init.d/01_conf_init.sh /etc/my_init.d/01_conf_init.sh

# cleanup
RUN apt-get clean\
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# defaults
EXPOSE 80 2003-2004 2023-2024 8125/udp 8126
VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
WORKDIR /
ENV HOME /root
CMD ["/sbin/my_init"]
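For reference, the CONTAINER_TIMEZONE build argument documented in the Dockerfile above is supplied on the build command line; the image tag used here is only an illustrative assumption, not something defined by the commit:

    docker build --build-arg CONTAINER_TIMEZONE=Europe/Brussels -t graphite-statsd .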
22  LICENSE.txt  (new file)
@@ -0,0 +1,22 @@
Copyright (c) 2013-2016 Nathan Hopkins

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  ROADMAP.md
@@ -1,29 +1,36 @@
# Roadmap (2017-08-29)
# Roadmap (2017-10-31)

This roadmap is a tentative plan for the core development team. Things change constantly as PRs come in and priorities change.
But it will give you an idea of our current vision and plan.

### Short term (1-4 months)

- Release Grafana v4.5 with fixes and minor enhancements
- Release Grafana v5
- User groups
- Dashboard folders
- Dashboard permissions (on folders as well), permissions on groups or users
- Dashboard & folder permissions (assigned to users or groups)
- New Dashboard layout engine
- New sidemenu & nav UX
- Elasticsearch alerting
- React migration foundation (core components)
- Graphite 1.1 Tags Support

### Long term
### Long term (4 - 8 months)

- Backend plugins to support more Auth options, Alerting data sources & notifications
- Universal time series transformations for any data source (meta queries)
- Reporting
- Web socket & live data streams
- Migrate to Angular2 or react
- Alerting improvements (silence, per series tracking, etc)
- Dashboard as configuration and other automation / provisioning improvements
- Progress on React migration
- Change visualization (panel type) on the fly.
- Multi stat panel (vertical version of singlestat with bars/graph mode with big number etc)
- Repeat panel by query results

### In a distant future far far away

- Meta queries
- Integrated light weight TSDB
- Web socket & live data sources

### Outside contributions
We know this is being worked on right now by contributors (and we hope to merge it when it's ready).

- Clustering for alert engine (load distribution)
@@ -7,7 +7,7 @@ clone_folder: c:\gopath\src\github.com\grafana\grafana
environment:
nodejs_version: "6"
GOPATH: c:\gopath
GOVERSION: 1.9.1
GOVERSION: 1.9.2

install:
- rmdir c:\go /s /q
@@ -9,7 +9,7 @@ machine:
GOPATH: "/home/ubuntu/.go_workspace"
ORG_PATH: "github.com/grafana"
REPO_PATH: "${ORG_PATH}/grafana"
GODIST: "go1.9.1.linux-amd64.tar.gz"
GODIST: "go1.9.2.linux-amd64.tar.gz"
post:
- mkdir -p ~/download
- mkdir -p ~/docker
@@ -7,5 +7,7 @@ coverage:
project: yes
patch: yes
changes: no

comment: false

comment:
layout: "diff"
behavior: "once"
48  conf/datasources/datasources.yaml  (new file)
@@ -0,0 +1,48 @@
# list of datasources that should be deleted from the database
delete_datasources:
# - name: Graphite
#   org_id: 1

# list of datasources to insert/update depending
# whats available in the datbase
datasources:
# # <string, required> name of the datasource. Required
# - name: Graphite
# # <string, required> datasource type. Required
# type: graphite
# # <string, required> access mode. direct or proxy. Required
# access: proxy
# # <int> org id. will default to org_id 1 if not specified
# org_id: 1
# # <string> url
# url: http://localhost:8080
# # <string> database password, if used
# password:
# # <string> database user, if used
# user:
# # <string> database name, if used
# database:
# # <bool> enable/disable basic auth
# basic_auth:
# # <string> basic auth username
# basic_auth_user:
# # <string> basic auth password
# basic_auth_password:
# # <bool> enable/disable with credentials headers
# with_credentials:
# # <bool> mark as default datasource. Max one per org
# is_default:
# # <map> fields that will be converted to json and stored in json_data
# json_data:
# graphiteVersion: "1.1"
# tlsAuth: true
# tlsAuthWithCACert: true
# # <string> json object of data that will be encrypted.
# secure_json_data:
# tlsCACert: "..."
# tlsClientCert: "..."
# tlsClientKey: "..."
# version: 1
# # <bool> allow users to edit datasources from the UI.
# editable: false
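As a concrete illustration of the provisioning format introduced above, a minimal uncommented entry might look like the sketch below; the field names come from the commented template in the file, while the values are assumptions for illustration only and are not part of the commit:

    datasources:
      - name: Graphite
        type: graphite
        access: proxy
        org_id: 1
        url: http://localhost:8080
        is_default: true
        json_data:
          graphiteVersion: "1.1"
        editable: false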
@@ -12,17 +12,17 @@ instance_name = ${HOSTNAME}
#################################### Paths ###############################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
#
data = data
#

# Directory where grafana can store logs
#
logs = data/log
#

# Directory where grafana will automatically scan and look for plugins
#
plugins = data/plugins

# Config files containing datasources that will be configured at startup
datasources = conf/datasources

#################################### Server ##############################
[server]
# Protocol (http, https, socket)
@@ -82,6 +82,9 @@ max_idle_conn = 2
# Max conn setting default is 0 (mean not set)
max_open_conn =

# Set to true to log the sql calls and execution times.
log_queries =

# For "postgres", use either "disable", "require" or "verify-full"
# For "mysql", use either "true", "false", or "skip-verify".
ssl_mode = disable
@@ -171,6 +174,7 @@ disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
data_source_proxy_whitelist =

#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
external_enabled = true
@@ -183,7 +187,13 @@ snapshot_remove_expired = true
# remove snapshots after 90 days
snapshot_TTL_days = 90

#################################### Users ####################################
#################################### Dashboards ##################

[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
versions_to_keep = 20

#################################### Users ###############################
[users]
# disable user signup / registration
allow_sign_up = false
@@ -429,7 +439,7 @@ enabled = true
execute_alerts = true

#################################### Internal Grafana Metrics ############
# Metrics available at HTTP API Url /api/metrics
# Metrics available at HTTP API Url /metrics
[metrics]
enabled = true
interval_seconds = 10
@@ -12,18 +12,17 @@
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
#
;data = /var/lib/grafana
#

# Directory where grafana can store logs
#
;logs = /var/log/grafana
#

# Directory where grafana will automatically scan and look for plugins
#
;plugins = /var/lib/grafana/plugins

#
# Config files containing datasources that will be configured at startup
;datasources = conf/datasources

#################################### Server ####################################
[server]
# Protocol (http, https, socket)
@@ -91,6 +90,8 @@
# Max conn setting default is 0 (mean not set)
;max_open_conn =

# Set to true to log the sql calls and execution times.
log_queries =

#################################### Session ####################################
[session]
@@ -161,6 +162,7 @@
# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =

#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
;external_enabled = true
@@ -173,7 +175,12 @@
# remove snapshots after 90 days
;snapshot_TTL_days = 90

#################################### Users ####################################
#################################### Dashboards History ##################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
;versions_to_keep = 20

#################################### Users ###############################
[users]
# disable user signup / registration
;allow_sign_up = true
@@ -373,7 +380,7 @@
;execute_alerts = true

#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /api/metrics
# Metrics available at HTTP API Url /metrics
[metrics]
# Disable / Enable internal metrics
;enabled = true
11  docker/blocks/collectd/docker-compose.yaml  (new file)
@@ -0,0 +1,11 @@
collectd:
build: blocks/collectd
environment:
HOST_NAME: myserver
GRAPHITE_HOST: graphite
GRAPHITE_PORT: 2003
GRAPHITE_PREFIX: collectd.
REPORT_BY_CPU: 'false'
COLLECT_INTERVAL: 10
links:
- graphite
@@ -1,11 +0,0 @@
collectd:
build: blocks/collectd
environment:
HOST_NAME: myserver
GRAPHITE_HOST: graphite
GRAPHITE_PORT: 2003
GRAPHITE_PREFIX: collectd.
REPORT_BY_CPU: 'false'
COLLECT_INTERVAL: 10
links:
- graphite
8  docker/blocks/elastic/docker-compose.yaml  (new file)
@@ -0,0 +1,8 @@
elasticsearch:
image: elasticsearch:2.4.1
command: elasticsearch -Des.network.host=0.0.0.0
ports:
- "9200:9200"
- "9300:9300"
volumes:
- ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
@@ -1,8 +0,0 @@
elasticsearch:
image: elasticsearch:2.4.1
command: elasticsearch -Des.network.host=0.0.0.0
ports:
- "9200:9200"
- "9300:9300"
volumes:
- ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
8  docker/blocks/elastic1/docker-compose.yaml  (new file)
@@ -0,0 +1,8 @@
elasticsearch1:
image: elasticsearch:1.7.6
command: elasticsearch -Des.network.host=0.0.0.0
ports:
- "11200:9200"
- "11300:9300"
volumes:
- ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
@@ -1,8 +0,0 @@
elasticsearch1:
image: elasticsearch:1.7.6
command: elasticsearch -Des.network.host=0.0.0.0
ports:
- "11200:9200"
- "11300:9300"
volumes:
- ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
8  docker/blocks/elastic5/docker-compose.yaml  (new file)
@@ -0,0 +1,8 @@
# You need to run 'sysctl -w vm.max_map_count=262144' on the host machine

elasticsearch5:
image: elasticsearch:5
command: elasticsearch
ports:
- "10200:9200"
- "10300:9300"
@@ -1,8 +0,0 @@
# You need to run 'sysctl -w vm.max_map_count=262144' on the host machine

elasticsearch5:
image: elasticsearch:5
command: elasticsearch
ports:
- "10200:9200"
- "10300:9300"
16  docker/blocks/graphite/docker-compose.yaml  (new file)
@@ -0,0 +1,16 @@
graphite:
build: blocks/graphite
ports:
- "8080:80"
- "2003:2003"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro

fake-graphite-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: graphite
FD_PORT: 2003
@@ -1,16 +0,0 @@
graphite:
build: blocks/graphite
ports:
- "8080:80"
- "2003:2003"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro

fake-graphite-data:
image: grafana/fake-data-gen
net: bridge
environment:
FD_DATASOURCE: graphite
FD_PORT: 2003
@@ -1,9 +1,10 @@
FROM phusion/baseimage:0.9.22
MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>


RUN apt-get -y update \
&& apt-get -y upgrade \
&& apt-get -y --force-yes install vim \
&& apt-get -y install vim \
nginx \
python-dev \
python-flup \
@@ -22,38 +23,67 @@ RUN apt-get -y update \
nodejs \
&& rm -rf /var/lib/apt/lists/*

# choose a timezone at build-time
# use `--build-arg CONTAINER_TIMEZONE=Europe/Brussels` in `docker build`
ARG CONTAINER_TIMEZONE
ENV DEBIAN_FRONTEND noninteractive

RUN if [ ! -z "${CONTAINER_TIMEZONE}" ]; \
then ln -sf /usr/share/zoneinfo/$CONTAINER_TIMEZONE /etc/localtime && \
dpkg-reconfigure -f noninteractive tzdata; \
fi

# fix python dependencies (LTS Django and newer memcached/txAMQP)
RUN pip install django==1.8.18 \
RUN pip install --upgrade pip && \
pip install django==1.8.18 \
python-memcached==1.53 \
txAMQP==0.6.2 \
&& pip install --upgrade pip
txAMQP==0.6.2

ARG version=1.0.2
ARG whisper_version=${version}
ARG carbon_version=${version}
ARG graphite_version=${version}

RUN echo "Building Version: $version"

ARG whisper_repo=https://github.com/graphite-project/whisper.git
ARG carbon_repo=https://github.com/graphite-project/carbon.git
ARG graphite_repo=https://github.com/graphite-project/graphite-web.git

ARG statsd_version=v0.8.0

ARG statsd_repo=https://github.com/etsy/statsd.git

# install whisper
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper
RUN git clone -b ${whisper_version} --depth 1 ${whisper_repo} /usr/local/src/whisper
WORKDIR /usr/local/src/whisper
RUN python ./setup.py install

# install carbon
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon
RUN git clone -b ${carbon_version} --depth 1 ${carbon_repo} /usr/local/src/carbon
WORKDIR /usr/local/src/carbon
RUN pip install -r requirements.txt \
&& python ./setup.py install

# install graphite
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
RUN git clone -b ${graphite_version} --depth 1 ${graphite_repo} /usr/local/src/graphite-web
WORKDIR /usr/local/src/graphite-web
RUN pip install -r requirements.txt \
&& python ./setup.py install

# install statsd
RUN git clone -b ${statsd_version} ${statsd_repo} /opt/statsd

# config graphite
ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
# ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
WORKDIR /opt/graphite/webapp
RUN mkdir -p /var/log/graphite/ \
&& PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings

# install statsd
RUN git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
ADD conf/opt/statsd/config.js /opt/statsd/config.js
# config statsd
ADD conf/opt/statsd/config.js /opt/statsd/

# config nginx
RUN rm /etc/nginx/sites-enabled/default
@@ -63,8 +93,7 @@ ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/g
# init django admin
ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
RUN chmod +x /usr/local/bin/manage.sh \
&& /usr/local/bin/django_admin_init.exp
RUN chmod +x /usr/local/bin/manage.sh && /usr/local/bin/django_admin_init.exp

# logging support
RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
@@ -86,8 +115,10 @@ RUN apt-get clean\
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# defaults
EXPOSE 80 2003-2004 2023-2024 8125/udp 8126
EXPOSE 80 2003-2004 2023-2024 8125 8125/udp 8126
VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
WORKDIR /
ENV HOME /root
ENV STATSD_INTERFACE udp

CMD ["/sbin/my_init"]
@@ -12,7 +12,7 @@ graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit)
graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit)
graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit)
if [[ -z $graphite_dir_contents ]]; then
git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
# git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
cd /usr/local/src/graphite-web && python ./setup.py install
fi
if [[ -z $graphite_storage_dir_contents ]]; then
@@ -8,18 +8,18 @@
# Defaults to ../
# GRAPHITE_CONF_DIR - Configuration directory (where this file lives).
# Defaults to $GRAPHITE_ROOT/conf/
# GRAPHITE_STORAGE_DIR - Storage directory for whipser/rrd/log/pid files.
# GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
# Defaults to $GRAPHITE_ROOT/storage/
#
# To change other directory paths, add settings to this file. The following
# configuration variables are available with these default values:
#
# STORAGE_DIR = $GRAPHITE_STORAGE_DIR
# LOCAL_DATA_DIR = STORAGE_DIR/whisper/
# WHITELISTS_DIR = STORAGE_DIR/lists/
# CONF_DIR = STORAGE_DIR/conf/
# LOG_DIR = STORAGE_DIR/log/
# PID_DIR = STORAGE_DIR/
# LOCAL_DATA_DIR = %(STORAGE_DIR)s/whisper/
# WHITELISTS_DIR = %(STORAGE_DIR)s/lists/
# CONF_DIR = %(STORAGE_DIR)s/conf/
# LOG_DIR = %(STORAGE_DIR)s/log/
# PID_DIR = %(STORAGE_DIR)s/
#
# For FHS style directory structures, use:
#
@@ -30,20 +30,30 @@
#
#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/

# Enable daily log rotation. If disabled, a kill -HUP can be used after a manual rotate
# Specify the database library used to store metric data on disk. Each database
# may have configurable options to change the behaviour of how it writes to
# persistent storage.
#
# whisper - Fixed-size database, similar in design and purpose to RRD. This is
# the default storage backend for carbon and the most rigorously tested.
#
# ceres - Experimental alternative database that supports storing data in sparse
# files of arbitrary fixed-size resolutions.
DATABASE = whisper

# Enable daily log rotation. If disabled, a new file will be opened whenever the log file path no
# longer exists (i.e. it is removed or renamed)
ENABLE_LOGROTATION = True

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# If this is blank carbon-cache runs as the user that invokes it
# This user must have write access to the local data directory
USER =
#
# NOTE: The above settings must be set under [relay] and [aggregator]
# to take effect for those daemons as well

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorts and serving cache queries gets more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
# value should be an integer number of metric datapoints.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
@@ -60,14 +70,30 @@ MAX_UPDATES_PER_SECOND = 500
# MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# Setting this value low (e.g. 50) is a good way to ensure that your carbon
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
# sent to it. The trade off is that any metrics received in excess of this
# value will be silently dropped, and the whisper file will not be created
# until such point as a subsequent metric is received and fits within the
# defined rate limit. Setting this value high (like "inf" for infinity) will
# cause carbon to create the files quickly but at the risk of increased I/O.
MAX_CREATES_PER_MINUTE = 50

# Set the minimum timestamp resolution supported by this instance. This allows
# internal optimisations by overwriting points with equal truncated timestamps
# in order to limit the number of updates to the database. It defaults to one
# second.
MIN_TIMESTAMP_RESOLUTION = 1

# Set the minimum lag in seconds for a point to be written to the database
# in order to optimize batching. This means that each point will wait at least
# the duration of this lag before being written. Setting this to 0 disable the feature.
# This currently only works when using the timesorted write strategy.
# MIN_TIMESTAMP_LAG = 0

# Set the interface and port for the line (plain text) listener. Setting the
# interface to 0.0.0.0 listens on all interfaces. Port can be set to 0 to
# disable this listener if it is not required.
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

@@ -78,11 +104,23 @@ ENABLE_UDP_LISTENER = False
UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003

# Set the interface and port for the pickle listener. Setting the interface to
# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this
# listener if it is not required.
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True
# Set the interface and port for the protobuf listener. Setting the interface to
# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this
# listener if it is not required.
# PROTOBUF_RECEIVER_INTERFACE = 0.0.0.0
# PROTOBUF_RECEIVER_PORT = 2005

# Limit the number of open connections the receiver can handle as any time.
# Default is no limit. Setting up a limit for sites handling high volume
# traffic may be recommended to avoid running out of TCP memory or having
# thousands of TCP connections reduce the throughput of the service.
#MAX_RECEIVER_CONNECTIONS = inf

# Per security concerns outlined in Bug #817247 the pickle receiver
# will use a more secure and slightly less efficient unpickler.
@@ -98,13 +136,19 @@ CACHE_QUERY_PORT = 7002
# data until the cache size falls below 95% MAX_CACHE_SIZE.
USE_FLOW_CONTROL = True

# By default, carbon-cache will log every whisper update and cache hit. This can be excessive and
# degrade performance if logging on the same volume as the whisper data is stored.
LOG_UPDATES = False
LOG_CACHE_HITS = False
LOG_CACHE_QUEUE_SORTS = True
# If enabled this setting is used to timeout metric client connection if no
# metrics have been sent in specified time in seconds
#METRIC_CLIENT_IDLE_TIMEOUT = None

# The thread that writes metrics to disk can use on of the following strategies
# By default, carbon-cache will log every whisper update and cache hit.
# This can be excessive and degrade performance if logging on the same
# volume as the whisper data is stored.
LOG_UPDATES = False
LOG_CREATES = False
LOG_CACHE_HITS = False
LOG_CACHE_QUEUE_SORTS = False

# The thread that writes metrics to disk can use one of the following strategies
# determining the order in which metrics are removed from cache and flushed to
# disk. The default option preserves the same behavior as has been historically
# available in version 0.9.10.
@@ -114,6 +158,12 @@ LOG_CACHE_QUEUE_SORTS = True
# moment of the list's creation. Metrics will then be flushed from the cache to
# disk in that order.
#
# timesorted - All metrics in the list will be looked at and sorted according
# to the timestamp of there datapoints. The metric that were the least recently
# written will be written first. This is an hybrid strategy between max and
# sorted which is particularly adapted to sets of metrics with non-uniform
# resolutions.
#
# max - The writer thread will always pop and flush the metric from cache
# that has the most datapoints. This will give a strong flush preference to
# frequently updated metrics and will also reduce random file-io. Infrequently
@@ -152,12 +202,61 @@ WHISPER_FALLOCATE_CREATE = True

# Enabling this option will cause Whisper to lock each Whisper file it writes
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files
# multiple carbon-cache daemons are writing to the same files.
# WHISPER_LOCK_WRITES = False

# On systems which has a large number of metrics, an amount of Whisper write(2)'s
# pageback sometimes cause disk thrashing due to memory shortage, so that abnormal
# disk reads occur. Enabling this option makes it possible to decrease useless
# page cache memory by posix_fadvise(2) with POSIX_FADVISE_RANDOM option.
# WHISPER_FADVISE_RANDOM = False

# By default all nodes stored in Ceres are cached in memory to improve the
# throughput of reads and writes to underlying slices. Turning this off will
# greatly reduce memory consumption for databases with millions of metrics, at
# the cost of a steep increase in disk i/o, approximately an extra two os.stat
# calls for every read and write. Reasons to do this are if the underlying
# storage can handle stat() with practically zero cost (SSD, NVMe, zRAM).
# Valid values are:
# all - all nodes are cached
# none - node caching is disabled
# CERES_NODE_CACHING_BEHAVIOR = all

# Ceres nodes can have many slices and caching the right ones can improve
# performance dramatically. Note that there are many trade-offs to tinkering
# with this, and unless you are a ceres developer you *really* should not
# mess with this. Valid values are:
# latest - only the most recent slice is cached
# all - all slices are cached
# none - slice caching is disabled
# CERES_SLICE_CACHING_BEHAVIOR = latest

# If a Ceres node accumulates too many slices, performance can suffer.
# This can be caused by intermittently reported data. To mitigate
# slice fragmentation there is a tolerance for how much space can be
# wasted within a slice file to avoid creating a new one. That tolerance
# level is determined by MAX_SLICE_GAP, which is the number of consecutive
# null datapoints allowed in a slice file.
# If you set this very low, you will waste less of the *tiny* bit disk space
# that this feature wastes, and you will be prone to performance problems
# caused by slice fragmentation, which can be pretty severe.
# If you set this really high, you will waste a bit more disk space (each
# null datapoint wastes 8 bytes, but keep in mind your filesystem's block
# size). If you suffer slice fragmentation issues, you should increase this or
# run the ceres-maintenance defrag plugin more often. However you should not
# set it to be huge because then if a large but allowed gap occurs it has to
# get filled in, which means instead of a simple 8-byte write to a new file we
# could end up doing an (8 * MAX_SLICE_GAP)-byte write to the latest slice.
# CERES_MAX_SLICE_GAP = 80

# Enabling this option will cause Ceres to lock each Ceres file it writes to
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files.
# CERES_LOCK_WRITES = False

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
# missing or empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
@@ -203,16 +302,25 @@
# Example: store everything
# BIND_PATTERNS = #

# URL of graphite-web instance, this is used to add incoming series to the tag database
GRAPHITE_URL = http://127.0.0.1:80

# Tag update interval, this specifies how frequently updates to existing series will trigger
# an update to the tag index, the default setting is once every 100 updates
# TAG_UPDATE_INTERVAL = 100

# To configure special settings for the carbon-cache instance 'b', uncomment this:
#[cache:b]
#LINE_RECEIVER_PORT = 2103
#PICKLE_RECEIVER_PORT = 2104
#CACHE_QUERY_PORT = 7102
# and any other settings you want to customize, defaults are inherited
# from [carbon] section.
# from the [cache] section.
# You can then specify the --instance=b option to manage this instance


#
# In order to turn off logging of successful connections for the line
# receiver, set this to False
# LOG_LISTENER_CONN_SUCCESS = True

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
@@ -220,9 +328,6 @@ LINE_RECEIVER_PORT = 2013
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2014

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# Carbon-relay has several options for metric routing controlled by RELAY_METHOD
#
# Use relay-rules.conf to route metrics to destinations based on pattern rules
@@ -237,12 +342,24 @@ LOG_LISTENER_CONNECTIONS = True
# instance.
# Enable this for carbon-relays that send to a group of carbon-aggregators
#RELAY_METHOD = aggregated-consistent-hashing
#
# You can also use fast-hashing and fast-aggregated-hashing which are in O(1)
# and will always redirect the metrics to the same destination but do not try
# to minimize rebalancing when the list of destinations is changing.
RELAY_METHOD = rules

# If you use consistent-hashing you can add redundancy by replicating every
# datapoint to more than one machine.
REPLICATION_FACTOR = 1

# For REPLICATION_FACTOR >=2, set DIVERSE_REPLICAS to True to guarantee replicas
# across distributed hosts. With this setting disabled, it's possible that replicas
# may be sent to different caches on the same host. This has been the default
# behavior since introduction of 'consistent-hashing' relay method.
# Note that enabling this on an existing pre-0.9.14 cluster will require rebalancing
# your metrics across the cluster nodes using a tool like Carbonate.
#DIVERSE_REPLICAS = True

# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
@@ -261,20 +378,71 @@ REPLICATION_FACTOR = 1
# must be defined in this list
DESTINATIONS = 127.0.0.1:2004

# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500
# This define the protocol to use to contact the destination. It can be
# set to one of "line", "pickle", "udp" and "protobuf". This list can be
# extended with CarbonClientFactory plugins and defaults to "pickle".
# DESTINATION_PROTOCOL = pickle

# When using consistent hashing it sometime makes sense to make
# the ring dynamic when you don't want to loose points when a
# single destination is down. Replication is an answer to that
# but it can be quite expensive.
# DYNAMIC_ROUTER = False

# Controls the number of connection attempts before marking a
# destination as down. We usually do one connection attempt per
# second.
# DYNAMIC_ROUTER_MAX_RETRIES = 5

# This is the maximum number of datapoints that can be queued up
# for a single destination. Once this limit is hit, we will
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
# we will drop any subsequently received datapoints.
MAX_QUEUE_SIZE = 10000

# This defines the maximum "message size" between carbon daemons. If
# your queue is large, setting this to a lower number will cause the
# relay to forward smaller discrete chunks of stats, which may prevent
# overloading on the receiving side after a disconnect.
MAX_DATAPOINTS_PER_MESSAGE = 500

# Limit the number of open connections the receiver can handle as any time.
# Default is no limit. Setting up a limit for sites handling high volume
# traffic may be recommended to avoid running out of TCP memory or having
# thousands of TCP connections reduce the throughput of the service.
#MAX_RECEIVER_CONNECTIONS = inf

# Specify the user to drop privileges to
# If this is blank carbon-relay runs as the user that invokes it
# USER =

# This is the percentage that the queue must be empty before it will accept
# more messages. For a larger site, if the queue is very large it makes sense
# to tune this to allow for incoming stats. So if you have an average
# flow of 100k stats/minute, and a MAX_QUEUE_SIZE of 3,000,000, it makes sense
# to allow stats to start flowing when you've cleared the queue to 95% since
# you should have space to accommodate the next minute's worth of stats
# even before the relay incrementally clears more of the queue
QUEUE_LOW_WATERMARK_PCT = 0.8

# To allow for batch efficiency from the pickle protocol and to benefit from
# other batching advantages, all writes are deferred by putting them into a queue,
# and then the queue is flushed and sent a small fraction of a second later.
TIME_TO_DEFER_SENDING = 0.0001

# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
# data until the send queues fall below QUEUE_LOW_WATERMARK_PCT * MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True

# If enabled this setting is used to timeout metric client connection if no
# metrics have been sent in specified time in seconds
#METRIC_CLIENT_IDLE_TIMEOUT = None

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
# missing or empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
@@ -282,7 +450,40 @@ USE_FLOW_CONTROL = True
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
#
# In order to turn off logging of successful connections for the line
# receiver, set this to False
# LOG_LISTENER_CONN_SUCCESS = True

# If you're connecting from the relay to a destination that's over the
# internet or similarly iffy connection, a backlog can develop because
# of internet weather conditions, e.g. acks getting lost or similar issues.
# To deal with that, you can enable USE_RATIO_RESET which will let you
# re-set the connection to an individual destination. Defaults to being off.
USE_RATIO_RESET=False

# When there is a small number of stats flowing, it's not desirable to
# perform any actions based on percentages - it's just too "twitchy".
MIN_RESET_STAT_FLOW=1000

# When the ratio of stats being sent in a reporting interval is far
# enough from 1.0, we will disconnect the socket and reconnecto to
# clear out queued stats. The default ratio of 0.9 indicates that 10%
# of stats aren't being delivered within one CARBON_METRIC_INTERVAL
# (default of 60 seconds), which can lead to a queue backup. Under
# some circumstances re-setting the connection can fix this, so
# set this according to your tolerance, and look in the logs for
# "resetConnectionForQualityReasons" to observe whether this is kicking
# in when your sent queue is building up.
MIN_RESET_RATIO=0.9

# The minimum time between resets. When a connection is re-set, we
# need to wait before another reset is performed.
# (2*CARBON_METRIC_INTERVAL) + 1 second is the minimum time needed
# before stats for the new connection will be available. Setting this
# below (2*CARBON_METRIC_INTERVAL) + 1 second will result in a lot of
# reset connections for no good reason.
MIN_RESET_INTERVAL=121

[aggregator]
LINE_RECEIVER_INTERFACE = 0.0.0.0
@@ -291,14 +492,17 @@ LINE_RECEIVER_PORT = 2023
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2024

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# If set true, metric received will be forwarded to DESTINATIONS in addition to
# the output of the aggregation rules. If set false the carbon-aggregator will
# only ever send the output of aggregation.
FORWARD_ALL = True

# Filenames of the configuration files to use for this instance of aggregator.
# Filenames are relative to CONF_DIR.
#
# AGGREGATION_RULES = aggregation-rules.conf
# REWRITE_RULES = rewrite-rules.conf

# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
@@ -330,6 +534,10 @@ MAX_QUEUE_SIZE = 10000
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True

# If enabled this setting is used to timeout metric client connection if no
# metrics have been sent in specified time in seconds
#METRIC_CLIENT_IDLE_TIMEOUT = None

# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500
@@ -339,6 +547,12 @@ MAX_DATAPOINTS_PER_MESSAGE = 500
# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
MAX_AGGREGATION_INTERVALS = 5

# Limit the number of open connections the receiver can handle as any time.
# Default is no limit. Setting up a limit for sites handling high volume
# traffic may be recommended to avoid running out of TCP memory or having
# thousands of TCP connections reduce the throughput of the service.
#MAX_RECEIVER_CONNECTIONS = inf

# By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back
# aggregated data points once every rule.frequency seconds, on a per-rule basis.
# Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points
@@ -348,8 +562,8 @@ MAX_AGGREGATION_INTERVALS = 5
# WRITE_BACK_FREQUENCY = 0

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
# missing or empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
@@ -357,3 +571,24 @@
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60

# In order to turn off logging of successful connections for the line
# receiver, set this to False
# LOG_LISTENER_CONN_SUCCESS = True

# In order to turn off logging of metrics with no corresponding
# aggregation rules receiver, set this to False
# LOG_AGGREGATOR_MISSES = False

# Specify the user to drop privileges to
# If this is blank carbon-aggregator runs as the user that invokes it
# USER =

# Part of the code, and particularly aggregator rules, need
# to cache metric names. To avoid leaking too much memory you
# can tweak the size of this cache. The default allow for 1M
# different metrics per rule (~200MiB).
# CACHE_METRIC_NAMES_MAX=1000000

# You can optionally set a ttl to this cache.
# CACHE_METRIC_NAMES_TTL=600
@@ -40,4 +40,3 @@ aggregationMethod = sum
pattern = .*
xFilesFactor = 0.3
aggregationMethod = average
@@ -1,4 +1,23 @@
# Schema definitions for Whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds.
#
# Definition Syntax:
#
# [name]
# pattern = regex
# retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ...
#
# Remember: To support accurate aggregation from higher to lower resolution
# archives, the precision of a longer retention archive must be
# cleanly divisible by precision of next lower retention archive.
#
# Valid: 60s:7d,300s:30d (300/60 = 5)
# Invalid: 180s:7d,300s:30d (300/180 = 3.333)
#

# Carbon's internal metrics. This entry should match what is specified in
# CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings

[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y
|
||||
|
2
docker/blocks/graphite1/conf/usr/local/bin/manage.sh
Normal file → Executable file
2
docker/blocks/graphite1/conf/usr/local/bin/manage.sh
Normal file → Executable file
@ -1,3 +1,3 @@
|
||||
#!/bin/bash
|
||||
PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings
|
||||
PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
|
||||
# PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
|
21
docker/blocks/graphite1/docker-compose.yaml
Normal file
21
docker/blocks/graphite1/docker-compose.yaml
Normal file
@ -0,0 +1,21 @@
|
||||
graphite:
|
||||
build:
|
||||
context: blocks/graphite1
|
||||
args:
|
||||
version: master
|
||||
ports:
|
||||
- "8080:80"
|
||||
- "2003:2003"
|
||||
- "8125:8125/udp"
|
||||
- "8126:8126"
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
|
||||
fake-graphite-data:
|
||||
image: grafana/fake-data-gen
|
||||
network_mode: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: graphite
|
||||
FD_PORT: 2003
|
||||
|
@ -1,16 +0,0 @@
|
||||
graphite:
|
||||
build: blocks/graphite1
|
||||
ports:
|
||||
- "8080:80"
|
||||
- "2003:2003"
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
|
||||
fake-graphite-data:
|
||||
image: grafana/fake-data-gen
|
||||
net: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: graphite
|
||||
FD_PORT: 2003
|
||||
|
@ -1,76 +0,0 @@
|
||||
[cache]
|
||||
LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
|
||||
|
||||
# Specify the user to drop privileges to
|
||||
# If this is blank carbon runs as the user that invokes it
|
||||
# This user must have write access to the local data directory
|
||||
USER =
|
||||
|
||||
# Limit the size of the cache to avoid swapping or becoming CPU bound.
|
||||
# Sorts and serving cache queries gets more expensive as the cache grows.
|
||||
# Use the value "inf" (infinity) for an unlimited cache size.
|
||||
MAX_CACHE_SIZE = inf
|
||||
|
||||
# Limits the number of whisper update_many() calls per second, which effectively
|
||||
# means the number of write requests sent to the disk. This is intended to
|
||||
# prevent over-utilizing the disk and thus starving the rest of the system.
|
||||
# When the rate of required updates exceeds this, then carbon's caching will
|
||||
# take effect and increase the overall throughput accordingly.
|
||||
MAX_UPDATES_PER_SECOND = 1000
|
||||
|
||||
# Softly limits the number of whisper files that get created each minute.
|
||||
# Setting this value low (like at 50) is a good way to ensure your graphite
|
||||
# system will not be adversely impacted when a bunch of new metrics are
|
||||
# sent to it. The trade off is that it will take much longer for those metrics'
|
||||
# database files to all get created and thus longer until the data becomes usable.
|
||||
# Setting this value high (like "inf" for infinity) will cause graphite to create
|
||||
# the files quickly but at the risk of slowing I/O down considerably for a while.
|
||||
MAX_CREATES_PER_MINUTE = inf
|
||||
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2003
|
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2004
|
||||
|
||||
CACHE_QUERY_INTERFACE = 0.0.0.0
|
||||
CACHE_QUERY_PORT = 7002
|
||||
|
||||
LOG_UPDATES = False
|
||||
|
||||
# Enable AMQP if you want to receve metrics using an amqp broker
|
||||
# ENABLE_AMQP = False
|
||||
|
||||
# Verbose means a line will be logged for every metric received
|
||||
# useful for testing
|
||||
# AMQP_VERBOSE = False
|
||||
|
||||
# AMQP_HOST = localhost
|
||||
# AMQP_PORT = 5672
|
||||
# AMQP_VHOST = /
|
||||
# AMQP_USER = guest
|
||||
# AMQP_PASSWORD = guest
|
||||
# AMQP_EXCHANGE = graphite
|
||||
|
||||
# Patterns for all of the metrics this machine will store. Read more at
|
||||
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
|
||||
#
|
||||
# Example: store all sales, linux servers, and utilization metrics
|
||||
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
|
||||
#
|
||||
# Example: store everything
|
||||
# BIND_PATTERNS = #
|
||||
|
||||
# NOTE: you cannot run both a cache and a relay on the same server
|
||||
# with the default configuration, you have to specify a distinict
|
||||
# interfaces and ports for the listeners.
|
||||
|
||||
[relay]
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2003
|
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2004
|
||||
|
||||
CACHE_SERVERS = server1, server2, server3
|
||||
MAX_QUEUE_SIZE = 10000
|
@ -1,102 +0,0 @@
|
||||
import datetime
|
||||
import time
|
||||
|
||||
from django.utils.timezone import get_current_timezone
|
||||
from django.core.urlresolvers import get_script_prefix
|
||||
from django.http import HttpResponse
|
||||
from django.shortcuts import render_to_response, get_object_or_404
|
||||
from pytz import timezone
|
||||
|
||||
from graphite.util import json
|
||||
from graphite.events import models
|
||||
from graphite.render.attime import parseATTime
|
||||
|
||||
|
||||
def to_timestamp(dt):
|
||||
return time.mktime(dt.timetuple())
|
||||
|
||||
|
||||
class EventEncoder(json.JSONEncoder):
|
||||
def default(self, obj):
|
||||
if isinstance(obj, datetime.datetime):
|
||||
return to_timestamp(obj)
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
def view_events(request):
|
||||
if request.method == "GET":
|
||||
context = { 'events' : fetch(request),
|
||||
'slash' : get_script_prefix()
|
||||
}
|
||||
return render_to_response("events.html", context)
|
||||
else:
|
||||
return post_event(request)
|
||||
|
||||
def detail(request, event_id):
|
||||
e = get_object_or_404(models.Event, pk=event_id)
|
||||
context = { 'event' : e,
|
||||
'slash' : get_script_prefix()
|
||||
}
|
||||
return render_to_response("event.html", context)
|
||||
|
||||
|
||||
def post_event(request):
|
||||
if request.method == 'POST':
|
||||
event = json.loads(request.body)
|
||||
assert isinstance(event, dict)
|
||||
|
||||
values = {}
|
||||
values["what"] = event["what"]
|
||||
values["tags"] = event.get("tags", None)
|
||||
values["when"] = datetime.datetime.fromtimestamp(
|
||||
event.get("when", time.time()))
|
||||
if "data" in event:
|
||||
values["data"] = event["data"]
|
||||
|
||||
e = models.Event(**values)
|
||||
e.save()
|
||||
|
||||
return HttpResponse(status=200)
|
||||
else:
|
||||
return HttpResponse(status=405)
|
||||
|
||||
def get_data(request):
|
||||
if 'jsonp' in request.REQUEST:
|
||||
response = HttpResponse(
|
||||
"%s(%s)" % (request.REQUEST.get('jsonp'),
|
||||
json.dumps(fetch(request), cls=EventEncoder)),
|
||||
mimetype='text/javascript')
|
||||
else:
|
||||
response = HttpResponse(
|
||||
json.dumps(fetch(request), cls=EventEncoder),
|
||||
mimetype="application/json")
|
||||
return response
|
||||
|
||||
def fetch(request):
|
||||
#XXX we need to move to USE_TZ=True to get rid of naive-time conversions
|
||||
def make_naive(dt):
|
||||
if 'tz' in request.GET:
|
||||
tz = timezone(request.GET['tz'])
|
||||
else:
|
||||
tz = get_current_timezone()
|
||||
local_dt = dt.astimezone(tz)
|
||||
if hasattr(local_dt, 'normalize'):
|
||||
local_dt = local_dt.normalize()
|
||||
return local_dt.replace(tzinfo=None)
|
||||
|
||||
if request.GET.get("from", None) is not None:
|
||||
time_from = make_naive(parseATTime(request.GET["from"]))
|
||||
else:
|
||||
time_from = datetime.datetime.fromtimestamp(0)
|
||||
|
||||
if request.GET.get("until", None) is not None:
|
||||
time_until = make_naive(parseATTime(request.GET["until"]))
|
||||
else:
|
||||
time_until = datetime.datetime.now()
|
||||
|
||||
tags = request.GET.get("tags", None)
|
||||
if tags is not None:
|
||||
tags = request.GET.get("tags").split(" ")
|
||||
|
||||
return [x.as_dict() for x in
|
||||
models.Event.find_events(time_from, time_until, tags=tags)]
|
@ -1,20 +0,0 @@
|
||||
[
|
||||
{
|
||||
"pk": 1,
|
||||
"model": "auth.user",
|
||||
"fields": {
|
||||
"username": "admin",
|
||||
"first_name": "",
|
||||
"last_name": "",
|
||||
"is_active": true,
|
||||
"is_superuser": true,
|
||||
"is_staff": true,
|
||||
"last_login": "2011-09-20 17:02:14",
|
||||
"groups": [],
|
||||
"user_permissions": [],
|
||||
"password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236",
|
||||
"email": "root@example.com",
|
||||
"date_joined": "2011-09-20 17:02:14"
|
||||
}
|
||||
}
|
||||
]
|
@ -1,42 +0,0 @@
|
||||
# Edit this file to override the default graphite settings, do not edit settings.py
|
||||
|
||||
# Turn on debugging and restart apache if you ever see an "Internal Server Error" page
|
||||
#DEBUG = True
|
||||
|
||||
# Set your local timezone (django will try to figure this out automatically)
|
||||
TIME_ZONE = 'UTC'
|
||||
|
||||
# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely
|
||||
#MEMCACHE_HOSTS = ['127.0.0.1:11211']
|
||||
|
||||
# Sometimes you need to do a lot of rendering work but cannot share your storage mount
|
||||
#REMOTE_RENDERING = True
|
||||
#RENDERING_HOSTS = ['fastserver01','fastserver02']
|
||||
#LOG_RENDERING_PERFORMANCE = True
|
||||
#LOG_CACHE_PERFORMANCE = True
|
||||
|
||||
# If you've got more than one backend server they should all be listed here
|
||||
#CLUSTER_SERVERS = []
|
||||
|
||||
# Override this if you need to provide documentation specific to your graphite deployment
|
||||
#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite"
|
||||
|
||||
# Enable email-related features
|
||||
#SMTP_SERVER = "mail.mycompany.com"
|
||||
|
||||
# LDAP / ActiveDirectory authentication setup
|
||||
#USE_LDAP_AUTH = True
|
||||
#LDAP_SERVER = "ldap.mycompany.com"
|
||||
#LDAP_PORT = 389
|
||||
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_PASS = "readonly_account_password"
|
||||
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
|
||||
|
||||
# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!)
|
||||
#DATABASE_ENGINE = 'mysql' # or 'postgres'
|
||||
#DATABASE_NAME = 'graphite'
|
||||
#DATABASE_USER = 'graphite'
|
||||
#DATABASE_PASSWORD = 'graphite-is-awesome'
|
||||
#DATABASE_HOST = 'mysql.mycompany.com'
|
||||
#DATABASE_PORT = '3306'
|
@ -1 +0,0 @@
|
||||
grafana:$apr1$4R/20xhC$8t37jPP5dbcLr48btdkU//
|
@ -1,70 +0,0 @@
|
||||
daemon off;
|
||||
user www-data;
|
||||
worker_processes 1;
|
||||
pid /var/run/nginx.pid;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
server_tokens off;
|
||||
|
||||
server_names_hash_bucket_size 32;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
access_log /var/log/nginx/access.log;
|
||||
error_log /var/log/nginx/error.log;
|
||||
|
||||
gzip on;
|
||||
gzip_disable "msie6";
|
||||
|
||||
server {
|
||||
listen 80 default_server;
|
||||
server_name _;
|
||||
|
||||
open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:8000;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Server $host;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header Host $host;
|
||||
|
||||
client_max_body_size 10m;
|
||||
client_body_buffer_size 128k;
|
||||
|
||||
proxy_connect_timeout 90;
|
||||
proxy_send_timeout 90;
|
||||
proxy_read_timeout 90;
|
||||
|
||||
proxy_buffer_size 4k;
|
||||
proxy_buffers 4 32k;
|
||||
proxy_busy_buffers_size 64k;
|
||||
proxy_temp_file_write_size 64k;
|
||||
}
|
||||
|
||||
add_header Access-Control-Allow-Origin "*";
|
||||
add_header Access-Control-Allow-Methods "GET, OPTIONS";
|
||||
add_header Access-Control-Allow-Headers "origin, authorization, accept";
|
||||
|
||||
location /content {
|
||||
alias /opt/graphite/webapp/content;
|
||||
|
||||
}
|
||||
|
||||
location /media {
|
||||
alias /usr/share/pyshared/django/contrib/admin/media;
|
||||
}
|
||||
}
|
||||
}
|
@ -1,8 +0,0 @@
|
||||
{
|
||||
graphitePort: 2003,
|
||||
graphiteHost: "127.0.0.1",
|
||||
port: 8125,
|
||||
mgmt_port: 8126,
|
||||
backends: ['./backends/graphite'],
|
||||
debug: true
|
||||
}
|
@ -1,19 +0,0 @@
|
||||
[min]
|
||||
pattern = \.min$
|
||||
xFilesFactor = 0.1
|
||||
aggregationMethod = min
|
||||
|
||||
[max]
|
||||
pattern = \.max$
|
||||
xFilesFactor = 0.1
|
||||
aggregationMethod = max
|
||||
|
||||
[sum]
|
||||
pattern = \.count$
|
||||
xFilesFactor = 0
|
||||
aggregationMethod = sum
|
||||
|
||||
[default_average]
|
||||
pattern = .*
|
||||
xFilesFactor = 0.5
|
||||
aggregationMethod = average
|
@ -1,16 +0,0 @@
|
||||
[carbon]
|
||||
pattern = ^carbon\..*
|
||||
retentions = 1m:31d,10m:1y,1h:5y
|
||||
|
||||
[highres]
|
||||
pattern = ^highres.*
|
||||
retentions = 1s:1d,1m:7d
|
||||
|
||||
[statsd]
|
||||
pattern = ^statsd.*
|
||||
retentions = 1m:7d,10m:1y
|
||||
|
||||
[default]
|
||||
pattern = .*
|
||||
retentions = 10s:1d,1m:7d,10m:1y
|
||||
|
@ -1,26 +0,0 @@
|
||||
[supervisord]
|
||||
nodaemon = true
|
||||
environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf'
|
||||
|
||||
[program:nginx]
|
||||
command = /usr/sbin/nginx
|
||||
stdout_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
autorestart = true
|
||||
|
||||
[program:carbon-cache]
|
||||
;user = www-data
|
||||
command = /opt/graphite/bin/carbon-cache.py --debug start
|
||||
stdout_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
autorestart = true
|
||||
|
||||
[program:graphite-webapp]
|
||||
;user = www-data
|
||||
directory = /opt/graphite/webapp
|
||||
environment = PYTHONPATH='/opt/graphite/webapp'
|
||||
command = /usr/bin/gunicorn_django -b127.0.0.1:8000 -w2 graphite/settings.py
|
||||
stdout_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
autorestart = true
|
||||
|
17
docker/blocks/influxdb/docker-compose.yaml
Normal file
17
docker/blocks/influxdb/docker-compose.yaml
Normal file
@ -0,0 +1,17 @@
|
||||
influxdb:
|
||||
image: influxdb:latest
|
||||
container_name: influxdb
|
||||
ports:
|
||||
- "2004:2004"
|
||||
- "8083:8083"
|
||||
- "8086:8086"
|
||||
volumes:
|
||||
- ./blocks/influxdb/influxdb.conf:/etc/influxdb/influxdb.conf
|
||||
|
||||
fake-influxdb-data:
|
||||
image: grafana/fake-data-gen
|
||||
network_mode: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: influxdb
|
||||
FD_PORT: 8086
|
||||
|
@ -1,17 +0,0 @@
|
||||
influxdb:
|
||||
image: influxdb:latest
|
||||
container_name: influxdb
|
||||
ports:
|
||||
- "2004:2004"
|
||||
- "8083:8083"
|
||||
- "8086:8086"
|
||||
volumes:
|
||||
- ./blocks/influxdb/influxdb.conf:/etc/influxdb/influxdb.conf
|
||||
|
||||
fake-influxdb-data:
|
||||
image: grafana/fake-data-gen
|
||||
net: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: influxdb
|
||||
FD_PORT: 8086
|
||||
|
6
docker/blocks/jaeger/docker-compose.yaml
Normal file
6
docker/blocks/jaeger/docker-compose.yaml
Normal file
@ -0,0 +1,6 @@
|
||||
jaeger:
|
||||
image: jaegertracing/all-in-one:latest
|
||||
ports:
|
||||
- "127.0.0.1:6831:6831/udp"
|
||||
- "16686:16686"
|
||||
|
@ -1,6 +0,0 @@
|
||||
jaeger:
|
||||
image: jaegertracing/all-in-one:latest
|
||||
ports:
|
||||
- "localhost:6831:6831/udp"
|
||||
- "16686:16686"
|
||||
|
5
docker/blocks/memcached/docker-compose.yaml
Normal file
5
docker/blocks/memcached/docker-compose.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
memcached:
|
||||
image: memcached:latest
|
||||
ports:
|
||||
- "11211:11211"
|
||||
|
@ -1,5 +0,0 @@
|
||||
memcached:
|
||||
image: memcached:latest
|
||||
ports:
|
||||
- "11211:11211"
|
||||
|
14
docker/blocks/mysql/docker-compose.yaml
Normal file
14
docker/blocks/mysql/docker-compose.yaml
Normal file
@ -0,0 +1,14 @@
|
||||
mysql:
|
||||
image: mysql:latest
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: rootpass
|
||||
MYSQL_DATABASE: grafana
|
||||
MYSQL_USER: grafana
|
||||
MYSQL_PASSWORD: password
|
||||
ports:
|
||||
- "3306:3306"
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]
|
||||
|
@ -1,14 +0,0 @@
|
||||
mysql:
|
||||
image: mysql:latest
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: rootpass
|
||||
MYSQL_DATABASE: grafana
|
||||
MYSQL_USER: grafana
|
||||
MYSQL_PASSWORD: password
|
||||
ports:
|
||||
- "3306:3306"
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]
|
||||
|
9
docker/blocks/mysql_opendata/docker-compose.yaml
Normal file
9
docker/blocks/mysql_opendata/docker-compose.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
mysql_opendata:
|
||||
build: blocks/mysql_opendata
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: rootpass
|
||||
MYSQL_DATABASE: testdata
|
||||
MYSQL_USER: grafana
|
||||
MYSQL_PASSWORD: password
|
||||
ports:
|
||||
- "3307:3306"
|
@ -1,9 +0,0 @@
|
||||
mysql_opendata:
|
||||
build: blocks/mysql_opendata
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: rootpass
|
||||
MYSQL_DATABASE: testdata
|
||||
MYSQL_USER: grafana
|
||||
MYSQL_PASSWORD: password
|
||||
ports:
|
||||
- "3307:3306"
|
9
docker/blocks/mysql_tests/docker-compose.yaml
Normal file
9
docker/blocks/mysql_tests/docker-compose.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
mysqltests:
|
||||
image: mysql:latest
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: rootpass
|
||||
MYSQL_DATABASE: grafana_tests
|
||||
MYSQL_USER: grafana
|
||||
MYSQL_PASSWORD: password
|
||||
ports:
|
||||
- "3306:3306"
|
@ -1,9 +0,0 @@
|
||||
mysqltests:
|
||||
image: mysql:latest
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: rootpass
|
||||
MYSQL_DATABASE: grafana_tests
|
||||
MYSQL_USER: grafana
|
||||
MYSQL_PASSWORD: password
|
||||
ports:
|
||||
- "3306:3306"
|
@ -1,6 +1,6 @@
|
||||
FROM debian:jessie
|
||||
|
||||
MAINTAINER Christian Luginbühl <dinke@pimprecords.com>
|
||||
LABEL maintainer="Christian Luginbühl <dinke@pimprecords.com>"
|
||||
|
||||
ENV OPENLDAP_VERSION 2.4.40
|
||||
|
||||
|
10
docker/blocks/openldap/docker-compose.yaml
Normal file
10
docker/blocks/openldap/docker-compose.yaml
Normal file
@ -0,0 +1,10 @@
|
||||
openldap:
|
||||
build: blocks/openldap
|
||||
environment:
|
||||
SLAPD_PASSWORD: grafana
|
||||
SLAPD_DOMAIN: grafana.org
|
||||
SLAPD_ADDITIONAL_MODULES: memberof
|
||||
ports:
|
||||
- "389:389"
|
||||
|
||||
|
@ -1,10 +0,0 @@
|
||||
openldap:
|
||||
build: blocks/openldap
|
||||
environment:
|
||||
SLAPD_PASSWORD: grafana
|
||||
SLAPD_DOMAIN: grafana.org
|
||||
SLAPD_ADDITIONAL_MODULES: memberof
|
||||
ports:
|
||||
- "389:389"
|
||||
|
||||
|
11
docker/blocks/opentsdb/docker-compose.yaml
Normal file
11
docker/blocks/opentsdb/docker-compose.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
opentsdb:
|
||||
image: opower/opentsdb:latest
|
||||
ports:
|
||||
- "4242:4242"
|
||||
|
||||
fake-opentsdb-data:
|
||||
image: grafana/fake-data-gen
|
||||
network_mode: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: opentsdb
|
||||
|
@ -1,11 +0,0 @@
|
||||
opentsdb:
|
||||
image: opower/opentsdb:latest
|
||||
ports:
|
||||
- "4242:4242"
|
||||
|
||||
fake-opentsdb-data:
|
||||
image: grafana/fake-data-gen
|
||||
net: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: opentsdb
|
||||
|
9
docker/blocks/postgres/docker-compose.yaml
Normal file
9
docker/blocks/postgres/docker-compose.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
postgrestest:
|
||||
image: postgres:latest
|
||||
environment:
|
||||
POSTGRES_USER: grafana
|
||||
POSTGRES_PASSWORD: password
|
||||
POSTGRES_DATABASE: grafana
|
||||
ports:
|
||||
- "5432:5432"
|
||||
command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql
|
@ -1,9 +0,0 @@
|
||||
postgrestest:
|
||||
image: postgres:9.4.14
|
||||
environment:
|
||||
POSTGRES_USER: grafana
|
||||
POSTGRES_PASSWORD: password
|
||||
POSTGRES_DATABASE: grafana
|
||||
ports:
|
||||
- "5432:5432"
|
||||
command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql
|
7
docker/blocks/postgres_tests/docker-compose.yaml
Normal file
7
docker/blocks/postgres_tests/docker-compose.yaml
Normal file
@ -0,0 +1,7 @@
|
||||
postgrestest:
|
||||
image: postgres:latest
|
||||
environment:
|
||||
POSTGRES_USER: grafanatest
|
||||
POSTGRES_PASSWORD: grafanatest
|
||||
ports:
|
||||
- "5432:5432"
|
@ -1,7 +0,0 @@
|
||||
postgrestest:
|
||||
image: postgres:latest
|
||||
environment:
|
||||
POSTGRES_USER: grafanatest
|
||||
POSTGRES_PASSWORD: grafanatest
|
||||
ports:
|
||||
- "5432:5432"
|
25
docker/blocks/prometheus/docker-compose.yaml
Normal file
25
docker/blocks/prometheus/docker-compose.yaml
Normal file
@ -0,0 +1,25 @@
|
||||
prometheus:
|
||||
build: blocks/prometheus
|
||||
network_mode: host
|
||||
ports:
|
||||
- "9090:9090"
|
||||
|
||||
node_exporter:
|
||||
image: prom/node-exporter
|
||||
network_mode: host
|
||||
ports:
|
||||
- "9100:9100"
|
||||
|
||||
fake-prometheus-data:
|
||||
image: grafana/fake-data-gen
|
||||
network_mode: host
|
||||
ports:
|
||||
- "9091:9091"
|
||||
environment:
|
||||
FD_DATASOURCE: prom
|
||||
|
||||
alertmanager:
|
||||
image: quay.io/prometheus/alertmanager
|
||||
network_mode: host
|
||||
ports:
|
||||
- "9093:9093"
|
@ -1,25 +0,0 @@
|
||||
prometheus:
|
||||
build: blocks/prometheus
|
||||
net: host
|
||||
ports:
|
||||
- "9090:9090"
|
||||
|
||||
node_exporter:
|
||||
image: prom/node-exporter
|
||||
net: host
|
||||
ports:
|
||||
- "9100:9100"
|
||||
|
||||
fake-prometheus-data:
|
||||
image: grafana/fake-data-gen
|
||||
net: host
|
||||
ports:
|
||||
- "9091:9091"
|
||||
environment:
|
||||
FD_DATASOURCE: prom
|
||||
|
||||
alertmanager:
|
||||
image: quay.io/prometheus/alertmanager
|
||||
net: host
|
||||
ports:
|
||||
- "9093:9093"
|
3
docker/blocks/prometheus2/Dockerfile
Normal file
3
docker/blocks/prometheus2/Dockerfile
Normal file
@ -0,0 +1,3 @@
|
||||
FROM prom/prometheus:v2.0.0
|
||||
ADD prometheus.yml /etc/prometheus/
|
||||
ADD alert.rules /etc/prometheus/
|
10
docker/blocks/prometheus2/alert.rules
Normal file
10
docker/blocks/prometheus2/alert.rules
Normal file
@ -0,0 +1,10 @@
|
||||
# Alert Rules
|
||||
|
||||
ALERT AppCrash
|
||||
IF process_open_fds > 0
|
||||
FOR 15s
|
||||
LABELS { severity="critical" }
|
||||
ANNOTATIONS {
|
||||
summary = "Number of open fds > 0",
|
||||
description = "Just testing"
|
||||
}
|
35
docker/blocks/prometheus2/prometheus.yml
Normal file
35
docker/blocks/prometheus2/prometheus.yml
Normal file
@ -0,0 +1,35 @@
|
||||
# my global config
|
||||
global:
|
||||
scrape_interval: 10s # By default, scrape targets every 15 seconds.
|
||||
evaluation_interval: 10s # By default, scrape targets every 15 seconds.
|
||||
# scrape_timeout is set to the global default (10s).
|
||||
|
||||
# Load and evaluate rules in this file every 'evaluation_interval' seconds.
|
||||
#rule_files:
|
||||
# - "alert.rules"
|
||||
# - "first.rules"
|
||||
# - "second.rules"
|
||||
|
||||
# alerting:
|
||||
# alertmanagers:
|
||||
# - scheme: http
|
||||
# static_configs:
|
||||
# - targets:
|
||||
# - "127.0.0.1:9093"
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'prometheus'
|
||||
static_configs:
|
||||
- targets: ['localhost:9090']
|
||||
|
||||
- job_name: 'node_exporter'
|
||||
static_configs:
|
||||
- targets: ['127.0.0.1:9100']
|
||||
|
||||
- job_name: 'fake-data-gen'
|
||||
static_configs:
|
||||
- targets: ['127.0.0.1:9091']
|
||||
|
||||
- job_name: 'grafana'
|
||||
static_configs:
|
||||
- targets: ['127.0.0.1:3000']
|
@ -1,5 +1,5 @@
|
||||
FROM centos:centos7
|
||||
MAINTAINER Przemyslaw Ozgo <linux@ozgo.info>
|
||||
LABEL maintainer="Przemyslaw Ozgo <linux@ozgo.info>"
|
||||
|
||||
RUN \
|
||||
yum update -y && \
|
||||
|
4
docker/blocks/smtp/docker-compose.yaml
Normal file
4
docker/blocks/smtp/docker-compose.yaml
Normal file
@ -0,0 +1,4 @@
|
||||
snmpd:
|
||||
image: namshi/smtp
|
||||
ports:
|
||||
- "25:25"
|
@ -1,4 +0,0 @@
|
||||
snmpd:
|
||||
image: namshi/smtp
|
||||
ports:
|
||||
- "25:25"
|
2
docker/compose_header.yml
Normal file
2
docker/compose_header.yml
Normal file
@ -0,0 +1,2 @@
|
||||
version: "2"
|
||||
services:
|
@ -7,8 +7,9 @@ template_dir=templates
|
||||
grafana_config_file=conf.tmp
|
||||
grafana_config=config
|
||||
|
||||
fig_file=docker-compose.yml
|
||||
fig_config=fig
|
||||
compose_header_file=compose_header.yml
|
||||
fig_file=docker-compose.yaml
|
||||
fig_config=docker-compose.yaml
|
||||
|
||||
if [ "$#" == 0 ]; then
|
||||
blocks=`ls $blocks_dir`
|
||||
@ -23,13 +24,16 @@ if [ "$#" == 0 ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
for file in $gogs_config_file $fig_file; do
|
||||
for file in $grafana_config_file $fig_file; do
|
||||
if [ -e $file ]; then
|
||||
echo "Deleting $file"
|
||||
rm $file
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Adding Compose header to $fig_file"
|
||||
cat $compose_header_file >> $fig_file
|
||||
|
||||
for dir in $@; do
|
||||
current_dir=$blocks_dir/$dir
|
||||
if [ ! -d "$current_dir" ]; then
|
||||
@ -45,7 +49,7 @@ for dir in $@; do
|
||||
|
||||
if [ -e $current_dir/$fig_config ]; then
|
||||
echo "Adding $current_dir/$fig_config to $fig_file"
|
||||
cat $current_dir/fig >> $fig_file
|
||||
cat $current_dir/$fig_config >> $fig_file
|
||||
echo "" >> $fig_file
|
||||
fi
|
||||
done
|
||||
|
166
docs/sources/administration/provisioning.md
Normal file
166
docs/sources/administration/provisioning.md
Normal file
@ -0,0 +1,166 @@
|
||||
+++
|
||||
title = "Provisioning"
|
||||
description = ""
|
||||
keywords = ["grafana", "provisioning"]
|
||||
type = "docs"
|
||||
[menu.docs]
|
||||
parent = "admin"
|
||||
weight = 8
|
||||
+++
|
||||
|
||||
# Provisioning Grafana
|
||||
|
||||
## Config file
|
||||
|
||||
Check out the [configuration](/installation/configuration) page for more information about what you can configure in `grafana.ini`.
|
||||
|
||||
### Config file locations
|
||||
|
||||
- Default configuration from `$WORKING_DIR/conf/defaults.ini`
|
||||
- Custom configuration from `$WORKING_DIR/conf/custom.ini`
|
||||
- The custom configuration file path can be overridden using the `--config` parameter
|
||||
|
||||
> **Note.** If you have installed Grafana using the `deb` or `rpm`
|
||||
> packages, then your configuration file is located at
|
||||
> `/etc/grafana/grafana.ini`. This path is specified in the Grafana
|
||||
> init.d script using `--config` file parameter.
|
||||
|
||||
### Using environment variables
|
||||
|
||||
All options in the configuration file (listed below) can be overridden
using environment variables with the following syntax:
|
||||
|
||||
```bash
|
||||
GF_<SectionName>_<KeyName>
|
||||
```
|
||||
|
||||
Where the section name is the text within the brackets. Everything
|
||||
should be upper case, `.` should be replaced by `_`. For example, given these configuration settings:
|
||||
|
||||
```bash
|
||||
# default section
|
||||
instance_name = ${HOSTNAME}
|
||||
|
||||
[security]
|
||||
admin_user = admin
|
||||
|
||||
[auth.google]
|
||||
client_secret = 0ldS3cretKey
|
||||
```
|
||||
|
||||
Then you can override them using:
|
||||
|
||||
```bash
|
||||
export GF_DEFAULT_INSTANCE_NAME=my-instance
|
||||
export GF_SECURITY_ADMIN_USER=true
|
||||
export GF_AUTH_GOOGLE_CLIENT_SECRET=newS3cretKey
|
||||
```
|
||||
|
||||
<hr />
|
||||
|
||||
## Configuration management tools
|
||||
|
||||
Currently we do not provide any scripts/manifests for configuring Grafana. Rather than spending time learning and creating scripts/manifests for each tool, we think our time is better spent making Grafana easier to provision. Therefore, we rely heavily on the expertise of the community.
|
||||
|
||||
Tool | Project
|
||||
-----|------------
|
||||
Puppet | [https://forge.puppet.com/puppet/grafana](https://forge.puppet.com/puppet/grafana)
|
||||
Ansible | [https://github.com/picotrading/ansible-grafana](https://github.com/picotrading/ansible-grafana)
|
||||
Chef | [https://github.com/JonathanTron/chef-grafana](https://github.com/JonathanTron/chef-grafana)
|
||||
Saltstack | [https://github.com/salt-formulas/salt-formula-grafana](https://github.com/salt-formulas/salt-formula-grafana)
|
||||
|
||||
## Datasources
|
||||
|
||||
> This feature is available from v4.7
|
||||
|
||||
It's possible to manage datasources in Grafana by adding one or more YAML config files in the [`conf/datasources`](/installation/configuration/#datasources) directory. Each config file can contain a list of `datasources` that will be added or updated during startup. If the datasource already exists, Grafana will update it to match the configuration file. The config file can also contain a list of datasources that should be deleted. That list is called `delete_datasources`. Grafana will delete datasources listed in `delete_datasources` before inserting/updating those in the `datasources` list.
|
||||
|
||||
### Running multiple Grafana instances
|
||||
If you are running multiple instances of Grafana you might run into problems if they have different versions of the datasource.yaml configuration file. The best way to solve this problem is to add a version number to each datasource in the configuration and increase it when you update the config. Grafana will only update datasources with the same or lower version number than specified in the config. That way old configs cannot overwrite newer configs if they restart at the same time.
|
||||
|
||||
### Example datasource config file
|
||||
```yaml
|
||||
# list of datasources that should be deleted from the database
|
||||
delete_datasources:
|
||||
- name: Graphite
|
||||
org_id: 1
|
||||
|
||||
# list of datasources to insert/update depending
|
||||
# what's available in the database
|
||||
datasources:
|
||||
# <string, required> name of the datasource. Required
|
||||
- name: Graphite
|
||||
# <string, required> datasource type. Required
|
||||
type: graphite
|
||||
# <string, required> access mode. direct or proxy. Required
|
||||
access: proxy
|
||||
# <int> org id. will default to org_id 1 if not specified
|
||||
org_id: 1
|
||||
# <string> url
|
||||
url: http://localhost:8080
|
||||
# <string> database password, if used
|
||||
password:
|
||||
# <string> database user, if used
|
||||
user:
|
||||
# <string> database name, if used
|
||||
database:
|
||||
# <bool> enable/disable basic auth
|
||||
basic_auth:
|
||||
# <string> basic auth username
|
||||
basic_auth_user:
|
||||
# <string> basic auth password
|
||||
basic_auth_password:
|
||||
# <bool> enable/disable with credentials headers
|
||||
with_credentials:
|
||||
# <bool> mark as default datasource. Max one per org
|
||||
is_default:
|
||||
# <map> fields that will be converted to json and stored in json_data
|
||||
json_data:
|
||||
graphiteVersion: "1.1"
|
||||
tlsAuth: true
|
||||
tlsAuthWithCACert: true
|
||||
# <string> json object of data that will be encrypted.
|
||||
secure_json_data:
|
||||
tlsCACert: "..."
|
||||
tlsClientCert: "..."
|
||||
tlsClientKey: "..."
|
||||
version: 1
|
||||
# <bool> allow users to edit datasources from the UI.
|
||||
editable: false
|
||||
```
|
||||
|
||||
#### Json data
|
||||
|
||||
Since not all datasources have the same configuration settings, only the most common ones are available as fields. The rest should be stored as a JSON blob in the `json_data` field. Here are the most common settings that the core datasources use.
|
||||
|
||||
| Name | Type | Datasource |Description |
|
||||
| ----| ---- | ---- | --- |
|
||||
| tlsAuth | boolean | *All* | Enable TLS authentication using client cert configured in secure json data |
|
||||
| tlsAuthWithCACert | boolean | *All* | Enable TLS authentication using CA cert |
|
||||
| graphiteVersion | string | Graphite | Graphite version |
|
||||
| timeInterval | string | Elastic, Influxdb & Prometheus | Lowest interval/step value that should be used for this data source |
|
||||
| esVersion | string | Elastic | Elasticsearch version |
|
||||
| timeField | string | Elastic | Which field that should be used as timestamp |
|
||||
| interval | string | Elastic | Index date time format |
|
||||
| authType | string | Cloudwatch | Auth provider. keys/credentials/arn |
|
||||
| assumeRoleArn | string | Cloudwatch | ARN of Assume Role |
|
||||
| defaultRegion | string | Cloudwatch | AWS region |
|
||||
| customMetricsNamespaces | string | Cloudwatch | Namespaces of Custom Metrics |
|
||||
| tsdbVersion | string | OpenTsdb | Version |
|
||||
| tsdbResolution | string | OpenTsdb | Resolution |
|
||||
| sslmode | string | Postgres | SSLmode. 'disable', 'require', 'verify-ca' or 'verify-full' |
|
||||
|
||||
|
||||
#### Secure Json data
|
||||
|
||||
{"authType":"keys","defaultRegion":"us-west-2","timeField":"@timestamp"}
|
||||
|
||||
Secure JSON data is a map of settings that will be encrypted with the [secret key](/installation/configuration/#secret-key) from the Grafana config. The purpose of this is only to hide content from the users of the application. This should be used for storing TLS certificates and passwords that Grafana will append to requests on the server side. All of these settings are optional.
|
||||
|
||||
| Name | Type | Datasource | Description |
|
||||
| ----| ---- | ---- | --- |
|
||||
| tlsCACert | string | *All* | CA cert for outgoing requests |
| tlsClientCert | string | *All* | TLS client cert for outgoing requests |
| tlsClientKey | string | *All* | TLS client key for outgoing requests |
| password | string | Postgres | password |
| user | string | Postgres | user |
|
@ -45,10 +45,10 @@ Macro example | Description
|
||||
------------ | -------------
|
||||
*$__time(dateColumn)* | Will be replaced by an expression to rename the column to `time`. For example, *dateColumn as time*
|
||||
*$__timeSec(dateColumn)* | Will be replaced by an expression to rename the column to `time` and converting the value to unix timestamp. For example, *extract(epoch from dateColumn) as time*
|
||||
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn > to_timestamp(1494410783) AND dateColumn < to_timestamp(1494497183)*
|
||||
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *extract(epoch from dateColumn) BETWEEN 1494410783 AND 1494497183*
|
||||
*$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *to_timestamp(1494410783)*
|
||||
*$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *to_timestamp(1494497183)*
|
||||
*$__timeGroup(dateColumn,'5m')* | Will be replaced by an expression usable in GROUP BY clause. For example, *(extract(epoch from "dateColumn")/extract(epoch from '5m'::interval))::int*extract(epoch from '5m'::interval)*
|
||||
*$__timeGroup(dateColumn,'5m')* | Will be replaced by an expression usable in GROUP BY clause. For example, *(extract(epoch from "dateColumn")/300)::bigint*300*
|
||||
*$__unixEpochFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name with times represented as unix timestamp. For example, *dateColumn > 1494410783 AND dateColumn < 1494497183*
|
||||
*$__unixEpochFrom()* | Will be replaced by the start of the currently active time selection as unix timestamp. For example, *1494410783*
|
||||
*$__unixEpochTo()* | Will be replaced by the end of the currently active time selection as unix timestamp. For example, *1494497183*
|
||||
@ -186,7 +186,7 @@ ORDER BY atimestamp ASC
|
||||
|
||||
## Annotations
|
||||
|
||||
[Annotations]({{< relref "reference/annotations.md" >}}) allows you to overlay rich event information on top of graphs. You add annotation queries via the Dashboard menu / Annotations view.
|
||||
[Annotations]({{< relref "reference/annotations.md" >}}) allow you to overlay rich event information on top of graphs. You add annotation queries via the Dashboard menu / Annotations view.
|
||||
|
||||
An example query:
|
||||
|
||||
|
@ -34,6 +34,7 @@ Name | Description
|
||||
*Basic Auth* | Enable basic authentication to the Prometheus data source.
|
||||
*User* | Name of your Prometheus user
|
||||
*Password* | Database user's password
|
||||
*Scrape interval* | This will be used as a lower limit for the Prometheus step query parameter. Default value is 15s.
|
||||
|
||||
## Query editor
|
||||
|
||||
@ -95,3 +96,7 @@ Prometheus supports two ways to query annotations.
|
||||
- A Prometheus query for pending and firing alerts (for details see [Inspecting alerts during runtime](https://prometheus.io/docs/alerting/rules/#inspecting-alerts-during-runtime))
|
||||
|
||||
The step option is useful to limit the number of events returned from your query.
|
||||
|
||||
## Getting Grafana metrics into Prometheus
|
||||
|
||||
Since 4.6.0 Grafana exposes metrics for Prometheus on the `/metrics` endpoint. We also bundle a dashboard within Grafana so you can get started viewing your metrics faster. You can import the bundled dashboard by going to the data source edit page and click the dashboard tab. There you can find a dashboard for Grafana and one for Prometheus. Import and start viewing all the metrics!
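For example, to quickly check that the endpoint is serving data (assuming Grafana listens on the default `localhost:3000`):

```bash
# Fetch the Prometheus-format metrics that Grafana exposes and show the
# first few lines of output.
curl -s http://localhost:3000/metrics | head -n 20
```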
|
||||
|
@ -17,7 +17,7 @@ This make is much easier to verify functionally since the data can be shared ver
|
||||
|
||||
## Enable
|
||||
|
||||
`Grafana TestData` is not enabled by default. To enable it you have to go to `/plugins/testdata/edit` and click the enable button to enable.
|
||||
`Grafana TestData` is not enabled by default. To enable it, first navigate to the Plugins section, found in your Grafana main menu. Click the Apps tab in the Plugins section and select the Grafana TestData app (or navigate to http://your_grafana_instance/plugins/testdata/edit to go directly there). Finally, click the enable button.
|
||||
|
||||
## Create mock data.
|
||||
|
||||
|
@ -89,7 +89,7 @@ Content-Type: application/json
|
||||
|
||||
## Create Annotation
|
||||
|
||||
Creates an annotation in the Grafana database. The `dashboardId` and `panelId` fields are optional. If they are not specified then a global annotation is created and can be queried in any dashboard that adds the Grafana annotations data source.
|
||||
Creates an annotation in the Grafana database. The `dashboardId` and `panelId` fields are optional. If they are not specified then a global annotation is created and can be queried in any dashboard that adds the Grafana annotations data source. When creating a region annotation, the response will include both `id` and `endId`; otherwise only `id` is returned.
|
||||
|
||||
`POST /api/annotations`
|
||||
|
||||
@ -117,7 +117,11 @@ Content-Type: application/json
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Annotation added"}
|
||||
{
|
||||
"message":"Annotation added",
|
||||
"id": 1,
|
||||
"endId": 2
|
||||
}
|
||||
```
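For illustration, a request of the following shape would produce a response like the one above; the ids, timestamps and API key are placeholders, and `isRegion`/`timeEnd` are what make the annotation a region:

```bash
# Create a region annotation; the response contains both "id" and "endId".
curl -s -X POST http://localhost:3000/api/annotations \
  -H "Authorization: Bearer $GRAFANA_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "dashboardId": 468,
    "panelId": 1,
    "time": 1507037197339,
    "timeEnd": 1507180805056,
    "isRegion": true,
    "tags": ["deploy"],
    "text": "Deployment of new feature"
  }'
```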
|
||||
|
||||
## Create Annotation in Graphite format
|
||||
@ -148,7 +152,10 @@ Content-Type: application/json
|
||||
HTTP/1.1 200
|
||||
Content-Type: application/json
|
||||
|
||||
{"message":"Graphite annotation added"}
|
||||
{
|
||||
"message":"Graphite annotation added",
|
||||
"id": 1
|
||||
}
|
||||
```
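A sketch of a corresponding request (values and the API key are placeholders; `what`, `tags`, `when` and `data` follow the Graphite events convention):

```bash
# Create an annotation using the Graphite-compatible format; the response
# now includes the id of the created annotation.
curl -s -X POST http://localhost:3000/api/annotations/graphite \
  -H "Authorization: Bearer $GRAFANA_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "what": "Event - deploy",
    "tags": ["deploy", "production"],
    "when": 1467844481,
    "data": "deploy of master branch"
  }'
```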
|
||||
|
||||
## Update Annotation
|
||||
|
@ -258,7 +258,7 @@ Query parameters:
|
||||
**Example Request**:
|
||||
|
||||
```http
|
||||
GET /api/search?query=MyDashboard&starred=true&tag=prod HTTP/1.1
|
||||
GET /api/search?query=Production%20Overview&starred=true&tag=prod HTTP/1.1
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
|
||||
@ -276,8 +276,8 @@ Content-Type: application/json
|
||||
"title":"Production Overview",
|
||||
"uri":"db/production-overview",
|
||||
"type":"dash-db",
|
||||
"tags":[],
|
||||
"isStarred":false
|
"tags":["prod"],
|
||||
"isStarred":true
|
||||
}
|
||||
]
|
||||
```
|
||||
```
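The same search issued from the command line might look like this (the API key is a placeholder):

```bash
# Search for starred dashboards tagged "prod" whose title matches the query.
curl -s -G http://localhost:3000/api/search \
  -H "Authorization: Bearer $GRAFANA_API_KEY" \
  --data-urlencode "query=Production Overview" \
  --data-urlencode "starred=true" \
  --data-urlencode "tag=prod"
```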
|
||||
|
@ -46,8 +46,8 @@ those options.
|
||||
- [Graphite]({{< relref "features/datasources/graphite.md" >}})
|
||||
- [Elasticsearch]({{< relref "features/datasources/elasticsearch.md" >}})
|
||||
- [InfluxDB]({{< relref "features/datasources/influxdb.md" >}})
|
||||
- [Prometheus]({{< relref "features/datasources/influxdb.md" >}})
|
||||
- [OpenTSDB]({{< relref "features/datasources/prometheus.md" >}})
|
||||
- [Prometheus]({{< relref "features/datasources/prometheus.md" >}})
|
||||
- [OpenTSDB]({{< relref "features/datasources/opentsdb.md" >}})
|
||||
- [MySQL]({{< relref "features/datasources/mysql.md" >}})
|
||||
- [Postgres]({{< relref "features/datasources/postgres.md" >}})
|
||||
- [Cloudwatch]({{< relref "features/datasources/cloudwatch.md" >}})
|
||||
|
@ -87,6 +87,14 @@ command line in the init.d script or the systemd service file. It can
|
||||
be overridden in the configuration file or in the default environment variable
|
||||
file.
|
||||
|
||||
### plugins
|
||||
|
||||
Directory that Grafana will automatically scan for plugins.
|
||||
|
||||
### datasources
|
||||
|
||||
Config files containing datasources that will be configured at startup
|
||||
|
||||
## [server]
|
||||
|
||||
### http_addr
|
||||
@ -224,6 +232,9 @@ The maximum number of connections in the idle connection pool.
|
||||
### max_open_conn
|
||||
The maximum number of open connections to the database.
|
||||
|
||||
### log_queries
|
||||
Set to `true` to log the sql calls and execution times.
|
||||
|
||||
<hr />
|
||||
|
||||
## [security]
|
||||
@ -551,7 +562,7 @@ session provider you have configured.
|
||||
|
||||
- **file:** session file path, e.g. `data/sessions`
|
||||
- **mysql:** go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
|
||||
- **postgres:** ex: user=a password=b host=localhost port=5432 dbname=c sslmode=require
|
||||
- **postgres:** ex: user=a password=b host=localhost port=5432 dbname=c sslmode=verify-full
|
||||
- **memcache:** ex: 127.0.0.1:11211
|
||||
- **redis:** ex: `addr=127.0.0.1:6379,pool_size=100,prefix=grafana`
|
||||
|
||||
@ -580,7 +591,7 @@ CREATE TABLE session (
|
||||
);
|
||||
```
|
||||
|
||||
Postgres valid `sslmode` are `disable`, `require` (default), `verify-ca`, and `verify-full`.
|
||||
Postgres valid `sslmode` are `disable`, `require`, `verify-ca`, and `verify-full` (default).
|
||||
|
||||
### cookie_name
|
||||
|
||||
@ -613,6 +624,12 @@ Analytics ID here. By default this feature is disabled.
|
||||
|
||||
<hr />
|
||||
|
||||
## [dashboards]
|
||||
|
||||
### versions_to_keep (introduced in v5.0)
|
||||
|
||||
Number of dashboard versions to keep (per dashboard). Default: 20, Minimum: 1.
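Following the `GF_<SectionName>_<KeyName>` convention, this setting can presumably also be overridden with an environment variable, for example:

```bash
# Keep 50 versions per dashboard instead of the default 20.
export GF_DASHBOARDS_VERSIONS_TO_KEEP=50
```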
|
||||
|
||||
## [dashboards.json]
|
||||
|
||||
If you have a system that automatically builds dashboards as json files you can enable this feature to have the
|
||||
@ -673,7 +690,7 @@ Ex `filters = sqlstore:debug`
|
||||
## [metrics]
|
||||
|
||||
### enabled
|
||||
Enable metrics reporting. defaults true. Available via HTTP API `/api/metrics`.
|
||||
Enable metrics reporting. Defaults to true. Available via HTTP API `/metrics`.
|
||||
|
||||
### interval_seconds
|
||||
|
||||
|
@ -15,7 +15,7 @@ weight = 1
|
||||
|
||||
Description | Download
|
||||
------------ | -------------
|
||||
Stable for Debian-based Linux | [grafana_4.6.0_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.6.0_amd64.deb)
|
||||
Stable for Debian-based Linux | [grafana_4.6.2_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.6.2_amd64.deb)
|
||||
|
||||
<!-- Beta for Debian-based Linux | [grafana_4.5.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb) -->
|
||||
|
||||
@ -26,9 +26,9 @@ installation.
|
||||
|
||||
|
||||
```bash
|
||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.6.0_amd64.deb
|
||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.6.2_amd64.deb
|
||||
sudo apt-get install -y adduser libfontconfig
|
||||
sudo dpkg -i grafana_4.6.0_amd64.deb
|
||||
sudo dpkg -i grafana_4.6.2_amd64.deb
|
||||
```
|
||||
|
||||
<!--
|
||||
|
@ -15,7 +15,7 @@ weight = 2
|
||||
|
||||
Description | Download
|
||||
------------ | -------------
|
||||
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.6.0 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.0-1.x86_64.rpm)
|
||||
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.6.2 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.2-1.x86_64.rpm)
|
||||
|
||||
<!-- Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-beta1.x86_64.rpm) -->
|
||||
|
||||
@ -27,7 +27,7 @@ installation.
|
||||
You can install Grafana using Yum directly.
|
||||
|
||||
```bash
|
||||
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.0-1.x86_64.rpm
|
||||
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.2-1.x86_64.rpm
|
||||
```
|
||||
|
||||
Or install manually using `rpm`.
|
||||
@ -35,15 +35,15 @@ Or install manually using `rpm`.
|
||||
#### On CentOS / Fedora / Redhat:
|
||||
|
||||
```bash
|
||||
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.0-1.x86_64.rpm
|
||||
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.2-1.x86_64.rpm
|
||||
$ sudo yum install initscripts fontconfig
|
||||
$ sudo rpm -Uvh grafana-4.6.0-1.x86_64.rpm
|
||||
$ sudo rpm -Uvh grafana-4.6.2-1.x86_64.rpm
|
||||
```
|
||||
|
||||
#### On OpenSuse:
|
||||
|
||||
```bash
|
||||
$ sudo rpm -i --nodeps grafana-4.6.0-1.x86_64.rpm
|
||||
$ sudo rpm -i --nodeps grafana-4.6.2-1.x86_64.rpm
|
||||
```
|
||||
|
||||
## Install via YUM Repository
|
||||
|
@ -13,7 +13,7 @@ weight = 3
|
||||
|
||||
Description | Download
|
||||
------------ | -------------
|
||||
Latest stable package for Windows | [grafana.4.6.0.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.0.windows-x64.zip)
|
||||
Latest stable package for Windows | [grafana.4.6.2.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.2.windows-x64.zip)
|
||||
|
||||
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
|
||||
installation.
|
||||
|
@ -13,9 +13,10 @@ dev environment. Grafana ships with its own required backend server; also comple
|
||||
|
||||
## Dependencies
|
||||
|
||||
- [Go 1.9.1](https://golang.org/dl/)
|
||||
- [NodeJS LTS](https://nodejs.org/download/)
|
||||
- [Go 1.9.2](https://golang.org/dl/)
|
||||
- [Git](https://git-scm.com/downloads)
|
||||
- [NodeJS LTS](https://nodejs.org/download/)
|
||||
- node-gyp is the Node.js native addon build tool and it requires extra dependencies: python 2.7, make and GCC. These are already installed for most Linux distros and MacOS. See the Building On Windows section or the [node-gyp installation instructions](https://github.com/nodejs/node-gyp#installation) for more details.
|
||||
|
||||
## Get Code
|
||||
Create a directory for the project and set your path accordingly (or use the [default Go workspace directory](https://golang.org/doc/code.html#GOPATH)). Then download and install Grafana into your $GOPATH directory:
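A typical sequence, assuming Go is installed and `$GOPATH` is set (the exact commands may differ slightly):

```bash
# Fetch the source into the Go workspace and switch to it.
go get github.com/grafana/grafana
cd $GOPATH/src/github.com/grafana/grafana
```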
|
||||
@ -40,8 +41,8 @@ go run build.go build # (or 'go build ./pkg/cmd/grafana-server')
|
||||
```
|
||||
|
||||
#### Building on Windows
|
||||
The Grafana backend includes Sqlite3 which requires GCC to compile. So in order to compile Grafana on windows you need
|
||||
to install GCC. We recommend [TDM-GCC](http://tdm-gcc.tdragon.net/download).
|
||||
|
||||
The Grafana backend includes Sqlite3 which requires GCC to compile. So in order to compile Grafana on Windows you need to install GCC. We recommend [TDM-GCC](http://tdm-gcc.tdragon.net/download).
|
||||
|
||||
[node-gyp](https://github.com/nodejs/node-gyp#installation) is the Node.js native addon build tool and it requires extra dependencies to be installed on Windows. In a command prompt which is run as administrator, run:
|
||||
|
||||
|
@ -25,12 +25,16 @@ enabled = true
|
||||
header_name = X-WEBAUTH-USER
|
||||
header_property = username
|
||||
auto_sign_up = true
|
||||
ldap_sync_ttl = 60
|
||||
whitelist =
|
||||
```
|
||||
|
||||
* **enabled**: this is to toggle the feature on or off
|
||||
* **header_name**: this is the HTTP header name that passes the username or email address of the authenticated user to Grafana. Grafana will trust whatever username is contained in this header and automatically log the user in.
|
||||
* **header_property**: this tells Grafana whether the value in the header_name is a username or an email address. (In Grafana you can log in using your account username or account email)
|
||||
* **auto_sign_up**: If set to true, Grafana will automatically create user accounts in the Grafana DB if one does not exist. If set to false, users who do not exist in the Grafana DB won’t be able to log in, even though their username and password are valid.
|
||||
* **ldap_sync_ttl**: When both auth.proxy and auth.ldap are enabled, user's organisation and role are synchronised from ldap after the http proxy authentication. You can force ldap re-synchronisation after `ldap_sync_ttl` minutes.
|
||||
* **whitelist**: Comma-separated list of IP addresses of trusted authentication proxies.
|
||||
|
||||
With a fresh install of Grafana, using the above configuration for the authProxy feature, we can send a simple API call to list all users. The only user that will be present is the default “Admin” user that is added the first time Grafana starts up. As you can see, all we need to do to authenticate the request is to provide the “X-WEBAUTH-USER” header, as in the sketch below.
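A minimal sketch of that call, assuming Grafana listens on the default `localhost:3000` and the header is passed straight through (as it would be when testing locally):

```bash
# List users while authenticating purely via the trusted proxy header.
curl -s http://localhost:3000/api/users \
  -H "X-WEBAUTH-USER: admin"
```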
|
||||
|
||||
|
@ -1,10 +1,15 @@
|
||||
|
||||
module.exports = {
|
||||
verbose: false,
|
||||
"globals": {
|
||||
"ts-jest": {
|
||||
"tsConfigFile": "tsconfig.json"
|
||||
}
|
||||
},
|
||||
"transform": {
|
||||
"^.+\\.tsx?$": "<rootDir>/node_modules/ts-jest/preprocessor.js"
|
||||
},
|
||||
"moduleDirectories": ["<rootDir>/node_modules", "<rootDir>/public"],
|
||||
"moduleDirectories": ["node_modules", "public"],
|
||||
"roots": [
|
||||
"<rootDir>/public"
|
||||
],
|
||||
|
12
package.json
12
package.json
@ -95,12 +95,12 @@
|
||||
"zone.js": "^0.7.2"
|
||||
},
|
||||
"scripts": {
|
||||
"dev": "node ./node_modules/.bin/webpack --progress --colors --config scripts/webpack/webpack.dev.js",
|
||||
"watch": "node ./node_modules/.bin/webpack --progress --colors --watch --config scripts/webpack/webpack.dev.js",
|
||||
"build": "node ./node_modules/.bin/grunt build",
|
||||
"test": "node ./node_modules/.bin/grunt test",
|
||||
"test:coverage": "node ./node_modules/.bin/grunt test --coverage=true",
|
||||
"lint": "node ./node_modules/.bin/tslint -c tslint.json --project tsconfig.json --type-check",
|
||||
"dev": "webpack --progress --colors --config scripts/webpack/webpack.dev.js",
|
||||
"watch": "webpack --progress --colors --watch --config scripts/webpack/webpack.dev.js",
|
||||
"build": "grunt build",
|
||||
"test": "grunt test",
|
||||
"test:coverage": "grunt test --coverage=true",
|
||||
"lint": "tslint -c tslint.json --project tsconfig.json --type-check",
|
||||
"karma": "node ./node_modules/grunt-cli/bin/grunt karma:dev",
|
||||
"jest": "node ./node_modules/jest-cli/bin/jest.js --notify --watch",
|
||||
"precommit": "node ./node_modules/grunt-cli/bin/grunt precommit"
|
||||
|
@ -1,5 +1,5 @@
|
||||
#! /usr/bin/env bash
|
||||
version=4.5.2
|
||||
version=4.6.2
|
||||
|
||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb
|
||||
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/middleware"
|
||||
"github.com/grafana/grafana/pkg/services/annotations"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
)
|
||||
|
||||
func GetAnnotations(c *middleware.Context) Response {
|
||||
@ -21,6 +22,7 @@ func GetAnnotations(c *middleware.Context) Response {
|
||||
PanelId: c.QueryInt64("panelId"),
|
||||
Limit: c.QueryInt64("limit"),
|
||||
Tags: c.QueryStrings("tags"),
|
||||
Type: c.Query("type"),
|
||||
}
|
||||
|
||||
repo := annotations.GetRepository()
|
||||
@ -75,9 +77,11 @@ func PostAnnotation(c *middleware.Context, cmd dtos.PostAnnotationsCmd) Response
|
||||
return ApiError(500, "Failed to save annotation", err)
|
||||
}
|
||||
|
||||
startID := item.Id
|
||||
|
||||
// handle regions
|
||||
if cmd.IsRegion {
|
||||
item.RegionId = item.Id
|
||||
item.RegionId = startID
|
||||
|
||||
if item.Data == nil {
|
||||
item.Data = simplejson.New()
|
||||
@ -93,9 +97,18 @@ func PostAnnotation(c *middleware.Context, cmd dtos.PostAnnotationsCmd) Response
|
||||
if err := repo.Save(&item); err != nil {
|
||||
return ApiError(500, "Failed save annotation for region end time", err)
|
||||
}
|
||||
|
||||
return Json(200, util.DynMap{
|
||||
"message": "Annotation added",
|
||||
"id": startID,
|
||||
"endId": item.Id,
|
||||
})
|
||||
}
|
||||
|
||||
return ApiSuccess("Annotation added")
|
||||
return Json(200, util.DynMap{
|
||||
"message": "Annotation added",
|
||||
"id": startID,
|
||||
})
|
||||
}
|
||||
|
||||
func formatGraphiteAnnotation(what string, data string) string {
|
||||
@ -154,7 +167,10 @@ func PostGraphiteAnnotation(c *middleware.Context, cmd dtos.PostGraphiteAnnotati
|
||||
return ApiError(500, "Failed to save Graphite annotation", err)
|
||||
}
|
||||
|
||||
return ApiSuccess("Graphite annotation added")
|
||||
return Json(200, util.DynMap{
|
||||
"message": "Graphite annotation added",
|
||||
"id": item.Id,
|
||||
})
|
||||
}
|
||||
|
||||
func UpdateAnnotation(c *middleware.Context, cmd dtos.UpdateAnnotationsCmd) Response {
|
||||
|
@ -212,10 +212,10 @@ func (hs *HttpServer) registerRoutes() {
|
||||
// Data sources
|
||||
apiRoute.Group("/datasources", func(datasourceRoute RouteRegister) {
|
||||
datasourceRoute.Get("/", wrap(GetDataSources))
|
||||
datasourceRoute.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), AddDataSource)
|
||||
datasourceRoute.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), wrap(AddDataSource))
|
||||
datasourceRoute.Put("/:id", bind(m.UpdateDataSourceCommand{}), wrap(UpdateDataSource))
|
||||
datasourceRoute.Delete("/:id", DeleteDataSourceById)
|
||||
datasourceRoute.Delete("/name/:name", DeleteDataSourceByName)
|
||||
datasourceRoute.Delete("/:id", wrap(DeleteDataSourceById))
|
||||
datasourceRoute.Delete("/name/:name", wrap(DeleteDataSourceByName))
|
||||
datasourceRoute.Get("/:id", wrap(GetDataSourceById))
|
||||
datasourceRoute.Get("/name/:name", wrap(GetDataSourceByName))
|
||||
}, reqOrgAdmin)

@ -340,8 +340,8 @@ func (hs *HttpServer) registerRoutes() {
r.Any("/api/gnet/*", reqSignedIn, ProxyGnetRequest)

// Gravatar service.
avt := avatar.CacheServer()
r.Get("/avatar/:hash", avt.ServeHTTP)
avatarCacheServer := avatar.NewCacheServer()
r.Get("/avatar/:hash", avatarCacheServer.Handler)

// Websocket
r.Any("/ws", hs.streamManager.Serve)

@ -24,6 +24,7 @@ import (

"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/setting"
"gopkg.in/macaron.v1"
)

var gravatarSource string

@ -89,12 +90,12 @@ func (this *Avatar) Update() (err error) {
return err
}

type service struct {
type CacheServer struct {
notFound *Avatar
cache map[string]*Avatar
}

func (this *service) mustInt(r *http.Request, defaultValue int, keys ...string) (v int) {
func (this *CacheServer) mustInt(r *http.Request, defaultValue int, keys ...string) (v int) {
for _, k := range keys {
if _, err := fmt.Sscanf(r.FormValue(k), "%d", &v); err == nil {
defaultValue = v

@ -103,8 +104,8 @@ func (this *service) mustInt(r *http.Request, defaultValue int, keys ...string)
return defaultValue
}

func (this *service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
urlPath := r.URL.Path
func (this *CacheServer) Handler(ctx *macaron.Context) {
urlPath := ctx.Req.URL.Path
hash := urlPath[strings.LastIndex(urlPath, "/")+1:]

var avatar *Avatar

@ -126,20 +127,24 @@ func (this *service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
this.cache[hash] = avatar
}

w.Header().Set("Content-Type", "image/jpeg")
w.Header().Set("Content-Length", strconv.Itoa(len(avatar.data.Bytes())))
w.Header().Set("Cache-Control", "private, max-age=3600")
ctx.Resp.Header().Add("Content-Type", "image/jpeg")

if err := avatar.Encode(w); err != nil {
if !setting.EnableGzip {
ctx.Resp.Header().Add("Content-Length", strconv.Itoa(len(avatar.data.Bytes())))
}

ctx.Resp.Header().Add("Cache-Control", "private, max-age=3600")

if err := avatar.Encode(ctx.Resp); err != nil {
log.Warn("avatar encode error: %v", err)
w.WriteHeader(500)
ctx.WriteHeader(500)
}
}

func CacheServer() http.Handler {
func NewCacheServer() *CacheServer {
UpdateGravatarSource()

return &service{
return &CacheServer{
notFound: newNotFound(),
cache: make(map[string]*Avatar),
}
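
The hunk above also makes Content-Length conditional on gzip being disabled, presumably because the Gziper middleware rewrites the body and would invalidate a precomputed length. Below is a toy sketch (not from this commit) of the cache-server pattern where a method value is registered as the request handler, the way avatarCacheServer.Handler is above; it uses net/http instead of macaron so it runs stand-alone, and all names and the port are made up.

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    type cacheServer struct {
        // A real server would guard this map with a mutex; omitted to keep
        // the sketch short.
        cache map[string][]byte
    }

    func newCacheServer() *cacheServer {
        return &cacheServer{cache: make(map[string][]byte)}
    }

    // Handler serves /avatar/<hash>, caching the generated body per hash.
    func (s *cacheServer) Handler(w http.ResponseWriter, r *http.Request) {
        hash := r.URL.Path[strings.LastIndex(r.URL.Path, "/")+1:]
        body, ok := s.cache[hash]
        if !ok {
            body = []byte("avatar for " + hash)
            s.cache[hash] = body
        }
        w.Header().Set("Cache-Control", "private, max-age=3600")
        w.Write(body)
    }

    func main() {
        srv := newCacheServer()
        http.HandleFunc("/avatar/", srv.Handler)
        fmt.Println(http.ListenAndServe(":8080", nil))
    }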

@ -33,6 +33,7 @@ func GetDataSources(c *middleware.Context) Response {
BasicAuth: ds.BasicAuth,
IsDefault: ds.IsDefault,
JsonData: ds.JsonData,
ReadOnly: ds.ReadOnly,
}

if plugin, exists := plugins.DataSources[ds.Type]; exists {

@ -68,59 +69,70 @@ func GetDataSourceById(c *middleware.Context) Response {
return Json(200, &dtos)
}

func DeleteDataSourceById(c *middleware.Context) {
func DeleteDataSourceById(c *middleware.Context) Response {
id := c.ParamsInt64(":id")

if id <= 0 {
c.JsonApiErr(400, "Missing valid datasource id", nil)
return
return ApiError(400, "Missing valid datasource id", nil)
}

ds, err := getRawDataSourceById(id, c.OrgId)
if err != nil {
return ApiError(400, "Failed to delete datasource", nil)
}

if ds.ReadOnly {
return ApiError(403, "Cannot delete read-only data source", nil)
}

cmd := &m.DeleteDataSourceByIdCommand{Id: id, OrgId: c.OrgId}

err := bus.Dispatch(cmd)
err = bus.Dispatch(cmd)
if err != nil {
c.JsonApiErr(500, "Failed to delete datasource", err)
return
return ApiError(500, "Failed to delete datasource", err)
}

c.JsonOK("Data source deleted")
return ApiSuccess("Data source deleted")
}
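
A sketch (not part of the commit) of what a client now observes: deleting a provisioned, read-only data source is rejected with 403. The URL, id and credentials are assumptions.

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        req, err := http.NewRequest(http.MethodDelete, "http://localhost:3000/api/datasources/1", nil)
        if err != nil {
            fmt.Println(err)
            return
        }
        req.SetBasicAuth("admin", "admin") // assumed credentials

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()

        // 200 for a normal data source, 403 if it is marked read-only.
        fmt.Println("status:", resp.StatusCode)
    }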

func DeleteDataSourceByName(c *middleware.Context) {
func DeleteDataSourceByName(c *middleware.Context) Response {
name := c.Params(":name")

if name == "" {
c.JsonApiErr(400, "Missing valid datasource name", nil)
return
return ApiError(400, "Missing valid datasource name", nil)
}

getCmd := &m.GetDataSourceByNameQuery{Name: name, OrgId: c.OrgId}
if err := bus.Dispatch(getCmd); err != nil {
return ApiError(500, "Failed to delete datasource", err)
}

if getCmd.Result.ReadOnly {
return ApiError(403, "Cannot delete read-only data source", nil)
}

cmd := &m.DeleteDataSourceByNameCommand{Name: name, OrgId: c.OrgId}

err := bus.Dispatch(cmd)
if err != nil {
c.JsonApiErr(500, "Failed to delete datasource", err)
return
return ApiError(500, "Failed to delete datasource", err)
}

c.JsonOK("Data source deleted")
return ApiSuccess("Data source deleted")
}

func AddDataSource(c *middleware.Context, cmd m.AddDataSourceCommand) {
func AddDataSource(c *middleware.Context, cmd m.AddDataSourceCommand) Response {
cmd.OrgId = c.OrgId

if err := bus.Dispatch(&cmd); err != nil {
if err == m.ErrDataSourceNameExists {
c.JsonApiErr(409, err.Error(), err)
return
return ApiError(409, err.Error(), err)
}

c.JsonApiErr(500, "Failed to add datasource", err)
return
return ApiError(500, "Failed to add datasource", err)
}

ds := convertModelToDtos(cmd.Result)
c.JSON(200, util.DynMap{
return Json(200, util.DynMap{
"message": "Datasource added",
"id": cmd.Result.Id,
"name": cmd.Result.Name,

@ -160,11 +172,14 @@ func fillWithSecureJsonData(cmd *m.UpdateDataSourceCommand) error {
}

ds, err := getRawDataSourceById(cmd.Id, cmd.OrgId)

if err != nil {
return err
}

if ds.ReadOnly {
return m.ErrDatasourceIsReadOnly
}

secureJsonData := ds.SecureJsonData.Decrypt()
for k, v := range secureJsonData {

@ -201,6 +216,7 @@ func GetDataSourceByName(c *middleware.Context) Response {
}

dtos := convertModelToDtos(query.Result)
dtos.ReadOnly = true
return Json(200, &dtos)
}

@ -242,6 +258,7 @@ func convertModelToDtos(ds *m.DataSource) dtos.DataSource {
JsonData: ds.JsonData,
SecureJsonFields: map[string]bool{},
Version: ds.Version,
ReadOnly: ds.ReadOnly,
}

for k, v := range ds.SecureJsonData {

@ -26,6 +26,7 @@ type DataSource struct {
JsonData *simplejson.Json `json:"jsonData,omitempty"`
SecureJsonFields map[string]bool `json:"secureJsonFields"`
Version int `json:"version"`
ReadOnly bool `json:"readOnly"`
}

type DataSourceListItemDTO struct {

@ -42,6 +43,7 @@ type DataSourceListItemDTO struct {
BasicAuth bool `json:"basicAuth"`
IsDefault bool `json:"isDefault"`
JsonData *simplejson.Json `json:"jsonData,omitempty"`
ReadOnly bool `json:"readOnly"`
}

type DataSourceList []DataSourceListItemDTO
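
An illustration (not from this diff) of how the new readOnly field surfaces in the JSON a client receives; the struct below is a trimmed stand-in for the real DTO, not the full type.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type dataSourceDTO struct {
        Id       int64  `json:"id"`
        Name     string `json:"name"`
        Type     string `json:"type"`
        Version  int    `json:"version"`
        ReadOnly bool   `json:"readOnly"`
    }

    func main() {
        ds := dataSourceDTO{Id: 1, Name: "graphite", Type: "graphite", Version: 2, ReadOnly: true}
        out, _ := json.MarshalIndent(ds, "", "  ")
        fmt.Println(string(out))
    }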

@ -146,12 +146,13 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron {
m := macaron.New()

m.Use(middleware.Logger())
m.Use(middleware.Recovery())

if setting.EnableGzip {
m.Use(middleware.Gziper())
}

m.Use(middleware.Recovery())

for _, route := range plugins.StaticRoutes {
pluginRoute := path.Join("/public/plugins/", route.PluginId)
hs.log.Debug("Plugins: Adding route", "route", pluginRoute, "dir", route.Directory)

@ -193,7 +194,8 @@ func (hs *HttpServer) metricsEndpoint(ctx *macaron.Context) {
}

func (hs *HttpServer) healthHandler(ctx *macaron.Context) {
if ctx.Req.Method != "GET" || ctx.Req.URL.Path != "/api/health" {
notHeadOrGet := ctx.Req.Method != http.MethodGet && ctx.Req.Method != http.MethodHead
if notHeadOrGet || ctx.Req.URL.Path != "/api/health" {
return
}
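
Sketch (not part of the commit): with the change above, health probes can use HEAD as well as GET against /api/health. The URL is an assumption.

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        resp, err := http.Head("http://localhost:3000/api/health")
        if err != nil {
            fmt.Println(err)
            return
        }
        resp.Body.Close()
        fmt.Println("health check status:", resp.StatusCode)
    }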

@ -81,8 +81,6 @@ func (rr *routeRegister) Register(router Router) *macaron.Router {
}

func (rr *routeRegister) route(pattern, method string, handlers ...macaron.Handler) {
//inject tracing

h := make([]macaron.Handler, 0)
for _, fn := range rr.namedMiddleware {
h = append(h, fn(pattern))

@ -15,7 +15,6 @@ func Search(c *middleware.Context) {
starred := c.Query("starred")
limit := c.QueryInt("limit")
dashboardType := c.Query("type")
folderId := c.QueryInt64("folderId")

if limit == 0 {
limit = 1000

@ -29,6 +28,14 @@ func Search(c *middleware.Context) {
}
}

folderIds := make([]int64, 0)
for _, id := range c.QueryStrings("folderIds") {
folderId, err := strconv.ParseInt(id, 10, 64)
if err == nil {
folderIds = append(folderIds, folderId)
}
}

searchQuery := search.Query{
Title: query,
Tags: tags,

@ -38,7 +45,7 @@ func Search(c *middleware.Context) {
OrgId: c.OrgId,
DashboardIds: dbids,
Type: dashboardType,
FolderId: folderId,
FolderIds: folderIds,
}

err := bus.Dispatch(&searchQuery)
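
Not part of the commit: because the new folderIds filter is read with c.QueryStrings, clients pass it by repeating the query parameter. A small sketch of building such a request URL:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        q := url.Values{}
        q.Set("query", "cpu")
        q.Add("folderIds", "1")
        q.Add("folderIds", "5")

        // Assumed endpoint; prints /api/search?folderIds=1&folderIds=5&query=cpu
        fmt.Println("/api/search?" + q.Encode())
    }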

@ -16,7 +16,6 @@ import (

"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/setting"

_ "github.com/grafana/grafana/pkg/services/alerting/conditions"

@ -88,11 +87,6 @@ func main() {
server.Start()
}

func initSql() {
sqlstore.NewEngine()
sqlstore.EnsureAdminUser()
}

func listenToSystemSignals(server models.GrafanaServer) {
signalChan := make(chan os.Signal, 1)
ignoreChan := make(chan os.Signal, 1)

@ -9,6 +9,9 @@ import (
"strconv"
"time"

"github.com/grafana/grafana/pkg/cmd/grafana-cli/logger"
"github.com/grafana/grafana/pkg/services/provisioning"

"golang.org/x/sync/errgroup"

"github.com/grafana/grafana/pkg/api"

@ -21,7 +24,9 @@ import (
"github.com/grafana/grafana/pkg/services/cleanup"
"github.com/grafana/grafana/pkg/services/notifications"
"github.com/grafana/grafana/pkg/services/search"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/setting"

"github.com/grafana/grafana/pkg/social"
"github.com/grafana/grafana/pkg/tracing"
)

@ -54,12 +59,19 @@ func (g *GrafanaServerImpl) Start() {
g.writePIDFile()

initSql()

metrics.Init(setting.Cfg)
search.Init()
login.Init()
social.NewOAuthService()
plugins.Init()

if err := provisioning.StartUp(setting.DatasourcesPath); err != nil {
logger.Error("Failed to provision Grafana from config", "error", err)
g.Shutdown(1, "Startup failed")
return
}

closer, err := tracing.Init(setting.Cfg)
if err != nil {
g.log.Error("Tracing settings is not valid", "error", err)

@ -87,6 +99,11 @@ func (g *GrafanaServerImpl) Start() {
g.startHttpServer()
}

func initSql() {
sqlstore.NewEngine()
sqlstore.EnsureAdminUser()
}

func (g *GrafanaServerImpl) initLogging() {
err := setting.NewConfigContext(&setting.CommandLineArgs{
Config: *configFile,

@ -225,7 +225,7 @@ func init() {

M_DataSource_ProxyReq_Timer = prometheus.NewSummary(prometheus.SummaryOpts{
Name: "api_dataproxy_request_all_milliseconds",
Help: "summary for dashboard search duration",
Help: "summary for dataproxy request duration",
Namespace: exporterName,
})

@ -363,6 +363,7 @@ type scenarioContext struct {
respJson map[string]interface{}
handlerFunc handlerFunc
defaultHandler macaron.Handler
url string

req *http.Request
}

@ -123,23 +123,22 @@ func Recovery() macaron.Handler {
c.Data["ErrorMsg"] = string(stack)
}

c.HTML(500, "500")
ctx, ok := c.Data["ctx"].(*Context)

// // Lookup the current responsewriter
// val := c.GetVal(inject.InterfaceOf((*http.ResponseWriter)(nil)))
// res := val.Interface().(http.ResponseWriter)
//
// // respond with panic message while in development mode
// var body []byte
// if setting.Env == setting.DEV {
// res.Header().Set("Content-Type", "text/html")
// body = []byte(fmt.Sprintf(panicHtml, err, err, stack))
// }
//
// res.WriteHeader(http.StatusInternalServerError)
// if nil != body {
// res.Write(body)
// }
if ok && ctx.IsApiRequest() {
resp := make(map[string]interface{})
resp["message"] = "Internal Server Error - Check the Grafana server logs for the detailed error message."

if c.Data["ErrorMsg"] != nil {
resp["error"] = fmt.Sprintf("%v - %v", c.Data["Title"], c.Data["ErrorMsg"])
} else {
resp["error"] = c.Data["Title"]
}

c.JSON(500, resp)
} else {
c.HTML(500, "500")
}
}
}()
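
An illustration (not from this diff) of the JSON body an API client receives when the recovery handler above catches a panic; the "error" value is an example of the Title/ErrorMsg formatting, not a literal string from the code.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        resp := map[string]interface{}{
            "message": "Internal Server Error - Check the Grafana server logs for the detailed error message.",
            "error":   "Server Error - runtime error: index out of range", // assumed example values
        }
        body, _ := json.MarshalIndent(resp, "", "  ")
        fmt.Println(string(body))
    }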