Merge branch 'master' into develop

commit 1dab313ca3
CHANGELOG.md (18 changed lines)
@@ -8,6 +8,24 @@

* **GitHub OAuth**: Support for GitHub organizations with 100+ teams. [#8846](https://github.com/grafana/grafana/issues/8846), thx [@skwashd](https://github.com/skwashd)
* **Graphite**: Calls to the Graphite /metrics/find API now include the panel or dashboard time range (from & until) in most cases, [#8055](https://github.com/grafana/grafana/issues/8055)
* **Graphite**: Added new graphite 1.0 functions, available if you set version to 1.0.x in data source settings. New functions: mapSeries, reduceSeries, isNonNull, groupByNodes, offsetToZero, grep, weightedAverage, removeEmptySeries, aggregateLine, averageOutsidePercentile, delay, exponentialMovingAverage, fallbackSeries, integralByInterval, interpolate, invert, linearRegression, movingMin, movingMax, movingSum, multiplySeriesWithWildcards, pow, powSeries, removeBetweenPercentile, squareRoot, timeSlice, closes [#8261](https://github.com/grafana/grafana/issues/8261)

## Changes

* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)

# 4.4.4 (unreleased)

## Bug Fixes

* **MySQL/Postgres**: Fix for the max_idle_conn option default, which was wrongly set to zero. Zero does not mean unlimited here; in practice it disables connection pooling. Fixes [#8513](https://github.com/grafana/grafana/issues/8513)

# 4.4.3 (2017-08-07)

## Bug Fixes

* **Search**: Fix for issue that caused search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
* **Modals**: ESC key now closes modal again, fixes [#8988](https://github.com/grafana/grafana/issues/8988), thx [@j-white](https://github.com/j-white)

# 4.4.2 (2017-08-01)
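As a point of reference, here is a minimal sketch of the kind of time-scoped /metrics/find request described in the Graphite changelog entry above. The host and metric query are hypothetical, from/until are unix timestamps, and the Python requests package is assumed; this is illustrative, not Grafana's exact request code.

import requests

resp = requests.get(
    "http://graphite.example.com/metrics/find",
    params={"query": "apps.backend.*", "from": 1502000000, "until": 1502086400},
)
print(resp.json())  # list of matching metric nodes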
@@ -76,8 +76,10 @@ password =
# Example: mysql://user:secret@host:port/database
url =

# Max idle conn setting default is 2
max_idle_conn = 2

# Max conn setting default is 0 (means not set)
max_idle_conn =
max_open_conn =

# For "postgres", use either "disable", "require" or "verify-full"
@@ -85,8 +85,10 @@
# For "sqlite3" only, path relative to data_path setting
;path = grafana.db

# Max idle conn setting default is 2
;max_idle_conn = 2

# Max conn setting default is 0 (means not set)
;max_idle_conn =
;max_open_conn =
@@ -32,6 +32,7 @@ add ./files/my_htpasswd /etc/nginx/.htpasswd
# Add system service config
add ./files/nginx.conf /etc/nginx/nginx.conf
add ./files/supervisord.conf /etc/supervisor/conf.d/supervisord.conf

# Nginx
#
# graphite
@@ -39,6 +40,7 @@ expose 80

# Carbon line receiver port
expose 2003

# Carbon cache query port
expose 7002
docker/blocks/graphite1/Dockerfile (new file, 93 lines)
@@ -0,0 +1,93 @@
FROM phusion/baseimage:0.9.22
MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>

RUN apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y --force-yes install vim \
    nginx \
    python-dev \
    python-flup \
    python-pip \
    python-ldap \
    expect \
    git \
    memcached \
    sqlite3 \
    libffi-dev \
    libcairo2 \
    libcairo2-dev \
    python-cairo \
    python-rrdtool \
    pkg-config \
    nodejs \
  && rm -rf /var/lib/apt/lists/*

# fix python dependencies (LTS Django and newer memcached/txAMQP)
RUN pip install django==1.8.18 \
    python-memcached==1.53 \
    txAMQP==0.6.2 \
  && pip install --upgrade pip

# install whisper
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper
WORKDIR /usr/local/src/whisper
RUN python ./setup.py install

# install carbon
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon
WORKDIR /usr/local/src/carbon
RUN pip install -r requirements.txt \
  && python ./setup.py install

# install graphite
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
WORKDIR /usr/local/src/graphite-web
RUN pip install -r requirements.txt \
  && python ./setup.py install
ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
WORKDIR /opt/graphite/webapp
RUN mkdir -p /var/log/graphite/ \
  && PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings

# install statsd
RUN git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
ADD conf/opt/statsd/config.js /opt/statsd/config.js

# config nginx
RUN rm /etc/nginx/sites-enabled/default
ADD conf/etc/nginx/nginx.conf /etc/nginx/nginx.conf
ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/graphite-statsd.conf

# init django admin
ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
RUN chmod +x /usr/local/bin/manage.sh \
  && /usr/local/bin/django_admin_init.exp

# logging support
RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
ADD conf/etc/logrotate.d/graphite-statsd /etc/logrotate.d/graphite-statsd

# daemons
ADD conf/etc/service/carbon/run /etc/service/carbon/run
ADD conf/etc/service/carbon-aggregator/run /etc/service/carbon-aggregator/run
ADD conf/etc/service/graphite/run /etc/service/graphite/run
ADD conf/etc/service/statsd/run /etc/service/statsd/run
ADD conf/etc/service/nginx/run /etc/service/nginx/run

# default conf setup
ADD conf /etc/graphite-statsd/conf
ADD conf/etc/my_init.d/01_conf_init.sh /etc/my_init.d/01_conf_init.sh

# cleanup
RUN apt-get clean \
  && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# defaults
EXPOSE 80 2003-2004 2023-2024 8125/udp 8126
VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
WORKDIR /
ENV HOME /root
CMD ["/sbin/my_init"]
docker/blocks/graphite1/conf/etc/logrotate.d/graphite-statsd (new file, 11 lines)
@@ -0,0 +1,11 @@
/var/log/*.log /var/log/*/*.log {
  weekly
  size 50M
  missingok
  rotate 10
  compress
  delaycompress
  notifempty
  copytruncate
  su root syslog
}
docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
#!/bin/bash

conf_dir=/etc/graphite-statsd/conf

# auto setup graphite with default configs if /opt/graphite is missing
# needed for the use case when a docker host volume is mounted at any of the following:
# - /opt/graphite
# - /opt/graphite/conf
# - /opt/graphite/webapp/graphite
graphite_dir_contents=$(find /opt/graphite -mindepth 1 -print -quit)
graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit)
graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit)
graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit)
if [[ -z $graphite_dir_contents ]]; then
  git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
  cd /usr/local/src/graphite-web && python ./setup.py install
fi
if [[ -z $graphite_storage_dir_contents ]]; then
  /usr/local/bin/django_admin_init.exp
fi
if [[ -z $graphite_conf_dir_contents ]]; then
  cp -R $conf_dir/opt/graphite/conf/*.conf /opt/graphite/conf/
fi
if [[ -z $graphite_webapp_dir_contents ]]; then
  cp $conf_dir/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
fi

# auto setup statsd with default config if /opt/statsd is missing
# needed for the use case when a docker host volume is mounted at any of the following:
# - /opt/statsd
statsd_dir_contents=$(find /opt/statsd -mindepth 1 -print -quit)
if [[ -z $statsd_dir_contents ]]; then
  git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
  cp $conf_dir/opt/statsd/config.js /opt/statsd/config.js
fi
docker/blocks/graphite1/conf/etc/nginx/nginx.conf (new file, 96 lines)
@@ -0,0 +1,96 @@
user www-data;
worker_processes 4;
pid /run/nginx.pid;
daemon off;

events {
    worker_connections 768;
    # multi_accept on;
}

http {

    ##
    # Basic Settings
    ##

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # server_tokens off;

    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # Logging Settings
    ##

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##

    gzip on;
    gzip_disable "msie6";

    # gzip_vary on;
    # gzip_proxied any;
    # gzip_comp_level 6;
    # gzip_buffers 16 8k;
    # gzip_http_version 1.1;
    # gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;

    ##
    # nginx-naxsi config
    ##
    # Uncomment it if you installed nginx-naxsi
    ##

    #include /etc/nginx/naxsi_core.rules;

    ##
    # nginx-passenger config
    ##
    # Uncomment it if you installed nginx-passenger
    ##

    #passenger_root /usr;
    #passenger_ruby /usr/bin/ruby;

    ##
    # Virtual Host Configs
    ##

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}


#mail {
#    # See sample authentication script at:
#    # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
#    # auth_http localhost/auth.php;
#    # pop3_capabilities "TOP" "USER";
#    # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
#    server {
#        listen localhost:110;
#        protocol pop3;
#        proxy on;
#    }
#
#    server {
#        listen localhost:143;
#        protocol imap;
#        proxy on;
#    }
#}
docker/blocks/graphite1/conf/etc/nginx/sites-enabled/graphite-statsd.conf (new file, 31 lines)
@@ -0,0 +1,31 @@
server {
  listen 80;
  root /opt/graphite/static;
  index index.html;

  location /media {
    # django admin static files
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/media/;
  }

  location /admin/auth/admin {
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
  }

  location /admin/auth/user/admin {
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
  }

  location / {
    proxy_pass http://localhost:8080;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

    add_header 'Access-Control-Allow-Origin' '*';
    add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
    add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type';
    add_header 'Access-Control-Allow-Credentials' 'true';
  }

}
docker/blocks/graphite1/conf/etc/service/carbon-aggregator/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-aggregator-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-aggregator.py start --debug >> /var/log/carbon-aggregator.log 2>&1
docker/blocks/graphite1/conf/etc/service/carbon/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-cache-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-cache.py start --debug >> /var/log/carbon.log 2>&1
docker/blocks/graphite1/conf/etc/service/graphite/run (new executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/bash

export PYTHONPATH=/opt/graphite/webapp && exec /usr/local/bin/gunicorn wsgi --workers=4 --bind=127.0.0.1:8080 --log-file=/var/log/gunicorn.log --preload --pythonpath=/opt/graphite/webapp/graphite
docker/blocks/graphite1/conf/etc/service/nginx/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

mkdir -p /var/log/nginx
exec /usr/sbin/nginx -c /etc/nginx/nginx.conf
docker/blocks/graphite1/conf/etc/service/statsd/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

exec /usr/bin/nodejs /opt/statsd/stats.js /opt/statsd/config.js >> /var/log/statsd.log 2>&1
docker/blocks/graphite1/conf/opt/graphite/conf/aggregation-rules.conf (new file, 35 lines)
@@ -0,0 +1,35 @@
# The form of each line in this file should be as follows:
#
# output_template (frequency) = method input_pattern
#
# This will capture any received metrics that match 'input_pattern'
# for calculating an aggregate metric. The calculation will occur
# every 'frequency' seconds and the 'method' can specify 'sum' or
# 'avg'. The name of the aggregate metric will be derived from
# 'output_template' filling in any captured fields from 'input_pattern'.
#
# For example, if your metric naming scheme is:
#
#   <env>.applications.<app>.<server>.<metric>
#
# You could configure some aggregations like so:
#
#   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
#   <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
#
# As an example, if the following metrics are received:
#
#   prod.applications.apache.www01.requests
#   prod.applications.apache.www02.requests
#
# They would all go into the same aggregation buffer and after 60 seconds the
# aggregate metric 'prod.applications.apache.all.requests' would be calculated
# by summing their values.
#
# Template components such as <env> will match everything up to the next dot.
# To match multiple metric components including the dots, use <<metric>> in the
# input template:
#
#   <env>.applications.<app>.all.<app_metric> (60) = sum <env>.applications.<app>.*.<<app_metric>>
#
# Note that any time this file is modified, it will be re-read automatically.
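To make the template mechanics above concrete, here is an illustrative Python sketch of how an input_pattern with <fields> could be matched against a metric name and used to fill the output_template. This is not carbon-aggregator's actual implementation, just a model of the behavior the comments describe.

import re

input_pattern = "<env>.applications.<app>.*.requests"
output_template = "<env>.applications.<app>.all.requests"

# '.' separates components; '*' and each <field> match a single component.
regex = input_pattern.replace(".", r"\.").replace("*", "[^.]+")
regex = re.sub(r"<(\w+)>", r"(?P<\1>[^.]+)", regex)

m = re.match(regex + "$", "prod.applications.apache.www01.requests")
out = output_template
for name, value in m.groupdict().items():  # {'env': 'prod', 'app': 'apache'}
    out = out.replace("<%s>" % name, value)
print(out)  # prod.applications.apache.all.requests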
docker/blocks/graphite1/conf/opt/graphite/conf/blacklist.conf (new file, 5 lines)
@@ -0,0 +1,5 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, any metrics received which
# match one of these expressions will be dropped
# This file is reloaded automatically when changes are made
^some\.noisy\.metric\.prefix\..*
docker/blocks/graphite1/conf/opt/graphite/conf/carbon.amqp.conf (new file, 75 lines)
@@ -0,0 +1,75 @@
# This is a configuration file with AMQP enabled

[cache]
LOCAL_DATA_DIR =

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries gets more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

# Enable AMQP if you want to receive metrics using your amqp broker
ENABLE_AMQP = True

# Verbose means a line will be logged for every metric received
# useful for testing
AMQP_VERBOSE = True

# your credentials for the amqp server
# AMQP_USER = guest
# AMQP_PASSWORD = guest

# the network settings for the amqp server
# AMQP_HOST = localhost
# AMQP_PORT = 5672

# if you want to include the metric name as part of the message body
# instead of as the routing key, set this to True
# AMQP_METRIC_NAME_IN_BODY = False

# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration; you have to specify distinct
# interfaces and ports for the listeners.

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf (new file, 359 lines)
@@ -0,0 +1,359 @@
[cache]
# Configure carbon directories.
#
# OS environment variables can be used to tell carbon where graphite is
# installed, where to read configuration from and where to write data.
#
#   GRAPHITE_ROOT        - Root directory of the graphite installation.
#                          Defaults to ../
#   GRAPHITE_CONF_DIR    - Configuration directory (where this file lives).
#                          Defaults to $GRAPHITE_ROOT/conf/
#   GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
#                          Defaults to $GRAPHITE_ROOT/storage/
#
# To change other directory paths, add settings to this file. The following
# configuration variables are available with these default values:
#
#   STORAGE_DIR    = $GRAPHITE_STORAGE_DIR
#   LOCAL_DATA_DIR = STORAGE_DIR/whisper/
#   WHITELISTS_DIR = STORAGE_DIR/lists/
#   CONF_DIR       = STORAGE_DIR/conf/
#   LOG_DIR        = STORAGE_DIR/log/
#   PID_DIR        = STORAGE_DIR/
#
# For FHS style directory structures, use:
#
#   STORAGE_DIR = /var/lib/carbon/
#   CONF_DIR    = /etc/carbon/
#   LOG_DIR     = /var/log/carbon/
#   PID_DIR     = /var/run/
#
#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/

# Enable daily log rotation. If disabled, a kill -HUP can be used after a manual rotate
ENABLE_LOGROTATION = True

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =
#
# NOTE: The above settings must be set under [relay] and [aggregator]
#       to take effect for those daemons as well

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries gets more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 500

# If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a
# stop/shutdown is initiated. This helps when MAX_UPDATES_PER_SECOND is
# relatively low and carbon has cached a lot of updates; it enables the carbon
# daemon to shutdown more quickly.
# MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = 50

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

# Set this to True to enable the UDP listener. By default this is off
# because it is very common to run multiple carbon daemons and managing
# another (rarely used) port for every carbon instance is not fun.
ENABLE_UDP_LISTENER = False
UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# Per security concerns outlined in Bug #817247 the pickle receiver
# will use a more secure and slightly less efficient unpickler.
# Set this to True to revert to the old-fashioned insecure unpickler.
USE_INSECURE_UNPICKLER = False

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

# Set this to False to drop datapoints received after the cache
# reaches MAX_CACHE_SIZE. If this is True (the default) then sockets
# over which metrics are received will temporarily stop accepting
# data until the cache size falls below 95% MAX_CACHE_SIZE.
USE_FLOW_CONTROL = True

# By default, carbon-cache will log every whisper update and cache hit. This can be excessive and
# degrade performance if logging on the same volume as the whisper data is stored.
LOG_UPDATES = False
LOG_CACHE_HITS = False
LOG_CACHE_QUEUE_SORTS = True

# The thread that writes metrics to disk can use one of the following strategies
# determining the order in which metrics are removed from cache and flushed to
# disk. The default option preserves the same behavior as has been historically
# available in version 0.9.10.
#
# sorted - All metrics in the cache will be counted and an ordered list of
# them will be sorted according to the number of datapoints in the cache at the
# moment of the list's creation. Metrics will then be flushed from the cache to
# disk in that order.
#
# max - The writer thread will always pop and flush the metric from cache
# that has the most datapoints. This will give a strong flush preference to
# frequently updated metrics and will also reduce random file-io. Infrequently
# updated metrics may only ever be persisted to disk at daemon shutdown if
# there are a large number of metrics which receive very frequent updates OR if
# disk i/o is very slow.
#
# naive - Metrics will be flushed from the cache to disk in an unordered
# fashion. This strategy may be desirable in situations where the storage for
# whisper files is solid state, CPU resources are very limited or deference to
# the OS's i/o scheduler is expected to compensate for the random write
# pattern.
#
CACHE_WRITE_STRATEGY = sorted

# On some systems it is desirable for whisper to write synchronously.
# Set this option to True if you'd like to try this. Basically it will
# shift the onus of buffering writes from the kernel into carbon's cache.
WHISPER_AUTOFLUSH = False

# By default new Whisper files are created pre-allocated with the data region
# filled with zeros to prevent fragmentation and speed up contiguous reads and
# writes (which are common). Enabling this option will cause Whisper to create
# the file sparsely instead. Enabling this option may allow a large increase of
# MAX_CREATES_PER_MINUTE but may have longer term performance implications
# depending on the underlying storage configuration.
# WHISPER_SPARSE_CREATE = False

# Only beneficial on linux filesystems that support the fallocate system call.
# It maintains the benefits of contiguous reads/writes, but with a potentially
# much faster creation speed, by allowing the kernel to handle the block
# allocation and zero-ing. Enabling this option may allow a large increase of
# MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported
# this option will gracefully fallback to standard POSIX file access methods.
WHISPER_FALLOCATE_CREATE = True

# Enabling this option will cause Whisper to lock each Whisper file it writes
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files
# WHISPER_LOCK_WRITES = False

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60

# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False

# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False

# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite
# AMQP_METRIC_NAME_IN_BODY = False

# The manhole interface allows you to SSH into the carbon daemon
# and get a python interpreter. BE CAREFUL WITH THIS! If you do
# something like time.sleep() in the interpreter, the whole process
# will sleep! This is *extremely* helpful in debugging, assuming
# you are familiar with the code. If you are not, please don't
# mess with this, you are asking for trouble :)
#
# ENABLE_MANHOLE = False
# MANHOLE_INTERFACE = 127.0.0.1
# MANHOLE_PORT = 7222
# MANHOLE_USER = admin
# MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE=

# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #

# To configure special settings for the carbon-cache instance 'b', uncomment this:
#[cache:b]
#LINE_RECEIVER_PORT = 2103
#PICKLE_RECEIVER_PORT = 2104
#CACHE_QUERY_PORT = 7102
# and any other settings you want to customize, defaults are inherited
# from [carbon] section.
# You can then specify the --instance=b option to manage this instance


[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2013
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2014

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# Carbon-relay has several options for metric routing controlled by RELAY_METHOD
#
# Use relay-rules.conf to route metrics to destinations based on pattern rules
#RELAY_METHOD = rules
#
# Use consistent-hashing for even distribution of metrics between destinations
#RELAY_METHOD = consistent-hashing
#
# Use consistent-hashing but take into account an aggregation-rules.conf shared
# by downstream carbon-aggregator daemons. This will ensure that all metrics
# that map to a given aggregation rule are sent to the same carbon-aggregator
# instance.
# Enable this for carbon-relays that send to a group of carbon-aggregators
#RELAY_METHOD = aggregated-consistent-hashing
RELAY_METHOD = rules

# If you use consistent-hashing you can add redundancy by replicating every
# datapoint to more than one machine.
REPLICATION_FACTOR = 1

# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The general form is IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
#
# If using RELAY_METHOD = rules, all destinations used in relay-rules.conf
# must be defined in this list
DESTINATIONS = 127.0.0.1:2004

# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500
MAX_QUEUE_SIZE = 10000

# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60


[aggregator]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2023

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2024

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# If set true, metric received will be forwarded to DESTINATIONS in addition to
# the output of the aggregation rules. If set false the carbon-aggregator will
# only ever send the output of aggregation.
FORWARD_ALL = True

# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
DESTINATIONS = 127.0.0.1:2004

# If you want to add redundancy to your data by replicating every
# datapoint to more than one machine, increase this.
REPLICATION_FACTOR = 1

# This is the maximum number of datapoints that can be queued up
# for a single destination. Once this limit is hit, we will
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
# we will drop any subsequently received datapoints.
MAX_QUEUE_SIZE = 10000

# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True

# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500

# This defines how many datapoints the aggregator remembers for
# each metric. Aggregation only happens for datapoints that fall in
# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
MAX_AGGREGATION_INTERVALS = 5

# By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back
# aggregated data points once every rule.frequency seconds, on a per-rule basis.
# Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points
# every N seconds, independent of rule frequency. This is useful, for example,
# to be able to query partially aggregated metrics from carbon-cache without
# having to first wait rule.frequency seconds.
# WRITE_BACK_FREQUENCY = 0

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
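Alongside the plaintext listener, carbon.conf above opens a pickle receiver on port 2004. For orientation, here is a minimal Python sketch of that batch protocol (a length-prefixed pickle of (path, (timestamp, value)) tuples, as documented for carbon); the metric name and localhost destination are made up for the example.

import pickle
import socket
import struct
import time

datapoints = [("test.pickle.demo", (int(time.time()), 3.14))]  # hypothetical metric
payload = pickle.dumps(datapoints, protocol=2)
message = struct.pack("!L", len(payload)) + payload  # 4-byte length header

sock = socket.create_connection(("localhost", 2004))
sock.sendall(message)
sock.close()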
docker/blocks/graphite1/conf/opt/graphite/conf/dashboard.conf (new file, 57 lines)
@@ -0,0 +1,57 @@
# This configuration file controls the behavior of the Dashboard UI, available
# at http://my-graphite-server/dashboard/.
#
# This file must contain a [ui] section that defines values for all of the
# following settings.
[ui]
default_graph_width = 400
default_graph_height = 250
automatic_variants = true
refresh_interval = 60
autocomplete_delay = 375
merge_hover_delay = 750

# You can set this to 'default', 'white', or a custom theme name.
# To create a custom theme, copy the dashboard-default.css file
# to dashboard-myThemeName.css in the content/css directory and
# modify it to your liking.
theme = default

[keyboard-shortcuts]
toggle_toolbar = ctrl-z
toggle_metrics_panel = ctrl-space
erase_all_graphs = alt-x
save_dashboard = alt-s
completer_add_metrics = alt-enter
completer_del_metrics = alt-backspace
give_completer_focus = shift-space

# These settings apply to the UI as a whole; all other sections in this file
# pertain only to specific metric types.
#
# The dashboard presents only metrics that fall into specified naming schemes
# defined in this file. This creates a simpler, more targeted view of the
# data. The general form for defining a naming scheme is as follows:
#
#[Metric Type]
#scheme = basis.path.<field1>.<field2>.<fieldN>
#field1.label = Foo
#field2.label = Bar
#
#
# Where each <field> will be displayed as a dropdown box
# in the UI and the remaining portion of the namespace
# shown in the Metric Selector panel. The .label options set the labels
# displayed for each dropdown.
#
# For example:
#
#[Sales]
#scheme = sales.<channel>.<type>.<brand>
#channel.label = Channel
#type.label = Product Type
#brand.label = Brand
#
# This defines a 'Sales' metric type that uses 3 dropdowns in the Context Selector
# (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc)
# will be available in the Metric Selector (upper-right panel).
docker/blocks/graphite1/conf/opt/graphite/conf/graphTemplates.conf (new file, 38 lines)
@@ -0,0 +1,38 @@
[default]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False

[noc]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,yellow,purple,brown,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False

[plain]
background = white
foreground = black
minorLine = grey
majorLine = rose

[summary]
background = black
lineColors = #6666ff, #66ff66, #ff6666

[alphas]
background = white
foreground = black
majorLine = grey
minorLine = rose
lineColors = 00ff00aa,ff000077,00337799
docker/blocks/graphite1/conf/opt/graphite/conf/relay-rules.conf (new file, 21 lines)
@@ -0,0 +1,21 @@
# Relay destination rules for carbon-relay. Entries are scanned in order,
# and the first pattern a metric matches will cause processing to cease after sending
# unless `continue` is set to true
#
# [name]
# pattern = <regex>
# destinations = <list of destination addresses>
# continue = <boolean>  # default: False
#
# name: Arbitrary unique name to identify the rule
# pattern: Regex pattern to match against the metric name
# destinations: Comma-separated list of destinations.
#   ex: 127.0.0.1, 10.1.2.3:2004, 10.1.2.4:2004:a, myserver.mydomain.com
# continue: Continue processing rules if this rule matches (default: False)

# You must have exactly one section with 'default = true'
# Note that all destinations listed must also exist in carbon.conf
# in the DESTINATIONS setting in the [relay] section
[default]
default = true
destinations = 127.0.0.1:2004:a, 127.0.0.1:2104:b
docker/blocks/graphite1/conf/opt/graphite/conf/rewrite-rules.conf (new file, 18 lines)
@@ -0,0 +1,18 @@
# This file defines regular expression patterns that can be used to
# rewrite metric names in a search & replace fashion. It consists of two
# sections, [pre] and [post]. The rules in the pre section are applied to
# metric names as soon as they are received. The post rules are applied
# after aggregation has taken place.
#
# The general form of each rule is as follows:
#
# regex-pattern = replacement-text
#
# For example:
#
# [post]
# _sum$ =
# _avg$ =
#
# These rules would strip off a suffix of _sum or _avg from any metric names
# after aggregation.
docker/blocks/graphite1/conf/opt/graphite/conf/storage-aggregation.conf (new file, 43 lines)
@@ -0,0 +1,43 @@
# Aggregation methods for whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds
#
# [name]
# pattern = <regex>
# xFilesFactor = <float between 0 and 1>
# aggregationMethod = <average|sum|last|max|min>
#
# name: Arbitrary unique name for the rule
# pattern: Regex pattern to match against the metric name
# xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
# aggregationMethod: function to apply to data points for aggregation
#
[min]
pattern = \.lower$
xFilesFactor = 0.1
aggregationMethod = min

[max]
pattern = \.upper(_\d+)?$
xFilesFactor = 0.1
aggregationMethod = max

[sum]
pattern = \.sum$
xFilesFactor = 0
aggregationMethod = sum

[count]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum

[count_legacy]
pattern = ^stats_counts.*
xFilesFactor = 0
aggregationMethod = sum

[default_average]
pattern = .*
xFilesFactor = 0.3
aggregationMethod = average
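An illustrative sketch (not carbon's own code) of the first-match-wins scanning described in the file header above, showing which rule a given metric name would hit:

import re

rules = [
    ("min", r"\.lower$"),
    ("max", r"\.upper(_\d+)?$"),
    ("sum", r"\.sum$"),
    ("count", r"\.count$"),
    ("count_legacy", r"^stats_counts.*"),
    ("default_average", r".*"),
]

def first_match(metric):
    # return the name of the first rule whose pattern matches
    return next(name for name, pattern in rules if re.search(pattern, metric))

print(first_match("stats.timers.api.upper_90"))  # -> max
print(first_match("apps.backend.cpu.load"))      # -> default_average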
docker/blocks/graphite1/conf/opt/graphite/conf/storage-schemas.conf (new file, 17 lines)
@@ -0,0 +1,17 @@
# Schema definitions for Whisper files. Entries are scanned in order,
# and first match wins.
[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y

[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d

[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y

[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y
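Each retention archive is preallocated, so the schema above fixes the per-metric file size up front. A rough Python sketch of the arithmetic for the [default] retentions, assuming whisper's ~12 bytes per stored datapoint (plus small per-archive headers):

retentions = [("10s", "1d"), ("1m", "7d"), ("10m", "1y")]
seconds = {"s": 1, "m": 60, "h": 3600, "d": 86400, "y": 31536000}

def to_secs(spec):
    return int(spec[:-1]) * seconds[spec[-1]]

total_points = sum(to_secs(keep) // to_secs(step) for step, keep in retentions)
print(total_points)              # 8640 + 10080 + 52560 = 71280 points
print(total_points * 12 / 1024)  # ~835 KiB per metric file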
docker/blocks/graphite1/conf/opt/graphite/conf/whitelist.conf (new file, 6 lines)
@@ -0,0 +1,6 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, only metrics received which
# match one of these expressions will be persisted. If this file is empty or
# missing, all metrics will pass through.
# This file is reloaded automatically when changes are made
.*
docker/blocks/graphite1/conf/opt/graphite/webapp/graphite/app_settings.py (new file, 94 lines)
@@ -0,0 +1,94 @@
"""Copyright 2008 Orbitz WorldWide

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""

# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
from os.path import dirname, join, abspath


# Django settings below, do not touch!
APPEND_SLASH = False
TEMPLATE_DEBUG = False

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(dirname(abspath(__file__)), 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'

# Absolute path to the directory that holds media.
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''

MIDDLEWARE_CLASSES = (
    'graphite.middleware.LogExceptionsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.gzip.GZipMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'graphite.urls'

INSTALLED_APPS = (
    'graphite.metrics',
    'graphite.render',
    'graphite.browser',
    'graphite.composer',
    'graphite.account',
    'graphite.dashboard',
    'graphite.whitelist',
    'graphite.events',
    'graphite.url_shortener',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.staticfiles',
    'tagging',
)

AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']

GRAPHITE_WEB_APP_SETTINGS_LOADED = True

STATIC_URL = '/static/'

STATIC_ROOT = '/opt/graphite/static/'
@ -0,0 +1,215 @@
|
||||
## Graphite local_settings.py
|
||||
# Edit this file to customize the default Graphite webapp settings
|
||||
#
|
||||
# Additional customizations to Django settings can be added to this file as well
|
||||
|
||||
#####################################
|
||||
# General Configuration #
|
||||
#####################################
|
||||
# Set this to a long, random unique string to use as a secret key for this
|
||||
# install. This key is used for salting of hashes used in auth tokens,
|
||||
# CRSF middleware, cookie storage, etc. This should be set identically among
|
||||
# instances if used behind a load balancer.
|
||||
#SECRET_KEY = 'UNSAFE_DEFAULT'
|
||||
|
||||
# In Django 1.5+ set this to the list of hosts your graphite instances is
|
||||
# accessible as. See:
|
||||
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
|
||||
#ALLOWED_HOSTS = [ '*' ]
|
||||
|
||||
# Set your local timezone (Django's default is America/Chicago)
|
||||
# If your graphs appear to be offset by a couple hours then this probably
|
||||
# needs to be explicitly set to your local timezone.
|
||||
#TIME_ZONE = 'America/Los_Angeles'
|
||||
|
||||
# Override this to provide documentation specific to your Graphite deployment
|
||||
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
|
||||
|
||||
# Logging
|
||||
#LOG_RENDERING_PERFORMANCE = True
|
||||
#LOG_CACHE_PERFORMANCE = True
|
||||
#LOG_METRIC_ACCESS = True
|
||||
|
||||
# Enable full debug page display on exceptions (Internal Server Error pages)
|
||||
#DEBUG = True
|
||||
|
||||
# If using RRD files and rrdcached, set to the address or socket of the daemon
|
||||
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
|
||||
|
||||
# This lists the memcached servers that will be used by this webapp.
|
||||
# If you have a cluster of webapps you should ensure all of them
|
||||
# have the *exact* same value for this setting. That will maximize cache
|
||||
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
|
||||
# memcached entirely.
|
||||
#
|
||||
# You should not use the loopback address (127.0.0.1) here if using clustering
|
||||
# as every webapp in the cluster should use the exact same values to prevent
|
||||
# unneeded cache misses. Set to [] to disable caching of images and fetched data
|
||||
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
|
||||
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute
|
||||
|
||||
|
||||
#####################################
|
||||
# Filesystem Paths #
|
||||
#####################################
|
||||
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
|
||||
# to somewhere else
|
||||
#GRAPHITE_ROOT = '/opt/graphite'
|
||||
|
||||
# Most installs done outside of a separate tree such as /opt/graphite will only
|
||||
# need to change these three settings. Note that the default settings for each
|
||||
# of these is relative to GRAPHITE_ROOT
|
||||
#CONF_DIR = '/opt/graphite/conf'
|
||||
#STORAGE_DIR = '/opt/graphite/storage'
|
||||
#CONTENT_DIR = '/opt/graphite/webapp/content'
|
||||
|
||||
# To further or fully customize the paths, modify the following. Note that the
|
||||
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
|
||||
#
|
||||
## Webapp config files
|
||||
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
|
||||
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'
|
||||
|
||||
## Data directories
|
||||
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
|
||||
#WHISPER_DIR = '/opt/graphite/storage/whisper'
|
||||
#RRD_DIR = '/opt/graphite/storage/rrd'
|
||||
#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
|
||||
#LOG_DIR = '/opt/graphite/storage/log/webapp'
|
||||
#INDEX_FILE = '/opt/graphite/storage/index' # Search index file
|
||||
|
||||
|
||||
#####################################
|
||||
# Email Configuration #
|
||||
#####################################
|
||||
# This is used for emailing rendered Graphs
|
||||
# Default backend is SMTP
|
||||
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
|
||||
#EMAIL_HOST = 'localhost'
|
||||
#EMAIL_PORT = 25
|
||||
#EMAIL_HOST_USER = ''
|
||||
#EMAIL_HOST_PASSWORD = ''
|
||||
#EMAIL_USE_TLS = False
|
||||
# To drop emails on the floor, enable the Dummy backend:
|
||||
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
|
||||
|
||||
|
||||
#####################################
|
||||
# Authentication Configuration #
|
||||
#####################################
|
||||
## LDAP / ActiveDirectory authentication setup
|
||||
#USE_LDAP_AUTH = True
|
||||
#LDAP_SERVER = "ldap.mycompany.com"
|
||||
#LDAP_PORT = 389
|
||||
# OR
|
||||
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
|
||||
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_PASS = "readonly_account_password"
|
#LDAP_USER_QUERY = "(username=%s)"  #For Active Directory use "(sAMAccountName=%s)"
#
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
# See http://www.python-ldap.org/ for further details on these options.

## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
#USE_REMOTE_USER_AUTHENTICATION = True

# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'


##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to setup an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
#
# The following built-in database engines are available:
#  django.db.backends.postgresql          # Removed in Django 1.4
#  django.db.backends.postgresql_psycopg2
#  django.db.backends.mysql
#  django.db.backends.sqlite3
#  django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
#DATABASES = {
#    'default': {
#        'NAME': '/opt/graphite/storage/graphite.db',
#        'ENGINE': 'django.db.backends.sqlite3',
#        'USER': '',
#        'PASSWORD': '',
#        'HOST': '',
#        'PORT': ''
#    }
#}
#


#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]

## These are timeout values (in seconds) for requests to remote webapps
#REMOTE_STORE_FETCH_TIMEOUT = 6   # Timeout to fetch series data
#REMOTE_STORE_FIND_TIMEOUT = 2.5  # Timeout for metric find requests
#REMOTE_STORE_RETRY_DELAY = 60    # Time before retrying a failed remote webapp
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results

## Remote rendering settings
# Set to True to enable rendering of Graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0

# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use 127.0.0.1 here in most cases
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
#CARBONLINK_TIMEOUT = 1.0

#####################################
#  Additional Django Settings       #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *

import os

LOG_DIR = '/var/log/graphite'
SECRET_KEY = '$(date +%s | sha256sum | base64 | head -c 64)'

if (os.getenv("MEMCACHE_HOST") is not None):
    MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOST").split(",")

if (os.getenv("DEFAULT_CACHE_DURATION") is not None):
    # Read the same variable that was checked above (the original read
    # CACHE_DURATION here, which would raise TypeError when unset).
    DEFAULT_CACHE_DURATION = int(os.getenv("DEFAULT_CACHE_DURATION"))
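For reference, the two environment lookups above are the only runtime knobs this settings file exposes. A rough sketch of exercising them (the image name and values are illustrative assumptions, not taken from this repo):

```bash
# Hypothetical run of the graphite block with both overrides set.
# MEMCACHE_HOST takes a comma-separated host list; DEFAULT_CACHE_DURATION is seconds.
docker run -d \
  -e MEMCACHE_HOST=127.0.0.1:11211,127.0.0.2:11211 \
  -e DEFAULT_CACHE_DURATION=60 \
  -p 8080:80 -p 2003:2003 \
  grafana/graphite-block   # image name is an assumption
```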
6
docker/blocks/graphite1/conf/opt/statsd/config.js
Normal file
@ -0,0 +1,6 @@
{
  "graphiteHost": "127.0.0.1",
  "graphitePort": 2003,
  "port": 8125,
  "flushInterval": 10000
}
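With statsd listening on UDP 8125 per this config, a metric can be pushed with plain shell tools as a quick smoke test; this is the standard statsd line protocol, and with the 10000 ms flushInterval above the value should reach graphite within about ten seconds:

```bash
# Send one counter increment to the statsd port defined above.
# Format: <metric name>:<value>|<type>; "c" marks a counter.
echo "deploys.test.myservice:1|c" | nc -u -w1 127.0.0.1 8125
```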
26
docker/blocks/graphite1/conf/usr/local/bin/django_admin_init.exp
Executable file
@ -0,0 +1,26 @@
#!/usr/bin/env expect

set timeout -1
spawn /usr/local/bin/manage.sh

expect "Would you like to create one now" {
    send "yes\r"
}

expect "Username" {
    send "root\r"
}

expect "Email address:" {
    send "root.graphite@mailinator.com\r"
}

expect "Password:" {
    send "root\r"
}

expect "Password *:" {
    send "root\r"
}

expect "Superuser created successfully"
3
docker/blocks/graphite1/conf/usr/local/bin/manage.sh
Normal file
@ -0,0 +1,3 @@
#!/bin/bash
PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings
PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
16
docker/blocks/graphite1/fig
Normal file
@ -0,0 +1,16 @@
graphite:
  build: blocks/graphite1
  ports:
    - "8080:80"
    - "2003:2003"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro

fake-graphite-data:
  image: grafana/fake-data-gen
  net: bridge
  environment:
    FD_DATASOURCE: graphite
    FD_PORT: 2003
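The `fig` file above predates Compose naming but uses the same schema, so assuming a Compose-compatible toolchain it can be pointed at directly:

```bash
# Build and start the graphite block plus the fake data generator.
docker-compose -f docker/blocks/graphite1/fig up -d
```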
76
docker/blocks/graphite1/files/carbon.conf
Normal file
@ -0,0 +1,76 @@
[cache]
LOCAL_DATA_DIR = /opt/graphite/storage/whisper/

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries gets more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

LOG_UPDATES = False

# Enable AMQP if you want to receive metrics using an AMQP broker
# ENABLE_AMQP = False

# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False

# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite

# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #

# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration; you have to specify distinct
# interfaces and ports for the listeners.

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
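A quick way to exercise the line receiver configured above is the carbon plaintext protocol, one datapoint per line (flags vary by netcat variant; this is a smoke-test sketch, not part of the block itself):

```bash
# Write one datapoint to the carbon line receiver on port 2003.
# Plaintext protocol: <metric path> <value> <unix timestamp>.
echo "local.random.diceroll 4 $(date +%s)" | nc -q0 127.0.0.1 2003
```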
102
docker/blocks/graphite1/files/events_views.py
Normal file
@ -0,0 +1,102 @@
import datetime
import time

from django.utils.timezone import get_current_timezone
from django.core.urlresolvers import get_script_prefix
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from pytz import timezone

from graphite.util import json
from graphite.events import models
from graphite.render.attime import parseATTime


def to_timestamp(dt):
    return time.mktime(dt.timetuple())


class EventEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return to_timestamp(obj)
        return json.JSONEncoder.default(self, obj)


def view_events(request):
    if request.method == "GET":
        context = {'events': fetch(request),
                   'slash': get_script_prefix()}
        return render_to_response("events.html", context)
    else:
        return post_event(request)


def detail(request, event_id):
    e = get_object_or_404(models.Event, pk=event_id)
    context = {'event': e,
               'slash': get_script_prefix()}
    return render_to_response("event.html", context)


def post_event(request):
    if request.method == 'POST':
        event = json.loads(request.body)
        assert isinstance(event, dict)

        values = {}
        values["what"] = event["what"]
        values["tags"] = event.get("tags", None)
        values["when"] = datetime.datetime.fromtimestamp(
            event.get("when", time.time()))
        if "data" in event:
            values["data"] = event["data"]

        e = models.Event(**values)
        e.save()

        return HttpResponse(status=200)
    else:
        return HttpResponse(status=405)


def get_data(request):
    if 'jsonp' in request.REQUEST:
        response = HttpResponse(
            "%s(%s)" % (request.REQUEST.get('jsonp'),
                        json.dumps(fetch(request), cls=EventEncoder)),
            mimetype='text/javascript')
    else:
        response = HttpResponse(
            json.dumps(fetch(request), cls=EventEncoder),
            mimetype="application/json")
    return response


def fetch(request):
    # XXX we need to move to USE_TZ=True to get rid of naive-time conversions
    def make_naive(dt):
        if 'tz' in request.GET:
            tz = timezone(request.GET['tz'])
        else:
            tz = get_current_timezone()
        local_dt = dt.astimezone(tz)
        if hasattr(local_dt, 'normalize'):
            local_dt = local_dt.normalize()
        return local_dt.replace(tzinfo=None)

    if request.GET.get("from", None) is not None:
        time_from = make_naive(parseATTime(request.GET["from"]))
    else:
        time_from = datetime.datetime.fromtimestamp(0)

    if request.GET.get("until", None) is not None:
        time_until = make_naive(parseATTime(request.GET["until"]))
    else:
        time_until = datetime.datetime.now()

    tags = request.GET.get("tags", None)
    if tags is not None:
        tags = request.GET.get("tags").split(" ")

    return [x.as_dict() for x in
            models.Event.find_events(time_from, time_until, tags=tags)]
20
docker/blocks/graphite1/files/initial_data.json
Normal file
@ -0,0 +1,20 @@
[
  {
    "pk": 1,
    "model": "auth.user",
    "fields": {
      "username": "admin",
      "first_name": "",
      "last_name": "",
      "is_active": true,
      "is_superuser": true,
      "is_staff": true,
      "last_login": "2011-09-20 17:02:14",
      "groups": [],
      "user_permissions": [],
      "password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236",
      "email": "root@example.com",
      "date_joined": "2011-09-20 17:02:14"
    }
  }
]
42
docker/blocks/graphite1/files/local_settings.py
Normal file
@ -0,0 +1,42 @@
# Edit this file to override the default graphite settings, do not edit settings.py

# Turn on debugging and restart apache if you ever see an "Internal Server Error" page
#DEBUG = True

# Set your local timezone (django will try to figure this out automatically)
TIME_ZONE = 'UTC'

# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely
#MEMCACHE_HOSTS = ['127.0.0.1:11211']

# Sometimes you need to do a lot of rendering work but cannot share your storage mount
#REMOTE_RENDERING = True
#RENDERING_HOSTS = ['fastserver01','fastserver02']
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True

# If you've got more than one backend server they should all be listed here
#CLUSTER_SERVERS = []

# Override this if you need to provide documentation specific to your graphite deployment
#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite"

# Enable email-related features
#SMTP_SERVER = "mail.mycompany.com"

# LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)"  #For Active Directory use "(sAMAccountName=%s)"

# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!)
#DATABASE_ENGINE = 'mysql'  # or 'postgres'
#DATABASE_NAME = 'graphite'
#DATABASE_USER = 'graphite'
#DATABASE_PASSWORD = 'graphite-is-awesome'
#DATABASE_HOST = 'mysql.mycompany.com'
#DATABASE_PORT = '3306'
1
docker/blocks/graphite1/files/my_htpasswd
Normal file
@ -0,0 +1 @@
grafana:$apr1$4R/20xhC$8t37jPP5dbcLr48btdkU//
70
docker/blocks/graphite1/files/nginx.conf
Normal file
@ -0,0 +1,70 @@
daemon off;
user www-data;
worker_processes 1;
pid /var/run/nginx.pid;

events {
  worker_connections 1024;
}

http {
  sendfile on;
  tcp_nopush on;
  tcp_nodelay on;
  keepalive_timeout 65;
  types_hash_max_size 2048;
  server_tokens off;

  server_names_hash_bucket_size 32;

  include /etc/nginx/mime.types;
  default_type application/octet-stream;

  access_log /var/log/nginx/access.log;
  error_log /var/log/nginx/error.log;

  gzip on;
  gzip_disable "msie6";

  server {
    listen 80 default_server;
    server_name _;

    open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m;

    location / {
      proxy_pass http://127.0.0.1:8000;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
      proxy_set_header X-Forwarded-Server $host;
      proxy_set_header X-Forwarded-Host $host;
      proxy_set_header Host $host;

      client_max_body_size 10m;
      client_body_buffer_size 128k;

      proxy_connect_timeout 90;
      proxy_send_timeout 90;
      proxy_read_timeout 90;

      proxy_buffer_size 4k;
      proxy_buffers 4 32k;
      proxy_busy_buffers_size 64k;
      proxy_temp_file_write_size 64k;
    }

    add_header Access-Control-Allow-Origin "*";
    add_header Access-Control-Allow-Methods "GET, OPTIONS";
    add_header Access-Control-Allow-Headers "origin, authorization, accept";

    location /content {
      alias /opt/graphite/webapp/content;
    }

    location /media {
      alias /usr/share/pyshared/django/contrib/admin/media;
    }
  }
}
8
docker/blocks/graphite1/files/statsd_config.js
Normal file
@ -0,0 +1,8 @@
{
  graphitePort: 2003,
  graphiteHost: "127.0.0.1",
  port: 8125,
  mgmt_port: 8126,
  backends: ['./backends/graphite'],
  debug: true
}
19
docker/blocks/graphite1/files/storage-aggregation.conf
Normal file
@ -0,0 +1,19 @@
[min]
pattern = \.min$
xFilesFactor = 0.1
aggregationMethod = min

[max]
pattern = \.max$
xFilesFactor = 0.1
aggregationMethod = max

[sum]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum

[default_average]
pattern = .*
xFilesFactor = 0.5
aggregationMethod = average
16
docker/blocks/graphite1/files/storage-schemas.conf
Normal file
@ -0,0 +1,16 @@
[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y

[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d

[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y

[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y
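Retention rules are matched top-down against the metric name and the first matching pattern wins: for instance, a metric named `statsd.example.count` matches `^statsd.*` and gets 1-minute points for 7 days, then 10-minute rollups for a year. One way to verify what archives whisper actually created (the path below is illustrative; files live under the LOCAL_DATA_DIR set in carbon.conf):

```bash
# Print the archive layout (seconds per point, retention) of one whisper file.
whisper-info.py /opt/graphite/storage/whisper/statsd/example/count.wsp
```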
26
docker/blocks/graphite1/files/supervisord.conf
Normal file
@ -0,0 +1,26 @@
[supervisord]
nodaemon = true
environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf'

[program:nginx]
command = /usr/sbin/nginx
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true

[program:carbon-cache]
;user = www-data
command = /opt/graphite/bin/carbon-cache.py --debug start
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true

[program:graphite-webapp]
;user = www-data
directory = /opt/graphite/webapp
environment = PYTHONPATH='/opt/graphite/webapp'
command = /usr/bin/gunicorn_django -b127.0.0.1:8000 -w2 graphite/settings.py
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true
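Since all three services run under supervisord in the foreground of the container, their state can be checked with the stock supervisor CLI; a sketch assuming the container is named `graphite`:

```bash
# List the three programs defined above and their current state.
docker exec -it graphite supervisorctl status
```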
@ -6,3 +6,4 @@ postgrestest:
      POSTGRES_DATABASE: grafana
    ports:
      - "5432:5432"
    command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql
@ -27,14 +27,12 @@ and the conditions that need to be met for the alert to change state and trigger

## Execution

The alert rules are evaluated in the Grafana backend in a scheduler and query execution engine that is part
of core Grafana. Only some data soures are supported right now. They include `Graphite`, `Prometheus`,
of core Grafana. Only some data sources are supported right now. They include `Graphite`, `Prometheus`,
`InfluxDB` and `OpenTSDB`.

### Clustering

We have not implemented clustering yet. So if you run multiple instances of grafana-server
you have to make sure [execute_alerts]({{< relref "installation/configuration.md#alerting" >}})
is true on only one instance or otherwise you will get duplicated notifications.
Currently alerting supports a limited form of high availability. Since v4.2.0 of Grafana, alert notifications are deduped when running multiple servers. This means all alerts are executed on every server but no duplicate alert notifications are sent due to the deduping logic. Proper load balancing of alerts will be introduced in the future.

<div class="clearfix"></div>

@ -61,8 +59,8 @@ specify a query letter, time range and an aggregation function.
avg() OF query(A, 5m, now) IS BELOW 14
```

- `avg()` Controls how the values for **each** serie should be reduced to a value that can be compared against the threshold. Click on the function to change it to another aggregation function.
- `query(A, 5m, now)` The letter defines what query to execute from the **Metrics** tab. The second two parameters defines the time range, `5m, now` means 5 minutes from now to now. You can also do `10m, now-2m` to define a time range that will be 10 minutes from now to 2 minutes from now. This is useful if you want to ignore the last 2 minutes of data.
- `avg()` Controls how the values for **each** series should be reduced to a value that can be compared against the threshold. Click on the function to change it to another aggregation function.
- `query(A, 5m, now)` The letter defines what query to execute from the **Metrics** tab. The second two parameters define the time range, `5m, now` means 5 minutes from now to now. You can also do `10m, now-2m` to define a time range that will be 10 minutes from now to 2 minutes from now. This is useful if you want to ignore the last 2 minutes of data.
- `IS BELOW 14` Defines the type of threshold and the threshold value. You can click on `IS BELOW` to change the type of threshold.

The query used in an alert rule cannot contain any template variables. Currently we only support `AND` and `OR` operators between conditions and they are executed serially.
@ -76,7 +74,7 @@ of another alert in your conditions, and `Time Of Day`.
#### Multiple Series

If a query returns multiple series then the aggregation function and threshold check will be evaluated for each series.
What Grafana does not do currently is track alert rule state **per series**. This has implications that is exemplified
What Grafana does not do currently is track alert rule state **per series**. This has implications that are detailed
in the scenario below.

- Alert condition with query that returns 2 series: **server1** and **server2**
@ -91,8 +89,7 @@ we plan to track state **per series** in a future release.

### No Data / Null values

Below you condition you can configure how the rule evaluation engine should handle queries that return no data or only null valued
data.
Below your conditions you can configure how the rule evaluation engine should handle queries that return no data or only null values.

No Data Option | Description
------------ | -------------
@ -102,23 +99,23 @@ Keep Last State | Keep the current alert rule state, what ever it is.

### Execution errors or timeouts

The last option is how to handle execution or timeout errors.
The last option tells how to handle execution or timeout errors.

Error or timeout option | Description
------------ | -------------
Alerting | Set alert rule state to `Alerting`
Keep Last State | Keep the current alert rule state, whatever it is.

If you an unreliable time series store that where queries sometime timeout or fail randomly you can set this option
t `Keep Last State` to basically ignore them.
If you have an unreliable time series store from which queries sometimes time out or fail randomly, you can set this option
to `Keep Last State` in order to basically ignore them.

## Notifications

In the alert tab you can also specify alert rule notifications along with a detailed message about the alert rule.
The message can contain anything, information about how you might solve the issue, link to runbook etc.
The message can contain anything: information about how you might solve the issue, a link to a runbook, etc.

The actual notifications are configured and shared between multiple alerts. Read the
[Notifications]({{< relref "notifications.md" >}}) guide for how to configure and setup notifications.
[notifications]({{< relref "notifications.md" >}}) guide for how to configure and set up notifications.

## Alert State History & Annotations

@ -131,7 +128,7 @@ submenu in the alert tab to view & clear state history.

{{< imgbox max-width="40%" img="/img/docs/v4/alert_test_rule.png" caption="Test Rule" >}}

The first level of troubleshooting you can do is to hit the **Test Rule** button. You will get a result back that you can expand
to the point where you can see the raw data that was returned form your query.
to the point where you can see the raw data that was returned from your query.

Further troubleshooting can also be done by inspecting the grafana-server log. If it's not an error or for some reason
the log does not say anything you can enable debug logging for some relevant components. This is done
@ -88,7 +88,7 @@ You can switch to raw query mode by clicking hamburger icon and then `Switch edi
- $m = replaced with measurement name
- $measurement = replaced with measurement name
- $col = replaced with column name
- $tag_exampletag = replaced with the value of the `exampletag` tag. To use your tag as an alias in the ALIAS BY field then the tag must be used to group by in the query.
- $tag_exampletag = replaced with the value of the `exampletag` tag. The syntax is `$tag_yourTagName` (must start with `$tag_`). To use your tag as an alias in the ALIAS BY field, the tag must be used to group by in the query (see the sketch after this list).
- You can also use [[tag_hostname]] pattern replacement syntax. For example, in the ALIAS BY field using this text `Host: [[tag_hostname]]` would substitute in the `hostname` tag value for each legend value and an example legend value would be: `Host: server1`.

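To make the `$tag_` substitution concrete, here is a minimal sketch (the `cpu` measurement and `hostname` tag are invented for illustration; `$timeFilter` and `$interval` are the editor's built-in variables): group by the tag in the query, then put `$tag_hostname` in the ALIAS BY field so each legend entry renders as the host name.

```sql
-- One series per value of the "hostname" tag; alias each with: $tag_hostname
SELECT mean("value") FROM "cpu"
WHERE $timeFilter
GROUP BY time($interval), "hostname"
```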
### Table query / raw data

@ -29,8 +29,7 @@ data from a MySQL compatible database.
The database user you specify when you add the data source should only be granted SELECT permissions on
the specified database & tables you want to query. Grafana does not validate that the query is safe. The query
could include any SQL statement. For example, statements like `USE otherdb;` and `DROP TABLE user;` would be
executed. To protect against this we **Highly** recommmend you create a specific mysql user with
restricted permissions.
executed. To protect against this we **Highly** recommend you create a specific mysql user with restricted permissions.

Example:

@ -49,11 +48,9 @@ Macro example | Description
------------ | -------------
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn > FROM_UNIXTIME(1494410783) AND dateColumn < FROM_UNIXTIME(1494497183)*

We plan to add many more macros. If you have suggestions for what macros you would like to see, please
[open an issue](https://github.com/grafana/grafana) in our GitHub repo.
We plan to add many more macros. If you have suggestions for what macros you would like to see, please [open an issue](https://github.com/grafana/grafana) in our GitHub repo.

The query editor has a link named `Generated SQL` that show up after a query as been executed, while in panel edit mode. Click
on it and it will expand and show the raw interpolated SQL string that was executed.
The query editor has a link named `Generated SQL` that shows up after a query has been executed, while in panel edit mode. Click on it and it will expand and show the raw interpolated SQL string that was executed.

## Table queries

@ -109,8 +106,71 @@ This is something we plan to add.

## Templating

You can use variables in your queries but there is currently no support for defining `Query` variables
that target a MySQL data source.
This feature is currently available in the nightly builds and will be included in the 5.0.0 release.

Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place. Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data being displayed in your dashboard.

Check out the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different types of template variables.

### Query Variable

If you add a template variable of the type `Query`, you can write a MySQL query that can
return things like measurement names, key names or key values that are shown as a dropdown select box.

For example, you can have a variable that contains all values for the `hostname` column in a table if you specify a query like this in the templating variable *Query* setting.

```sql
SELECT hostname FROM my_host
```

A query can return multiple columns and Grafana will automatically create a list from them. For example, the query below will return a list with values from `hostname` and `hostname2`.

```sql
SELECT my_host.hostname, my_other_host.hostname2 FROM my_host JOIN my_other_host ON my_host.city = my_other_host.city
```

Another option is a query that can create a key/value variable. The query should return two columns that are named `__text` and `__value`. The `__text` column value should be unique (if it is not unique then the first value is used). The options in the dropdown will have a text and value that allows you to have a friendly name as text and an id as the value. An example query with `hostname` as the text and `id` as the value:

```sql
SELECT hostname AS __text, id AS __value FROM my_host
```

You can also create nested variables. For example, if you had another variable named `region`, you could have
the hosts variable only show hosts from the currently selected region with a query like this (if `region` is a multi-value variable then use the `IN` comparison operator rather than `=` to match against multiple values):

```sql
SELECT hostname FROM my_host WHERE region IN($region)
```

### Using Variables in Queries

Template variables are quoted automatically, so if it is a string value do not wrap them in quotes in where clauses. If the variable is a multi-value variable then use the `IN` comparison operator rather than `=` to match against multiple values.

There are two syntaxes:

`$<varname>`  Example with a template variable named `hostname`:

```sql
SELECT
  UNIX_TIMESTAMP(atimestamp) as time_sec,
  aint as value,
  avarchar as metric
FROM my_table
WHERE $__timeFilter(atimestamp) and hostname in($hostname)
ORDER BY atimestamp ASC
```

`[[varname]]`  Example with a template variable named `hostname`:

```sql
SELECT
  UNIX_TIMESTAMP(atimestamp) as time_sec,
  aint as value,
  avarchar as metric
FROM my_table
WHERE $__timeFilter(atimestamp) and hostname in([[hostname]])
ORDER BY atimestamp ASC
```

## Alerting

@ -34,7 +34,7 @@ The singlestat panel has a normal query editor to allow you define your exact m
* `delta` - The total incremental increase (of a counter) in the series. An attempt is made to account for counter resets, but this will only be accurate for single instance metrics. Used to show total counter increase in time series.
* `diff` - The difference between 'current' (last value) and 'first'.
* `range` - The difference between 'min' and 'max'. Useful to show the range of change for a gauge.
4. `Postfixes`: The Postfix fields let you define a custom label and font-size (as a %) to appear *after* the value
4. `Prefix/Postfix`: The Prefix/Postfix fields let you define a custom label and font-size (as a %) to appear *before/after* the value. The `$__name` variable can be used here to use the series name or alias from the metric query.
5. `Units`: Units are appended to the Singlestat within the panel, and will respect the color and threshold settings for the value.
6. `Decimals`: The Decimal field allows you to override the automatic decimal precision, and set it explicitly.
@ -20,9 +20,9 @@ parent = "http_api"
    GET /api/users HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`.
Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`. Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@ -55,10 +55,12 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter
    GET /api/users/search?perpage=10&page=1&query=mygraf HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`. The `totalCount` field in the response can be used for pagination of the user list. E.g. if `totalCount` is equal to 100 users and the `perpage` parameter is set to 10 then there are 10 pages of users. The `query` parameter is optional and it will return results where the query value is contained in one of the `name`, `login` or `email` fields. Query values with spaces need to be URL encoded, e.g. `query=Jane%20Doe`.

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

    HTTP/1.1 200
@ -94,7 +96,9 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter
    GET /api/users/1 HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@ -126,7 +130,9 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter
    GET /api/users/lookup?loginOrEmail=admin HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@ -152,7 +158,7 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter
    PUT /api/users/2 HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

    {
      "email":"user@mygraf.com",
@ -161,6 +167,8 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter
      "theme":"light"
    }

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

    HTTP/1.1 200
@ -178,7 +186,9 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter
    GET /api/users/1/orgs HTTP/1.1
    Accept: application/json
    Content-Type: application/json
    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
    Authorization: Basic YWRtaW46YWRtaW4=

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@ -246,11 +256,29 @@ Changes the password for the user

    {"message":"User password changed"}

## Switch user context
## Switch user context for a specified user

`POST /api/user/using/:organisationId`
`POST /api/users/:userId/using/:organizationId`

Switch user context to the given organisation.
Switch user context to the given organization. Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Request**:

    POST /api/users/7/using/2 HTTP/1.1
    Authorization: Basic YWRtaW46YWRtaW4=

**Example Response**:

    HTTP/1.1 200
    Content-Type: application/json

    {"message":"Active organization changed"}

## Switch user context for signed in user

`POST /api/user/using/:organizationId`

Switch user context to the given organization.

**Example Request**:

@ -15,7 +15,7 @@ weight = 1

Description | Download
------------ | -------------
Stable for Debian-based Linux | [grafana_4.4.2_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.2_amd64.deb)
Stable for Debian-based Linux | [grafana_4.4.3_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@ -23,9 +23,9 @@ installation.
## Install Stable

```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.2_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_4.4.2_amd64.deb
sudo dpkg -i grafana_4.4.3_amd64.deb
```

<!--
@ -15,7 +15,7 @@ weight = 2

Description | Download
------------ | -------------
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.4.2 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.2-1.x86_64.rpm)
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.4.3 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@ -24,19 +24,19 @@ installation.

You can install Grafana using Yum directly.

    $ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.2-1.x86_64.rpm
    $ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm

Or install manually using `rpm`.

#### On CentOS / Fedora / Redhat:

    $ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.2-1.x86_64.rpm
    $ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm
    $ sudo yum install initscripts fontconfig
    $ sudo rpm -Uvh grafana-4.4.2-1.x86_64.rpm
    $ sudo rpm -Uvh grafana-4.4.3-1.x86_64.rpm

#### On OpenSuse:

    $ sudo rpm -i --nodeps grafana-4.4.1-1.x86_64.rpm
    $ sudo rpm -i --nodeps grafana-4.4.3-1.x86_64.rpm

## Install via YUM Repository

@ -13,7 +13,7 @@ weight = 3

Description | Download
------------ | -------------
Latest stable package for Windows | [grafana.4.4.1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.1.windows-x64.zip)
Latest stable package for Windows | [grafana.4.4.3.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3.windows-x64.zip)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@ -177,6 +177,10 @@ This is used in the WHERE clause for the InfluxDB data source. Grafana adds it a

The `$__timeFilter` is used in the MySQL data source.
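As a minimal sketch of the expansion (the `my_metrics` table and `created_at` column are assumptions for illustration, not names from this documentation):

```sql
SELECT
  UNIX_TIMESTAMP(created_at) as time_sec,
  value
FROM my_metrics
WHERE $__timeFilter(created_at)  -- becomes: created_at > FROM_UNIXTIME(...) AND created_at < FROM_UNIXTIME(...)
```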
### The $__name Variable

This variable is only available in the Singlestat panel and can be used in the prefix or suffix fields on the Options tab. The variable will be replaced with the series name or alias.

## Repeating Panels

Template variables can be very useful to dynamically change your queries across a whole dashboard. If you want
@ -18,3 +18,5 @@ package_cloud push grafana/testing/el/7 grafana-${version}-1.x86_64.rpm

package_cloud push grafana/stable/el/7 grafana-${version}-1.x86_64.rpm
package_cloud push grafana/stable/el/6 grafana-${version}-1.x86_64.rpm

rm grafana*.{deb,rpm}

@ -13,3 +13,4 @@ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-${rpm_v
package_cloud push grafana/testing/el/6 grafana-${rpm_ver}.x86_64.rpm
package_cloud push grafana/testing/el/7 grafana-${rpm_ver}.x86_64.rpm

rm grafana*.{deb,rpm}
@ -234,9 +234,14 @@ func (hs *HttpServer) registerRoutes() {

    // Dashboard
    r.Group("/dashboards", func() {
        r.Combo("/db/:slug").Get(wrap(GetDashboard)).Delete(wrap(DeleteDashboard))
        r.Get("/db/:slug", wrap(GetDashboard))
        r.Delete("/db/:slug", wrap(DeleteDashboard))
        r.Post("/db", bind(m.SaveDashboardCommand{}), wrap(PostDashboard))

        r.Get("/id/:dashboardId/versions", wrap(GetDashboardVersions))
        r.Get("/id/:dashboardId/versions/:id", wrap(GetDashboardVersion))
        r.Post("/id/:dashboardId/restore", reqEditorRole, bind(dtos.RestoreDashboardVersionCommand{}), wrap(RestoreDashboardVersion))

        r.Post("/calculate-diff", bind(dtos.CalculateDiffOptions{}), wrap(CalculateDashboardDiff))
        r.Get("/home", wrap(GetHomeDashboard))
        r.Get("/tags", GetDashboardTags)
|
||||
}
|
||||
|
||||
type AdminUpdateUserPermissionsForm struct {
|
||||
IsGrafanaAdmin bool `json:"isGrafanaAdmin" binding:"Required"`
|
||||
IsGrafanaAdmin bool `json:"isGrafanaAdmin"`
|
||||
}
|
||||
|
||||
type AdminUserListItem struct {
|
||||
|
@ -143,6 +143,7 @@ func loginUserWithUser(user *m.User, c *middleware.Context) {
        c.SetSuperSecureCookie(user.Rands+user.Password, setting.CookieRememberName, user.Login, days, setting.AppSubUrl+"/")
    }

    c.Session.RegenerateId(c)
    c.Session.Set(middleware.SESS_KEY_USERID, user.Id)
}

@ -43,7 +43,6 @@ var pidFile = flag.String("pidfile", "", "path to pid file")
var exitChan = make(chan int)

func init() {
    runtime.GOMAXPROCS(runtime.NumCPU())
}

func main() {
@ -4,6 +4,7 @@ import (
    "bytes"
    "encoding/json"
    "net/http"
    "runtime"
    "strings"
    "time"

@ -66,10 +67,10 @@ func updateTotalStats() {
        return
    }

    M_StatTotal_Dashboards.Update(statsQuery.Result.DashboardCount)
    M_StatTotal_Users.Update(statsQuery.Result.UserCount)
    M_StatTotal_Playlists.Update(statsQuery.Result.PlaylistCount)
    M_StatTotal_Orgs.Update(statsQuery.Result.OrgCount)
    M_StatTotal_Dashboards.Update(statsQuery.Result.Dashboards)
    M_StatTotal_Users.Update(statsQuery.Result.Users)
    M_StatTotal_Playlists.Update(statsQuery.Result.Playlists)
    M_StatTotal_Orgs.Update(statsQuery.Result.Orgs)
    }
}

@ -86,6 +87,8 @@ func sendUsageStats() {
    report := map[string]interface{}{
        "version": version,
        "metrics": metrics,
        "os":      runtime.GOOS,
        "arch":    runtime.GOARCH,
    }

    statsQuery := m.GetSystemStatsQuery{}
@ -94,14 +97,16 @@ func sendUsageStats() {
        return
    }

    metrics["stats.dashboards.count"] = statsQuery.Result.DashboardCount
    metrics["stats.users.count"] = statsQuery.Result.UserCount
    metrics["stats.orgs.count"] = statsQuery.Result.OrgCount
    metrics["stats.playlist.count"] = statsQuery.Result.PlaylistCount
    metrics["stats.dashboards.count"] = statsQuery.Result.Dashboards
    metrics["stats.users.count"] = statsQuery.Result.Users
    metrics["stats.orgs.count"] = statsQuery.Result.Orgs
    metrics["stats.playlist.count"] = statsQuery.Result.Playlists
    metrics["stats.plugins.apps.count"] = len(plugins.Apps)
    metrics["stats.plugins.panels.count"] = len(plugins.Panels)
    metrics["stats.plugins.datasources.count"] = len(plugins.DataSources)
    metrics["stats.alerts.count"] = statsQuery.Result.AlertCount
    metrics["stats.alerts.count"] = statsQuery.Result.Alerts
    metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers
    metrics["stats.datasources.count"] = statsQuery.Result.Datasources

    dsStats := m.GetDataSourceStatsQuery{}
    if err := bus.Dispatch(&dsStats); err != nil {
@ -106,6 +106,10 @@ func (s *mockSession) Destory(c *Context) error {
    return nil
}

func (s *mockSession) RegenerateId(c *Context) error {
    return nil
}

type mockLdapAuthenticator struct {
    syncSignedInUserCalled bool
}
@ -62,6 +62,15 @@ func GetContextHandler() macaron.Handler {
        ctx.Data["ctx"] = ctx

        c.Map(ctx)

        // update last seen at
        // update last seen every 5min
        if ctx.ShouldUpdateLastSeenAt() {
            ctx.Logger.Debug("Updating last user_seen_at", "user_id", ctx.UserId)
            if err := bus.Dispatch(&m.UpdateUserLastSeenAtCommand{UserId: ctx.UserId}); err != nil {
                ctx.Logger.Error("Failed to update last_seen_at", "error", err)
            }
        }
    }
}

@ -99,7 +108,7 @@ func initContextWithUserSessionCookie(ctx *Context, orgId int64) bool {

    query := m.GetSignedInUserQuery{UserId: userId, OrgId: orgId}
    if err := bus.Dispatch(&query); err != nil {
        ctx.Logger.Error("Failed to get user with id", "userId", userId)
        ctx.Logger.Error("Failed to get user with id", "userId", userId, "error", err)
        return false
    }

@ -103,6 +103,8 @@ type SessionStore interface {
    Destory(*Context) error
    // init
    Start(*Context) error
    // RegenerateId regenerates the session id
    RegenerateId(*Context) error
}

type SessionWrapper struct {
@ -116,6 +118,12 @@ func (s *SessionWrapper) Start(c *Context) error {
    return err
}

func (s *SessionWrapper) RegenerateId(c *Context) error {
    var err error
    s.session, err = s.manager.RegenerateId(c.Context)
    return err
}

func (s *SessionWrapper) Set(k interface{}, v interface{}) error {
    if s.session != nil {
        return s.session.Set(k, v)
@ -112,9 +112,11 @@ type GetOrgUsersQuery struct {
// Projections and DTOs

type OrgUserDTO struct {
    OrgId  int64  `json:"orgId"`
    UserId int64  `json:"userId"`
    Email  string `json:"email"`
    Login  string `json:"login"`
    Role   string `json:"role"`
    OrgId         int64     `json:"orgId"`
    UserId        int64     `json:"userId"`
    Email         string    `json:"email"`
    Login         string    `json:"login"`
    Role          string    `json:"role"`
    LastSeenAt    time.Time `json:"lastSeenAt"`
    LastSeenAtAge string    `json:"lastSeenAtAge"`
}
@ -1,11 +1,13 @@
package models

type SystemStats struct {
    DashboardCount int64
    UserCount      int64
    OrgCount       int64
    PlaylistCount  int64
    AlertCount     int64
    Dashboards  int64
    Datasources int64
    Users       int64
    ActiveUsers int64
    Orgs        int64
    Playlists   int64
    Alerts      int64
}

type DataSourceStats struct {
@ -22,15 +24,16 @@ type GetDataSourceStatsQuery struct {
}

type AdminStats struct {
    UserCount       int `json:"user_count"`
    OrgCount        int `json:"org_count"`
    DashboardCount  int `json:"dashboard_count"`
    DbSnapshotCount int `json:"db_snapshot_count"`
    DbTagCount      int `json:"db_tag_count"`
    DataSourceCount int `json:"data_source_count"`
    PlaylistCount   int `json:"playlist_count"`
    StarredDbCount  int `json:"starred_db_count"`
    AlertCount      int `json:"alert_count"`
    Users       int `json:"users"`
    Orgs        int `json:"orgs"`
    Dashboards  int `json:"dashboards"`
    Snapshots   int `json:"snapshots"`
    Tags        int `json:"tags"`
    Datasources int `json:"datasources"`
    Playlists   int `json:"playlists"`
    Stars       int `json:"stars"`
    Alerts      int `json:"alerts"`
    ActiveUsers int `json:"activeUsers"`
}

type GetAdminStatsQuery struct {
@ -33,8 +33,9 @@ type User struct {
    IsAdmin bool
    OrgId   int64

    Created time.Time
    Updated time.Time
    Created    time.Time
    Updated    time.Time
    LastSeenAt time.Time
}

func (u *User) NameOrFallback() string {
@ -127,6 +128,7 @@ type GetUserProfileQuery struct {
}

type SearchUsersQuery struct {
    OrgId int64
    Query string
    Page  int
    Limit int
@ -160,6 +162,15 @@ type SignedInUser struct {
    ApiKeyId       int64
    IsGrafanaAdmin bool
    HelpFlags1     HelpFlags1
    LastSeenAt     time.Time
}

func (u *SignedInUser) ShouldUpdateLastSeenAt() bool {
    return u.UserId > 0 && time.Since(u.LastSeenAt) > time.Minute*5
}

type UpdateUserLastSeenAtCommand struct {
    UserId int64
}

func (user *SignedInUser) HasRole(role RoleType) bool {
@ -181,11 +192,13 @@ type UserProfileDTO struct {
}

type UserSearchHitDTO struct {
    Id      int64  `json:"id"`
    Name    string `json:"name"`
    Login   string `json:"login"`
    Email   string `json:"email"`
    IsAdmin bool   `json:"isAdmin"`
    Id            int64     `json:"id"`
    Name          string    `json:"name"`
    Login         string    `json:"login"`
    Email         string    `json:"email"`
    IsAdmin       bool      `json:"isAdmin"`
    LastSeenAt    time.Time `json:"lastSeenAt"`
    LastSeenAtAge string    `json:"lastSeenAtAge"`
}

type UserIdDTO struct {
@ -21,7 +21,7 @@ func init() {
      <h3 class="page-heading">PagerDuty settings</h3>
      <div class="gf-form">
        <span class="gf-form-label width-14">Integration Key</span>
        <input type="text" required class="gf-form-input max-width-22" ng-model="ctrl.model.settings.integrationKey" placeholder="Pagerduty integeration Key"></input>
        <input type="text" required class="gf-form-input max-width-22" ng-model="ctrl.model.settings.integrationKey" placeholder="Pagerduty Integration Key"></input>
      </div>
      <div class="gf-form">
        <gf-form-switch
@ -103,4 +103,8 @@ func addUserMigrations(mg *Migrator) {
        {Name: "company", Type: DB_NVarchar, Length: 255, Nullable: true},
        {Name: "theme", Type: DB_NVarchar, Length: 255, Nullable: true},
    }))

    mg.AddMigration("Add last_seen_at column to user", NewAddColumnMigration(userV2, &Column{
        Name: "last_seen_at", Type: DB_DateTime, Nullable: true,
    }))
}
@ -6,6 +6,7 @@ import (

    "github.com/grafana/grafana/pkg/bus"
    m "github.com/grafana/grafana/pkg/models"
    "github.com/grafana/grafana/pkg/util"
)

func init() {
@ -71,11 +72,18 @@ func GetOrgUsers(query *m.GetOrgUsersQuery) error {
    sess := x.Table("org_user")
    sess.Join("INNER", "user", fmt.Sprintf("org_user.user_id=%s.id", x.Dialect().Quote("user")))
    sess.Where("org_user.org_id=?", query.OrgId)
    sess.Cols("org_user.org_id", "org_user.user_id", "user.email", "user.login", "org_user.role")
    sess.Cols("org_user.org_id", "org_user.user_id", "user.email", "user.login", "org_user.role", "user.last_seen_at")
    sess.Asc("user.email", "user.login")

    err := sess.Find(&query.Result)
    return err
    if err := sess.Find(&query.Result); err != nil {
        return err
    }

    for _, user := range query.Result {
        user.LastSeenAtAge = util.GetAgeString(user.LastSeenAt)
    }

    return nil
}

func RemoveOrgUser(cmd *m.RemoveOrgUserCommand) error {
@ -53,7 +53,7 @@ func EnsureAdminUser() {
        return
    }

    if statsQuery.Result.UserCount > 0 {
    if statsQuery.Result.Users > 0 {
        return
    }

@ -146,7 +146,7 @@ func getEngine() (*xorm.Engine, error) {
            DbCfg.Path = filepath.Join(setting.DataPath, DbCfg.Path)
        }
        os.MkdirAll(path.Dir(DbCfg.Path), os.ModePerm)
        cnnstr = "file:" + DbCfg.Path + "?cache=shared&mode=rwc&_loc=Local"
        cnnstr = "file:" + DbCfg.Path + "?cache=shared&mode=rwc"
    default:
        return nil, fmt.Errorf("Unknown database type: %s", DbCfg.Type)
    }
@ -1,6 +1,8 @@
package sqlstore

import (
    "time"

    "github.com/grafana/grafana/pkg/bus"
    m "github.com/grafana/grafana/pkg/models"
)
@ -11,6 +13,8 @@ func init() {
    bus.AddHandler("sql", GetAdminStats)
}

var activeUserTimeLimit time.Duration = time.Hour * 24 * 14

func GetDataSourceStats(query *m.GetDataSourceStatsQuery) error {
    var rawSql = `SELECT COUNT(*) as count, type FROM data_source GROUP BY type`
    query.Result = make([]*m.DataSourceStats, 0)
@ -27,27 +31,35 @@ func GetSystemStats(query *m.GetSystemStatsQuery) error {
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("user") + `
        ) AS user_count,
        ) AS users,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("org") + `
        ) AS org_count,
        ) AS orgs,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("dashboard") + `
        ) AS dashboard_count,
        ) AS dashboards,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("data_source") + `
        ) AS datasources,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("playlist") + `
        ) AS playlist_count,
        ) AS playlists,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("alert") + `
        ) AS alert_count
        ) AS alerts,
        (
        SELECT COUNT(*) FROM ` + dialect.Quote("user") + ` where last_seen_at > ?
        ) as active_users
        `

    activeUserDeadlineDate := time.Now().Add(-activeUserTimeLimit)
    var stats m.SystemStats
    _, err := x.Sql(rawSql).Get(&stats)
    _, err := x.Sql(rawSql, activeUserDeadlineDate).Get(&stats)
    if err != nil {
        return err
    }
@ -61,43 +73,48 @@ func GetAdminStats(query *m.GetAdminStatsQuery) error {
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("user") + `
        ) AS user_count,
        ) AS users,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("org") + `
        ) AS org_count,
        ) AS orgs,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("dashboard") + `
        ) AS dashboard_count,
        ) AS dashboards,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("dashboard_snapshot") + `
        ) AS db_snapshot_count,
        ) AS snapshots,
        (
        SELECT COUNT( DISTINCT ( ` + dialect.Quote("term") + ` ))
        FROM ` + dialect.Quote("dashboard_tag") + `
        ) AS db_tag_count,
        ) AS tags,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("data_source") + `
        ) AS data_source_count,
        ) AS datasources,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("playlist") + `
        ) AS playlist_count,
        ) AS playlists,
        (
        SELECT COUNT(DISTINCT ` + dialect.Quote("dashboard_id") + ` )
        FROM ` + dialect.Quote("star") + `
        ) AS starred_db_count,
        SELECT COUNT(*) FROM ` + dialect.Quote("star") + `
        ) AS stars,
        (
        SELECT COUNT(*)
        FROM ` + dialect.Quote("alert") + `
        ) AS alert_count
        ) AS alerts,
        (
        SELECT COUNT(*)
        from ` + dialect.Quote("user") + ` where last_seen_at > ?
        ) as active_users
        `

    activeUserDeadlineDate := time.Now().Add(-activeUserTimeLimit)

    var stats m.AdminStats
    _, err := x.Sql(rawSql).Get(&stats)
    _, err := x.Sql(rawSql, activeUserDeadlineDate).Get(&stats)
    if err != nil {
        return err
    }
@ -22,6 +22,7 @@ func init() {
|
||||
bus.AddHandler("sql", GetUserByLogin)
|
||||
bus.AddHandler("sql", GetUserByEmail)
|
||||
bus.AddHandler("sql", SetUsingOrg)
|
||||
bus.AddHandler("sql", UpdateUserLastSeenAt)
|
||||
bus.AddHandler("sql", GetUserProfile)
|
||||
bus.AddHandler("sql", GetSignedInUser)
|
||||
bus.AddHandler("sql", SearchUsers)
|
||||
@ -260,6 +261,24 @@ func ChangeUserPassword(cmd *m.ChangeUserPasswordCommand) error {
|
||||
})
|
||||
}
|
||||
|
||||
func UpdateUserLastSeenAt(cmd *m.UpdateUserLastSeenAtCommand) error {
|
||||
return inTransaction(func(sess *DBSession) error {
|
||||
if cmd.UserId <= 0 {
	// reject invalid ids instead of updating nothing
	// (m.ErrUserNotFound is an assumed guard; the check otherwise falls through)
	return m.ErrUserNotFound
}
|
||||
|
||||
user := m.User{
|
||||
Id: cmd.UserId,
|
||||
LastSeenAt: time.Now(),
|
||||
}
|
||||
|
||||
if _, err := sess.Id(cmd.UserId).Update(&user); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
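As a hedged sketch of how the new UpdateUserLastSeenAt handler is reached: commands travel over the bus, so a caller refreshes the timestamp by dispatching the command rather than touching the table directly (the surrounding HTTP/session plumbing and ids are assumed here):

import (
	"github.com/grafana/grafana/pkg/bus"
	"github.com/grafana/grafana/pkg/log"
	m "github.com/grafana/grafana/pkg/models"
)

// touchLastSeen is a hypothetical caller: the bus routes the command to the
// UpdateUserLastSeenAt handler registered in init() above.
func touchLastSeen(userId int64) {
	cmd := m.UpdateUserLastSeenAtCommand{UserId: userId}
	if err := bus.Dispatch(&cmd); err != nil {
		log.Error(3, "Failed to update last_seen_at for user", err)
	}
}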
func SetUsingOrg(cmd *m.SetUsingOrgCommand) error {
|
||||
getOrgsForUserCmd := &m.GetUserOrgListQuery{UserId: cmd.UserId}
|
||||
GetUserOrgList(getOrgsForUserCmd)
|
||||
@ -324,15 +343,16 @@ func GetSignedInUser(query *m.GetSignedInUserQuery) error {
|
||||
}
|
||||
|
||||
var rawSql = `SELECT
|
||||
u.id as user_id,
|
||||
u.is_admin as is_grafana_admin,
|
||||
u.email as email,
|
||||
u.login as login,
|
||||
u.name as name,
|
||||
u.help_flags1 as help_flags1,
|
||||
org.name as org_name,
|
||||
org_user.role as org_role,
|
||||
org.id as org_id
|
||||
u.id as user_id,
|
||||
u.is_admin as is_grafana_admin,
|
||||
u.email as email,
|
||||
u.login as login,
|
||||
u.name as name,
|
||||
u.help_flags1 as help_flags1,
|
||||
u.last_seen_at as last_seen_at,
|
||||
org.name as org_name,
|
||||
org_user.role as org_role,
|
||||
org.id as org_id
|
||||
FROM ` + dialect.Quote("user") + ` as u
|
||||
LEFT OUTER JOIN org_user on org_user.org_id = ` + orgId + ` and org_user.user_id = u.id
|
||||
LEFT OUTER JOIN org on org.id = org_user.org_id `
|
||||
@ -367,27 +387,49 @@ func SearchUsers(query *m.SearchUsersQuery) error {
|
||||
query.Result = m.SearchUserQueryResult{
|
||||
Users: make([]*m.UserSearchHitDTO, 0),
|
||||
}
|
||||
|
||||
queryWithWildcards := "%" + query.Query + "%"
|
||||
|
||||
whereConditions := make([]string, 0)
|
||||
whereParams := make([]interface{}, 0)
|
||||
sess := x.Table("user")
|
||||
if query.Query != "" {
|
||||
sess.Where("email LIKE ? OR name LIKE ? OR login like ?", queryWithWildcards, queryWithWildcards, queryWithWildcards)
|
||||
|
||||
if query.OrgId > 0 {
|
||||
whereConditions = append(whereConditions, "org_id = ?")
|
||||
whereParams = append(whereParams, query.OrgId)
|
||||
}
|
||||
|
||||
if query.Query != "" {
|
||||
whereConditions = append(whereConditions, "(email LIKE ? OR name LIKE ? OR login like ?)")
|
||||
whereParams = append(whereParams, queryWithWildcards, queryWithWildcards, queryWithWildcards)
|
||||
}
|
||||
|
||||
if len(whereConditions) > 0 {
|
||||
sess.Where(strings.Join(whereConditions, " AND "), whereParams...)
|
||||
}
|
||||
|
||||
offset := query.Limit * (query.Page - 1)
|
||||
sess.Limit(query.Limit, offset)
|
||||
sess.Cols("id", "email", "name", "login", "is_admin")
|
||||
sess.Cols("id", "email", "name", "login", "is_admin", "last_seen_at")
|
||||
if err := sess.Find(&query.Result.Users); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// get total
|
||||
user := m.User{}
|
||||
|
||||
countSess := x.Table("user")
|
||||
if query.Query != "" {
|
||||
countSess.Where("email LIKE ? OR name LIKE ? OR login like ?", queryWithWildcards, queryWithWildcards, queryWithWildcards)
|
||||
|
||||
if len(whereConditions) > 0 {
|
||||
countSess.Where(strings.Join(whereConditions, " AND "), whereParams...)
|
||||
}
|
||||
|
||||
count, err := countSess.Count(&user)
|
||||
query.Result.TotalCount = count
|
||||
|
||||
for _, user := range query.Result.Users {
|
||||
user.LastSeenAtAge = util.GetAgeString(user.LastSeenAt)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -74,6 +74,19 @@ func (m *MySqlMacroEngine) EvaluateMacro(name string, args []string) (string, er
|
||||
return "", fmt.Errorf("missing time column argument for macro %v", name)
|
||||
}
|
||||
return fmt.Sprintf("%s >= FROM_UNIXTIME(%d) AND %s <= FROM_UNIXTIME(%d)", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
case "__timeFrom":
|
||||
return fmt.Sprintf("FROM_UNIXTIME(%d)", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
|
||||
case "__timeTo":
|
||||
return fmt.Sprintf("FROM_UNIXTIME(%d)", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
case "__unixEpochFilter":
|
||||
if len(args) == 0 {
|
||||
return "", fmt.Errorf("missing time column argument for macro %v", name)
|
||||
}
|
||||
return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
case "__unixEpochFrom":
|
||||
return fmt.Sprintf("%d", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
|
||||
case "__unixEpochTo":
|
||||
return fmt.Sprintf("%d", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
|
||||
default:
|
||||
return "", fmt.Errorf("Unknown macro %v", name)
|
||||
}
|
||||
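Taken together, these macros rewrite a templated query into plain SQL before it reaches MySQL. Below is a toy interpolator, not the engine's actual API, showing the $__timeFilter substitution that the tests which follow exercise:

package main

import (
	"fmt"
	"regexp"
	"strings"
	"time"
)

// expandTimeFilter mimics, in spirit, how the macro engine rewrites
// $__timeFilter(col) into a FROM_UNIXTIME range predicate.
func expandTimeFilter(sql string, from, to time.Time) string {
	re := regexp.MustCompile(`\$__timeFilter\([^)]+\)`)
	return re.ReplaceAllStringFunc(sql, func(m string) string {
		col := strings.TrimSuffix(strings.TrimPrefix(m, "$__timeFilter("), ")")
		return fmt.Sprintf("%s >= FROM_UNIXTIME(%d) AND %s <= FROM_UNIXTIME(%d)",
			col, from.Unix(), col, to.Unix())
	})
}

func main() {
	to := time.Now()
	from := to.Add(-5 * time.Minute)
	fmt.Println(expandTimeFilter("SELECT avg(value) FROM metrics WHERE $__timeFilter(time_column)", from, to))
}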
|
@ -39,5 +39,60 @@ func TestMacroEngine(t *testing.T) {
|
||||
So(sql, ShouldEqual, "WHERE time_column >= FROM_UNIXTIME(18446744066914186738) AND time_column <= FROM_UNIXTIME(18446744066914187038)")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeFrom function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("select $__timeFrom(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select FROM_UNIXTIME(18446744066914186738)")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeTo function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("select $__timeTo(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select FROM_UNIXTIME(18446744066914187038)")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochFilter function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("select $__unixEpochFilter(18446744066914186738)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914186738 >= 18446744066914186738 AND 18446744066914186738 <= 18446744066914187038")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochFrom function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("select $__unixEpochFrom()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914186738")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochTo function", func() {
|
||||
engine := &MySqlMacroEngine{
|
||||
TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
|
||||
}
|
||||
|
||||
sql, err := engine.Interpolate("select $__unixEpochTo()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914187038")
|
||||
})
|
||||
|
||||
})
|
||||
}
|
||||
|
@ -183,7 +183,7 @@ func (e MysqlExecutor) getTypedRowData(types []*sql.ColumnType, rows *core.Rows)
|
||||
values := make([]interface{}, len(types))
|
||||
|
||||
for i, stype := range types {
|
||||
e.log.Info("type", "type", stype)
|
||||
e.log.Debug("type", "type", stype)
|
||||
switch stype.DatabaseTypeName() {
|
||||
case mysql.FieldTypeNameTiny:
|
||||
values[i] = new(int8)
|
||||
@ -205,6 +205,8 @@ func (e MysqlExecutor) getTypedRowData(types []*sql.ColumnType, rows *core.Rows)
|
||||
values[i] = new(float32)
|
||||
case mysql.FieldTypeNameNewDecimal:
|
||||
values[i] = new(float64)
|
||||
case mysql.FieldTypeNameFloat:
|
||||
values[i] = new(float64)
|
||||
case mysql.FieldTypeNameTimestamp:
|
||||
values[i] = new(time.Time)
|
||||
case mysql.FieldTypeNameDateTime:
|
||||
@ -215,6 +217,20 @@ func (e MysqlExecutor) getTypedRowData(types []*sql.ColumnType, rows *core.Rows)
|
||||
values[i] = new(int16)
|
||||
case mysql.FieldTypeNameNULL:
|
||||
values[i] = nil
|
||||
case mysql.FieldTypeNameBit:
|
||||
values[i] = new([]byte)
|
||||
case mysql.FieldTypeNameBLOB:
|
||||
values[i] = new(string)
|
||||
case mysql.FieldTypeNameTinyBLOB:
|
||||
values[i] = new(string)
|
||||
case mysql.FieldTypeNameMediumBLOB:
|
||||
values[i] = new(string)
|
||||
case mysql.FieldTypeNameLongBLOB:
|
||||
values[i] = new(string)
|
||||
case mysql.FieldTypeNameString:
|
||||
values[i] = new(string)
|
||||
case mysql.FieldTypeNameDate:
|
||||
values[i] = new(string)
|
||||
default:
|
||||
return nil, fmt.Errorf("Database type %s not supported", stype.DatabaseTypeName())
|
||||
}
|
||||
|
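The type switch above chooses a concrete destination type per column before scanning. The same DatabaseTypeName-driven pattern in miniature (a sketch only; unknown types fall back to string here, whereas the real code returns an error):

package mysqlscan

import "database/sql"

// ScanTyped mirrors getTypedRowData: pick a concrete pointer type per
// column from DatabaseTypeName, then Scan into those pointers.
func ScanTyped(rows *sql.Rows) ([]interface{}, error) {
	types, err := rows.ColumnTypes()
	if err != nil {
		return nil, err
	}
	values := make([]interface{}, len(types))
	for i, t := range types {
		switch t.DatabaseTypeName() {
		case "TINYINT":
			values[i] = new(int8)
		case "BIGINT":
			values[i] = new(int64)
		case "DOUBLE", "FLOAT", "DECIMAL":
			values[i] = new(float64)
		default:
			values[i] = new(string) // fallback for illustration
		}
	}
	return values, rows.Scan(values...)
}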
124
pkg/tsdb/mysql/mysql_test.go
Normal file
@ -0,0 +1,124 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-xorm/xorm"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/sqlutil"
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
// To run this test, remove the Skip from SkipConvey
|
||||
// and set up a MySQL db named grafana_tests and a user/password grafana/password
|
||||
func TestMySQL(t *testing.T) {
|
||||
SkipConvey("MySQL", t, func() {
|
||||
x := InitMySQLTestDB(t)
|
||||
|
||||
executor := &MysqlExecutor{
|
||||
engine: x,
|
||||
log: log.New("tsdb.mysql"),
|
||||
}
|
||||
|
||||
sess := x.NewSession()
|
||||
defer sess.Close()
|
||||
db := sess.DB()
|
||||
|
||||
sql := "CREATE TABLE `mysql_types` ("
|
||||
sql += "`atinyint` tinyint(1),"
|
||||
sql += "`avarchar` varchar(3),"
|
||||
sql += "`achar` char(3),"
|
||||
sql += "`amediumint` mediumint,"
|
||||
sql += "`asmallint` smallint,"
|
||||
sql += "`abigint` bigint,"
|
||||
sql += "`aint` int(11),"
|
||||
sql += "`adouble` double(10,2),"
|
||||
sql += "`anewdecimal` decimal(10,2),"
|
||||
sql += "`afloat` float(10,2),"
|
||||
sql += "`atimestamp` timestamp NOT NULL,"
|
||||
sql += "`adatetime` datetime,"
|
||||
sql += "`atime` time,"
|
||||
// sql += "`ayear` year," // Crashes xorm when running cleandb
|
||||
sql += "`abit` bit(1),"
|
||||
sql += "`atinytext` tinytext,"
|
||||
sql += "`atinyblob` tinyblob,"
|
||||
sql += "`atext` text,"
|
||||
sql += "`ablob` blob,"
|
||||
sql += "`amediumtext` mediumtext,"
|
||||
sql += "`amediumblob` mediumblob,"
|
||||
sql += "`alongtext` longtext,"
|
||||
sql += "`alongblob` longblob,"
|
||||
sql += "`aenum` enum('val1', 'val2'),"
|
||||
sql += "`aset` set('a', 'b', 'c', 'd'),"
|
||||
sql += "`adate` date"
|
||||
sql += ") ENGINE=InnoDB DEFAULT CHARSET=latin1;"
|
||||
_, err := sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
sql = "INSERT INTO `mysql_types` "
|
||||
sql += "(`atinyint`, `avarchar`, `achar`, `amediumint`, `asmallint`, `abigint`, `aint`, `adouble`, "
|
||||
sql += "`anewdecimal`, `afloat`, `adatetime`, `atimestamp`, `atime`, `abit`, `atinytext`, "
|
||||
sql += "`atinyblob`, `atext`, `ablob`, `amediumtext`, `amediumblob`, `alongtext`, `alongblob`, "
|
||||
sql += "`aenum`, `aset`, `adate`) "
|
||||
sql += "VALUES(1, 'abc', 'def', 1, 10, 100, 1420070400, 1.11, "
|
||||
sql += "2.22, 3.33, now(), current_timestamp(), '11:11:11', 1, 'tinytext', "
|
||||
sql += "'tinyblob', 'text', 'blob', 'mediumtext', 'mediumblob', 'longtext', 'longblob', "
|
||||
sql += "'val2', 'a,b', curdate());"
|
||||
_, err = sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("TransformToTable should map MySQL column types to Go types", func() {
|
||||
rows, err := db.Query("SELECT * FROM mysql_types")
|
||||
defer rows.Close()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
queryResult := &tsdb.QueryResult{Meta: simplejson.New()}
|
||||
err = executor.TransformToTable(nil, rows, queryResult)
|
||||
So(err, ShouldBeNil)
|
||||
column := queryResult.Tables[0].Rows[0]
|
||||
So(*column[0].(*int8), ShouldEqual, 1)
|
||||
So(*column[1].(*string), ShouldEqual, "abc")
|
||||
So(*column[2].(*string), ShouldEqual, "def")
|
||||
So(*column[3].(*int32), ShouldEqual, 1)
|
||||
So(*column[4].(*int16), ShouldEqual, 10)
|
||||
So(*column[5].(*int64), ShouldEqual, 100)
|
||||
So(*column[6].(*int), ShouldEqual, 1420070400)
|
||||
So(*column[7].(*float64), ShouldEqual, 1.11)
|
||||
So(*column[8].(*float64), ShouldEqual, 2.22)
|
||||
So(*column[9].(*float64), ShouldEqual, 3.33)
|
||||
_, offset := time.Now().Zone()
|
||||
So((*column[10].(*time.Time)), ShouldHappenWithin, time.Duration(10*time.Second), time.Now().Add(time.Duration(offset)*time.Second))
|
||||
So(*column[11].(*time.Time), ShouldHappenWithin, time.Duration(10*time.Second), time.Now().Add(time.Duration(offset)*time.Second))
|
||||
So(*column[12].(*string), ShouldEqual, "11:11:11")
|
||||
So(*column[13].(*[]byte), ShouldHaveSameTypeAs, []byte{1})
|
||||
So(*column[14].(*string), ShouldEqual, "tinytext")
|
||||
So(*column[15].(*string), ShouldEqual, "tinyblob")
|
||||
So(*column[16].(*string), ShouldEqual, "text")
|
||||
So(*column[17].(*string), ShouldEqual, "blob")
|
||||
So(*column[18].(*string), ShouldEqual, "mediumtext")
|
||||
So(*column[19].(*string), ShouldEqual, "mediumblob")
|
||||
So(*column[20].(*string), ShouldEqual, "longtext")
|
||||
So(*column[21].(*string), ShouldEqual, "longblob")
|
||||
So(*column[22].(*string), ShouldEqual, "val2")
|
||||
So(*column[23].(*string), ShouldEqual, "a,b")
|
||||
So(*column[24].(*string), ShouldEqual, time.Now().Format("2006-01-02T00:00:00Z"))
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func InitMySQLTestDB(t *testing.T) *xorm.Engine {
|
||||
x, err := xorm.NewEngine(sqlutil.TestDB_Mysql.DriverName, sqlutil.TestDB_Mysql.ConnStr+"&parseTime=true")
|
||||
|
||||
// x.ShowSQL()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to init mysql db %v", err)
|
||||
}
|
||||
|
||||
sqlutil.CleanDB(x)
|
||||
|
||||
return x
|
||||
}
|
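For reference, a connection string of the shape the setup comment implies; this exact value is an assumption, since the canonical one lives in sqlutil.TestDB_Mysql:

// Assumed DSN for the grafana_tests setup described above. parseTime=true is
// what makes timestamp columns scan into time.Time in the assertions above.
const testConnStr = "grafana:password@tcp(localhost:3306)/grafana_tests?charset=utf8mb4&parseTime=true"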
@ -1,7 +1,10 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
func StringsFallback2(val1 string, val2 string) string {
|
||||
@ -28,3 +31,34 @@ func SplitString(str string) []string {
|
||||
|
||||
return regexp.MustCompile("[, ]+").Split(str, -1)
|
||||
}
|
||||
|
||||
func GetAgeString(t time.Time) string {
|
||||
if t.IsZero() {
|
||||
return "?"
|
||||
}
|
||||
|
||||
sinceNow := time.Since(t)
|
||||
minutes := sinceNow.Minutes()
|
||||
years := int(math.Floor(minutes / 525600))
|
||||
months := int(math.Floor(minutes / 43800))
|
||||
days := int(math.Floor(minutes / 1440))
|
||||
hours := int(math.Floor(minutes / 60))
|
||||
|
||||
if years > 0 {
|
||||
return fmt.Sprintf("%dy", years)
|
||||
}
|
||||
if months > 0 {
|
||||
return fmt.Sprintf("%dM", months)
|
||||
}
|
||||
if days > 0 {
|
||||
return fmt.Sprintf("%dd", days)
|
||||
}
|
||||
if hours > 0 {
|
||||
return fmt.Sprintf("%dh", hours)
|
||||
}
|
||||
if int(minutes) > 0 {
|
||||
return fmt.Sprintf("%dm", int(minutes))
|
||||
}
|
||||
|
||||
return "< 1m"
|
||||
}
|
||||
|
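Since GetAgeString is plain minute math (525600 minutes per year, 43800 per month, 1440 per day), ages always round down to the largest whole unit, which is what the test below relies on. A quick usage sketch:

package main

import (
	"fmt"
	"time"

	"github.com/grafana/grafana/pkg/util"
)

func main() {
	fmt.Println(util.GetAgeString(time.Now().Add(-2 * time.Hour)))        // 2h
	fmt.Println(util.GetAgeString(time.Now().Add(-67 * 24 * time.Hour)))  // 2M
	fmt.Println(util.GetAgeString(time.Time{}))                           // ?
}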
@ -2,6 +2,7 @@ package util
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
@ -24,3 +25,15 @@ func TestSplitString(t *testing.T) {
|
||||
So(SplitString("test1 , test2 test3"), ShouldResemble, []string{"test1", "test2", "test3"})
|
||||
})
|
||||
}
|
||||
|
||||
func TestDateAge(t *testing.T) {
|
||||
Convey("GetAgeString", t, func() {
|
||||
So(GetAgeString(time.Time{}), ShouldEqual, "?")
|
||||
So(GetAgeString(time.Now().Add(-time.Second*2)), ShouldEqual, "< 1m")
|
||||
So(GetAgeString(time.Now().Add(-time.Minute*2)), ShouldEqual, "2m")
|
||||
So(GetAgeString(time.Now().Add(-time.Hour*2)), ShouldEqual, "2h")
|
||||
So(GetAgeString(time.Now().Add(-time.Hour*24*3)), ShouldEqual, "3d")
|
||||
So(GetAgeString(time.Now().Add(-time.Hour*24*67)), ShouldEqual, "2M")
|
||||
So(GetAgeString(time.Now().Add(-time.Hour*24*409)), ShouldEqual, "1y")
|
||||
})
|
||||
}
|
||||
|
@ -192,7 +192,7 @@ export function grafanaAppDirective(playlistSrv, contextSrv) {
|
||||
|
||||
// hide search
|
||||
if (body.find('.search-container').length > 0) {
|
||||
if (target.parents('.search-results-container').length === 0) {
|
||||
if (target.parents('.search-results-container, .search-field-wrapper').length === 0) {
|
||||
scope.$apply(function() {
|
||||
scope.appEvent('hide-dash-search');
|
||||
});
|
||||
|
@ -122,7 +122,7 @@ function (angular, _, coreModule) {
|
||||
vm.selectValue = function(option, event, commitChange, excludeOthers) {
|
||||
if (!option) { return; }
|
||||
|
||||
option.selected = !option.selected;
|
||||
option.selected = vm.variable.multi ? !option.selected: true;
|
||||
|
||||
commitChange = commitChange || false;
|
||||
excludeOthers = excludeOthers || false;
|
||||
|
@ -129,6 +129,10 @@ function (angular, _, coreModule, config) {
|
||||
}
|
||||
|
||||
var first = variable.current.value;
|
||||
if (first === 'default') {
|
||||
first = config.defaultDatasource;
|
||||
}
|
||||
|
||||
var ds = config.datasources[first];
|
||||
|
||||
if (ds) {
|
||||
|
@ -214,12 +214,8 @@ export class KeybindingSrv {
|
||||
if (popups.length > 0) {
|
||||
return;
|
||||
}
|
||||
// close modals
|
||||
var modalData = $(".modal").data();
|
||||
if (modalData && modalData.$scope && modalData.$scope.dismiss) {
|
||||
modalData.$scope.dismiss();
|
||||
}
|
||||
|
||||
scope.appEvent('hide-modal');
|
||||
scope.appEvent('hide-dash-editor');
|
||||
scope.appEvent('panel-change-view', {fullscreen: false, edit: false});
|
||||
});
|
||||
|
@ -3,9 +3,11 @@ export default class TableModel {
|
||||
columns: any[];
|
||||
rows: any[];
|
||||
type: string;
|
||||
columnMap: any;
|
||||
|
||||
constructor() {
|
||||
this.columns = [];
|
||||
this.columnMap = {};
|
||||
this.rows = [];
|
||||
this.type = 'table';
|
||||
}
|
||||
@ -36,4 +38,11 @@ export default class TableModel {
|
||||
this.columns[options.col].desc = false;
|
||||
}
|
||||
}
|
||||
|
||||
addColumn(col) {
|
||||
if (!this.columnMap[col.text]) {
|
||||
this.columns.push(col);
|
||||
this.columnMap[col.text] = col;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
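addColumn uses columnMap purely as a membership test, so repeated column names are ignored while insertion order is preserved. The same dedup-by-key pattern distilled into Go (illustrative only; the panel code itself is TypeScript):

package tablemodel

// Column is a minimal stand-in for the dashboard table column.
type Column struct{ Text string }

// Table keeps insertion order in Columns and uses the map only as a
// seen-set, exactly like TableModel.addColumn above.
type Table struct {
	Columns   []Column
	columnMap map[string]bool
}

func NewTable() *Table {
	return &Table{columnMap: map[string]bool{}}
}

func (t *Table) AddColumn(c Column) {
	if t.columnMap[c.Text] {
		return
	}
	t.Columns = append(t.Columns, c)
	t.columnMap[c.Text] = true
}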
@ -15,39 +15,43 @@
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>Total dashboards</td>
|
||||
<td>{{ctrl.stats.dashboard_count}}</td>
|
||||
<td>{{ctrl.stats.dashboards}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Total users</td>
|
||||
<td>{{ctrl.stats.user_count}}</td>
|
||||
<td>{{ctrl.stats.users}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Active users (seen last 14 days)</td>
|
||||
<td>{{ctrl.stats.activeUsers}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Total organizations</td>
|
||||
<td>{{ctrl.stats.org_count}}</td>
|
||||
<td>{{ctrl.stats.orgs}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Total datasources</td>
|
||||
<td>{{ctrl.stats.data_source_count}}</td>
|
||||
<td>{{ctrl.stats.datasources}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Total playlists</td>
|
||||
<td>{{ctrl.stats.playlist_count}}</td>
|
||||
<td>{{ctrl.stats.playlists}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Total snapshots</td>
|
||||
<td>{{ctrl.stats.db_snapshot_count}}</td>
|
||||
<td>{{ctrl.stats.snapshots}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Total dashboard tags</td>
|
||||
<td>{{ctrl.stats.db_tag_count}}</td>
|
||||
<td>{{ctrl.stats.tags}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Total starred dashboards</td>
|
||||
<td>{{ctrl.stats.starred_db_count}}</td>
|
||||
<td>{{ctrl.stats.stars}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Total alerts</td>
|
||||
<td>{{ctrl.stats.alert_count}}</td>
|
||||
<td>{{ctrl.stats.alerts}}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
@ -25,7 +25,11 @@
|
||||
<th>Name</th>
|
||||
<th>Login</th>
|
||||
<th>Email</th>
|
||||
<th style="white-space: nowrap">Grafana Admin</th>
|
||||
<th>
|
||||
Seen
|
||||
<tip>Time since user was seen using Grafana</tip>
|
||||
</th>
|
||||
<th></th>
|
||||
<th></th>
|
||||
</tr>
|
||||
</thead>
|
||||
@ -35,7 +39,12 @@
|
||||
<td>{{user.name}}</td>
|
||||
<td>{{user.login}}</td>
|
||||
<td>{{user.email}}</td>
|
||||
<td>{{user.isAdmin}}</td>
|
||||
<td>
|
||||
{{user.lastSeenAtAge}}
|
||||
</td>
|
||||
<td>
|
||||
<i class="fa fa-shield" ng-show="user.isAdmin" bs-tooltip="'Grafana Admin'"></i>
|
||||
</td>
|
||||
<td class="text-right">
|
||||
<a href="admin/users/edit/{{user.id}}" class="btn btn-inverse btn-small">
|
||||
<i class="fa fa-edit"></i>
|
||||
|
@ -10,9 +10,10 @@ export class AdHocFiltersCtrl {
|
||||
removeTagFilterSegment: any;
|
||||
|
||||
/** @ngInject */
|
||||
constructor(private uiSegmentSrv, private datasourceSrv, private $q, private templateSrv, private $rootScope) {
|
||||
constructor(private uiSegmentSrv, private datasourceSrv, private $q, private variableSrv, private $scope, private $rootScope) {
|
||||
this.removeTagFilterSegment = uiSegmentSrv.newSegment({fake: true, value: '-- remove filter --'});
|
||||
this.buildSegmentModel();
|
||||
this.$rootScope.onAppEvent('template-variable-value-updated', this.buildSegmentModel.bind(this), $scope);
|
||||
}
|
||||
|
||||
buildSegmentModel() {
|
||||
@ -141,8 +142,7 @@ export class AdHocFiltersCtrl {
|
||||
}
|
||||
|
||||
this.variable.setFilters(filters);
|
||||
this.$rootScope.$emit('template-variable-value-updated');
|
||||
this.$rootScope.$broadcast('refresh');
|
||||
this.variableSrv.variableUpdated(this.variable, true);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -135,9 +135,10 @@ export class DashNavCtrl {
|
||||
|
||||
viewJson() {
|
||||
var clone = this.dashboard.getSaveModelClone();
|
||||
var html = angular.toJson(clone, true);
|
||||
var uri = "data:application/json;charset=utf-8," + encodeURIComponent(html);
|
||||
var newWindow = window.open(uri);
|
||||
|
||||
this.$rootScope.appEvent('show-json-editor', {
|
||||
object: clone,
|
||||
});
|
||||
}
|
||||
|
||||
onFolderChange(folderId) {
|
||||
|
@ -52,12 +52,16 @@ export class SaveDashboardAsModalCtrl {
|
||||
this.clone.hideControls = false;
|
||||
this.folderTitle = dashboard.meta.folderTitle || 'Root';
|
||||
|
||||
// remove alerts
|
||||
this.clone.rows.forEach(row => {
|
||||
row.panels.forEach(panel => {
|
||||
delete panel.alert;
|
||||
// remove alerts if source dashboard is already persisted
|
||||
// we do not want to create duplicate alerts
|
||||
if (dashboard.id > 0) {
|
||||
this.clone.rows.forEach(row => {
|
||||
row.panels.forEach(panel => {
|
||||
delete panel.thresholds;
|
||||
delete panel.alert;
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
delete this.clone.autoUpdate;
|
||||
}
|
||||
|
@ -22,20 +22,13 @@ export class SubmenuCtrl {
|
||||
}
|
||||
|
||||
variableUpdated(variable) {
|
||||
this.variableSrv.variableUpdated(variable).then(() => {
|
||||
this.$rootScope.$emit('template-variable-value-updated');
|
||||
this.$rootScope.$broadcast('refresh');
|
||||
});
|
||||
this.variableSrv.variableUpdated(variable, true);
|
||||
}
|
||||
|
||||
openEditView(editview) {
|
||||
var search = _.extend(this.$location.search(), {editview: editview});
|
||||
this.$location.search(search);
|
||||
}
|
||||
|
||||
exitBuildMode() {
|
||||
this.dashboard.toggleEditMode();
|
||||
}
|
||||
}
|
||||
|
||||
export function submenuDirective() {
|
||||
|
@ -41,6 +41,10 @@
|
||||
<tr>
|
||||
<th>Login</th>
|
||||
<th>Email</th>
|
||||
<th>
|
||||
Seen
|
||||
<tip>Time since user was seen using Grafana</tip>
|
||||
</th>
|
||||
<th>Role</th>
|
||||
<th style="width: 34px;"></th>
|
||||
</tr>
|
||||
@ -48,6 +52,7 @@
|
||||
<tr ng-repeat="user in ctrl.users">
|
||||
<td>{{user.login}}</td>
|
||||
<td><span class="ellipsis">{{user.email}}</span></td>
|
||||
<td>{{user.lastSeenAtAge}}</td>
|
||||
<td>
|
||||
<select type="text" ng-model="user.role" class="input-medium" ng-options="f for f in ['Viewer', 'Editor', 'Read Only Editor', 'Admin']" ng-change="ctrl.updateOrgUser(user)">
|
||||
</select>
|
||||
|
@ -35,14 +35,6 @@ export class VariableEditorCtrl {
|
||||
$scope.init = function() {
|
||||
$scope.mode = 'list';
|
||||
|
||||
$scope.datasources = _.filter(datasourceSrv.getMetricSources(), function(ds) {
|
||||
return !ds.meta.mixed && ds.value !== null;
|
||||
});
|
||||
|
||||
$scope.datasourceTypes = _($scope.datasources).uniqBy('meta.id').map(function(ds) {
|
||||
return {text: ds.meta.name, value: ds.meta.id};
|
||||
}).value();
|
||||
|
||||
$scope.variables = variableSrv.variables;
|
||||
$scope.reset();
|
||||
|
||||
@ -55,9 +47,8 @@ export class VariableEditorCtrl {
|
||||
|
||||
$scope.add = function() {
|
||||
if ($scope.isValid()) {
|
||||
$scope.variables.push($scope.current);
|
||||
variableSrv.addVariable($scope.current);
|
||||
$scope.update();
|
||||
$scope.dashboard.updateSubmenuVisibility();
|
||||
}
|
||||
};
|
||||
|
||||
@ -114,9 +105,8 @@ export class VariableEditorCtrl {
|
||||
$scope.duplicate = function(variable) {
|
||||
var clone = _.cloneDeep(variable.getSaveModel());
|
||||
$scope.current = variableSrv.createVariableFromModel(clone);
|
||||
$scope.variables.push($scope.current);
|
||||
$scope.current.name = 'copy_of_'+variable.name;
|
||||
$scope.dashboard.updateSubmenuVisibility();
|
||||
variableSrv.addVariable($scope.current);
|
||||
};
|
||||
|
||||
$scope.update = function() {
|
||||
@ -132,6 +122,15 @@ export class VariableEditorCtrl {
|
||||
$scope.reset = function() {
|
||||
$scope.currentIsNew = true;
|
||||
$scope.current = variableSrv.createVariableFromModel({type: 'query'});
|
||||
|
||||
// this is done here in case a new data source type variable was added
|
||||
$scope.datasources = _.filter(datasourceSrv.getMetricSources(), function(ds) {
|
||||
return !ds.meta.mixed && ds.value !== null;
|
||||
});
|
||||
|
||||
$scope.datasourceTypes = _($scope.datasources).uniqBy('meta.id').map(function(ds) {
|
||||
return {text: ds.meta.name, value: ds.meta.id};
|
||||
}).value();
|
||||
};
|
||||
|
||||
$scope.typeChanged = function() {
|
||||
@ -150,9 +149,7 @@ export class VariableEditorCtrl {
|
||||
};
|
||||
|
||||
$scope.removeVariable = function(variable) {
|
||||
var index = _.indexOf($scope.variables, variable);
|
||||
$scope.variables.splice(index, 1);
|
||||
$scope.dashboard.updateSubmenuVisibility();
|
||||
variableSrv.removeVariable(variable);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -128,7 +128,7 @@ export class QueryVariable implements Variable {
|
||||
}
|
||||
|
||||
metricFindQuery(datasource, query) {
|
||||
var options = {range: undefined};
|
||||
var options = {range: undefined, variable: this};
|
||||
|
||||
if (this.refresh === 2) {
|
||||
options.range = this.timeSrv.timeRange();
|
||||
|
@ -51,6 +51,31 @@ describe('templateSrv', function() {
|
||||
});
|
||||
});
|
||||
|
||||
describe('getAdhocFilters', function() {
|
||||
beforeEach(function() {
|
||||
initTemplateSrv([
|
||||
{type: 'datasource', name: 'ds', current: {value: 'logstash', text: 'logstash'}},
|
||||
{type: 'adhoc', name: 'test', datasource: 'oogle', filters: [1]},
|
||||
{type: 'adhoc', name: 'test2', datasource: '$ds', filters: [2]},
|
||||
]);
|
||||
});
|
||||
|
||||
it('should return filters if datasourceName match', function() {
|
||||
var filters = _templateSrv.getAdhocFilters('oogle');
|
||||
expect(filters).to.eql([1]);
|
||||
});
|
||||
|
||||
it('should return empty array if datasourceName does not match', function() {
|
||||
var filters = _templateSrv.getAdhocFilters('oogleasdasd');
|
||||
expect(filters).to.eql([]);
|
||||
});
|
||||
|
||||
it('should return filters when datasourceName match via data source variable', function() {
|
||||
var filters = _templateSrv.getAdhocFilters('logstash');
|
||||
expect(filters).to.eql([2]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('replace can pass multi / all format', function() {
|
||||
beforeEach(function() {
|
||||
initTemplateSrv([{type: 'query', name: 'test', current: {value: ['value1', 'value2'] }}]);
|
||||
|
@ -22,6 +22,7 @@ describe('VariableSrv', function() {
|
||||
ctx.variableSrv.init({
|
||||
templating: {list: []},
|
||||
events: new Emitter(),
|
||||
updateSubmenuVisibility: sinon.stub(),
|
||||
});
|
||||
ctx.$rootScope.$digest();
|
||||
}));
|
||||
@ -41,7 +42,9 @@ describe('VariableSrv', function() {
|
||||
ctx.datasourceSrv.getMetricSources = sinon.stub().returns(scenario.metricSources);
|
||||
|
||||
|
||||
scenario.variable = ctx.variableSrv.addVariable(scenario.variableModel);
|
||||
scenario.variable = ctx.variableSrv.createVariableFromModel(scenario.variableModel);
|
||||
ctx.variableSrv.addVariable(scenario.variable);
|
||||
|
||||
ctx.variableSrv.updateOptions(scenario.variable);
|
||||
ctx.$rootScope.$digest();
|
||||
});
|
||||
|
@ -15,7 +15,6 @@ function (angular, _, kbn) {
|
||||
this._index = {};
|
||||
this._texts = {};
|
||||
this._grafanaVariables = {};
|
||||
this._adhocVariables = {};
|
||||
|
||||
// default built ins
|
||||
this._builtIns = {};
|
||||
@ -30,24 +29,16 @@ function (angular, _, kbn) {
|
||||
this.updateTemplateData = function() {
|
||||
this._index = {};
|
||||
this._filters = {};
|
||||
this._adhocVariables = {};
|
||||
|
||||
for (var i = 0; i < this.variables.length; i++) {
|
||||
var variable = this.variables[i];
|
||||
|
||||
// add adhoc filters to its own index
|
||||
if (variable.type === 'adhoc') {
|
||||
this._adhocVariables[variable.datasource] = variable;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!variable.current || !variable.current.isNone && !variable.current.value) {
|
||||
continue;
|
||||
}
|
||||
|
||||
this._index[variable.name] = variable;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
this.variableInitialized = function(variable) {
|
||||
@ -55,11 +46,26 @@ function (angular, _, kbn) {
|
||||
};
|
||||
|
||||
this.getAdhocFilters = function(datasourceName) {
|
||||
var variable = this._adhocVariables[datasourceName];
|
||||
if (variable) {
|
||||
return variable.filters || [];
|
||||
var filters = [];
|
||||
|
||||
for (var i = 0; i < this.variables.length; i++) {
|
||||
var variable = this.variables[i];
|
||||
if (variable.type !== 'adhoc') {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (variable.datasource === datasourceName) {
|
||||
filters = filters.concat(variable.filters);
|
||||
}
|
||||
|
||||
if (variable.datasource.indexOf('$') === 0) {
|
||||
if (this.replace(variable.datasource) === datasourceName) {
|
||||
filters = filters.concat(variable.filters);
|
||||
}
|
||||
}
|
||||
}
|
||||
return [];
|
||||
|
||||
return filters;
|
||||
};
|
||||
|
||||
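The new getAdhocFilters scans every adhoc variable instead of the old one-per-datasource index, and also follows $variable datasource references via replace. The matching logic distilled into Go (illustrative; the original is JavaScript):

package adhoc

import "strings"

// AdhocVariable is a minimal stand-in for the template variable model.
type AdhocVariable struct {
	Datasource string
	Filters    []string
}

// FiltersFor collects filters from every adhoc variable whose datasource
// matches name, either directly or through a $var indirection resolved by
// the replace callback (templateSrv.replace in the original).
func FiltersFor(vars []AdhocVariable, name string, replace func(string) string) []string {
	var filters []string
	for _, v := range vars {
		if v.Datasource == name {
			filters = append(filters, v.Filters...)
		} else if strings.HasPrefix(v.Datasource, "$") && replace(v.Datasource) == name {
			filters = append(filters, v.Filters...)
		}
	}
	return filters
}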
function luceneEscape(value) {
|
||||
|
@ -90,17 +90,24 @@ export class VariableSrv {
|
||||
return variable;
|
||||
}
|
||||
|
||||
addVariable(model) {
|
||||
var variable = this.createVariableFromModel(model);
|
||||
addVariable(variable) {
|
||||
this.variables.push(variable);
|
||||
return variable;
|
||||
this.templateSrv.updateTemplateData();
|
||||
this.dashboard.updateSubmenuVisibility();
|
||||
}
|
||||
|
||||
removeVariable(variable) {
|
||||
var index = _.indexOf(this.variables, variable);
|
||||
this.variables.splice(index, 1);
|
||||
this.templateSrv.updateTemplateData();
|
||||
this.dashboard.updateSubmenuVisibility();
|
||||
}
|
||||
|
||||
updateOptions(variable) {
|
||||
return variable.updateOptions();
|
||||
}
|
||||
|
||||
variableUpdated(variable) {
|
||||
variableUpdated(variable, emitChangeEvents?) {
|
||||
// if there is a variable lock ignore cascading update because we are in a boot up scenario
|
||||
if (variable.initLock) {
|
||||
return this.$q.when();
|
||||
@ -117,7 +124,12 @@ export class VariableSrv {
|
||||
}
|
||||
});
|
||||
|
||||
return this.$q.all(promises);
|
||||
return this.$q.all(promises).then(() => {
|
||||
if (emitChangeEvents) {
|
||||
this.$rootScope.$emit('template-variable-value-updated');
|
||||
this.$rootScope.$broadcast('refresh');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
selectOptionsForCurrentValue(variable) {
|
||||
@ -218,6 +230,26 @@ export class VariableSrv {
|
||||
// update url
|
||||
this.$location.search(params);
|
||||
}
|
||||
|
||||
setAdhocFilter(options) {
|
||||
var variable = _.find(this.variables, {type: 'adhoc', datasource: options.datasource});
|
||||
if (!variable) {
|
||||
variable = this.createVariableFromModel({name: 'Filters', type: 'adhoc', datasource: options.datasource});
|
||||
this.addVariable(variable);
|
||||
}
|
||||
|
||||
let filters = variable.filters;
|
||||
let filter = _.find(filters, {key: options.key, value: options.value});
|
||||
|
||||
if (!filter) {
|
||||
filter = {key: options.key, value: options.value};
|
||||
filters.push(filter);
|
||||
}
|
||||
|
||||
filter.operator = options.operator;
|
||||
this.variableUpdated(variable, true);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
coreModule.service('variableSrv', VariableSrv);
|
||||
|
@ -11,6 +11,8 @@ define([
|
||||
function (angular, _, moment, kbn, ElasticQueryBuilder, IndexPattern, ElasticResponse) {
|
||||
'use strict';
|
||||
|
||||
ElasticResponse = ElasticResponse.ElasticResponse;
|
||||
|
||||
/** @ngInject */
|
||||
function ElasticDatasource(instanceSettings, $q, backendSrv, templateSrv, timeSrv) {
|
||||
this.basicAuth = instanceSettings.basicAuth;
|
||||
@ -270,10 +272,17 @@ function (angular, _, moment, kbn, ElasticQueryBuilder, IndexPattern, ElasticRes
|
||||
var subObj = obj[key];
|
||||
|
||||
// Check mapping field for nested fields
|
||||
if (subObj.hasOwnProperty('properties')) {
|
||||
if (_.isObject(subObj.properties)) {
|
||||
fieldNameParts.push(key);
|
||||
getFieldsRecursively(subObj.properties);
|
||||
} else {
|
||||
}
|
||||
|
||||
if (_.isObject(subObj.fields)) {
|
||||
fieldNameParts.push(key);
|
||||
getFieldsRecursively(subObj.fields);
|
||||
}
|
||||
|
||||
if (_.isString(subObj.type)) {
|
||||
var fieldName = fieldNameParts.concat(key).join('.');
|
||||
|
||||
// Hide meta-fields and check field type
|
||||
|
@ -1,2 +0,0 @@
|
||||
declare var test: any;
|
||||
export default test;
|
@ -1,350 +0,0 @@
|
||||
define([
|
||||
"lodash",
|
||||
"./query_def"
|
||||
],
|
||||
function (_, queryDef) {
|
||||
'use strict';
|
||||
|
||||
function ElasticResponse(targets, response) {
|
||||
this.targets = targets;
|
||||
this.response = response;
|
||||
}
|
||||
|
||||
ElasticResponse.prototype.processMetrics = function(esAgg, target, seriesList, props) {
|
||||
var metric, y, i, newSeries, bucket, value;
|
||||
|
||||
for (y = 0; y < target.metrics.length; y++) {
|
||||
metric = target.metrics[y];
|
||||
if (metric.hide) {
|
||||
continue;
|
||||
}
|
||||
|
||||
switch(metric.type) {
|
||||
case 'count': {
|
||||
newSeries = { datapoints: [], metric: 'count', props: props};
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
value = bucket.doc_count;
|
||||
newSeries.datapoints.push([value, bucket.key]);
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
break;
|
||||
}
|
||||
case 'percentiles': {
|
||||
if (esAgg.buckets.length === 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
var firstBucket = esAgg.buckets[0];
|
||||
var percentiles = firstBucket[metric.id].values;
|
||||
|
||||
for (var percentileName in percentiles) {
|
||||
newSeries = {datapoints: [], metric: 'p' + percentileName, props: props, field: metric.field};
|
||||
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
var values = bucket[metric.id].values;
|
||||
newSeries.datapoints.push([values[percentileName], bucket.key]);
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case 'extended_stats': {
|
||||
for (var statName in metric.meta) {
|
||||
if (!metric.meta[statName]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
newSeries = {datapoints: [], metric: statName, props: props, field: metric.field};
|
||||
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
var stats = bucket[metric.id];
|
||||
|
||||
// add stats that are in nested obj to top level obj
|
||||
stats.std_deviation_bounds_upper = stats.std_deviation_bounds.upper;
|
||||
stats.std_deviation_bounds_lower = stats.std_deviation_bounds.lower;
|
||||
|
||||
newSeries.datapoints.push([stats[statName], bucket.key]);
|
||||
}
|
||||
|
||||
seriesList.push(newSeries);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
newSeries = { datapoints: [], metric: metric.type, field: metric.field, props: props};
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
|
||||
value = bucket[metric.id];
|
||||
if (value !== undefined) {
|
||||
if (value.normalized_value) {
|
||||
newSeries.datapoints.push([value.normalized_value, bucket.key]);
|
||||
} else {
|
||||
newSeries.datapoints.push([value.value, bucket.key]);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.processAggregationDocs = function(esAgg, aggDef, target, docs, props) {
|
||||
var metric, y, i, bucket, metricName, doc;
|
||||
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
doc = _.defaults({}, props);
|
||||
doc[aggDef.field] = bucket.key;
|
||||
|
||||
for (y = 0; y < target.metrics.length; y++) {
|
||||
metric = target.metrics[y];
|
||||
|
||||
switch(metric.type) {
|
||||
case "count": {
|
||||
metricName = this._getMetricName(metric.type);
|
||||
doc[metricName] = bucket.doc_count;
|
||||
break;
|
||||
}
|
||||
case 'extended_stats': {
|
||||
for (var statName in metric.meta) {
|
||||
if (!metric.meta[statName]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
var stats = bucket[metric.id];
|
||||
// add stats that are in nested obj to top level obj
|
||||
stats.std_deviation_bounds_upper = stats.std_deviation_bounds.upper;
|
||||
stats.std_deviation_bounds_lower = stats.std_deviation_bounds.lower;
|
||||
|
||||
metricName = this._getMetricName(statName);
|
||||
doc[metricName] = stats[statName];
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
metricName = this._getMetricName(metric.type);
|
||||
var otherMetrics = _.filter(target.metrics, {type: metric.type});
|
||||
|
||||
// if there is more than one metric of the same type, include the field name in the property
|
||||
if (otherMetrics.length > 1) {
|
||||
metricName += ' ' + metric.field;
|
||||
}
|
||||
|
||||
doc[metricName] = bucket[metric.id].value;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
docs.push(doc);
|
||||
}
|
||||
};
|
||||
|
||||
// This is quite complex:
// we need to recurse down the nested buckets to build the series
|
||||
ElasticResponse.prototype.processBuckets = function(aggs, target, seriesList, docs, props, depth) {
|
||||
var bucket, aggDef, esAgg, aggId;
|
||||
var maxDepth = target.bucketAggs.length-1;
|
||||
|
||||
for (aggId in aggs) {
|
||||
aggDef = _.find(target.bucketAggs, {id: aggId});
|
||||
esAgg = aggs[aggId];
|
||||
|
||||
if (!aggDef) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (depth === maxDepth) {
|
||||
if (aggDef.type === 'date_histogram') {
|
||||
this.processMetrics(esAgg, target, seriesList, props);
|
||||
} else {
|
||||
this.processAggregationDocs(esAgg, aggDef, target, docs, props);
|
||||
}
|
||||
} else {
|
||||
for (var nameIndex in esAgg.buckets) {
|
||||
bucket = esAgg.buckets[nameIndex];
|
||||
props = _.clone(props);
|
||||
if (bucket.key !== void 0) {
|
||||
props[aggDef.field] = bucket.key;
|
||||
} else {
|
||||
props["filter"] = nameIndex;
|
||||
}
|
||||
if (bucket.key_as_string) {
|
||||
props[aggDef.field] = bucket.key_as_string;
|
||||
}
|
||||
this.processBuckets(bucket, target, seriesList, docs, props, depth+1);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype._getMetricName = function(metric) {
|
||||
var metricDef = _.find(queryDef.metricAggTypes, {value: metric});
|
||||
if (!metricDef) {
|
||||
metricDef = _.find(queryDef.extendedStats, {value: metric});
|
||||
}
|
||||
|
||||
return metricDef ? metricDef.text : metric;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype._getSeriesName = function(series, target, metricTypeCount) {
|
||||
var metricName = this._getMetricName(series.metric);
|
||||
|
||||
if (target.alias) {
|
||||
var regex = /\{\{([\s\S]+?)\}\}/g;
|
||||
|
||||
return target.alias.replace(regex, function(match, g1, g2) {
|
||||
var group = g1 || g2;
|
||||
|
||||
if (group.indexOf('term ') === 0) { return series.props[group.substring(5)]; }
|
||||
if (series.props[group] !== void 0) { return series.props[group]; }
|
||||
if (group === 'metric') { return metricName; }
|
||||
if (group === 'field') { return series.field; }
|
||||
|
||||
return match;
|
||||
});
|
||||
}
|
||||
|
||||
if (series.field && queryDef.isPipelineAgg(series.metric)) {
|
||||
var appliedAgg = _.find(target.metrics, { id: series.field });
|
||||
if (appliedAgg) {
|
||||
metricName += ' ' + queryDef.describeMetric(appliedAgg);
|
||||
} else {
|
||||
metricName = 'Unset';
|
||||
}
|
||||
} else if (series.field) {
|
||||
metricName += ' ' + series.field;
|
||||
}
|
||||
|
||||
var propKeys = _.keys(series.props);
|
||||
if (propKeys.length === 0) {
|
||||
return metricName;
|
||||
}
|
||||
|
||||
var name = '';
|
||||
for (var propName in series.props) {
|
||||
name += series.props[propName] + ' ';
|
||||
}
|
||||
|
||||
if (metricTypeCount === 1) {
|
||||
return name.trim();
|
||||
}
|
||||
|
||||
return name.trim() + ' ' + metricName;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.nameSeries = function(seriesList, target) {
|
||||
var metricTypeCount = _.uniq(_.map(seriesList, 'metric')).length;
|
||||
var fieldNameCount = _.uniq(_.map(seriesList, 'field')).length;
|
||||
|
||||
for (var i = 0; i < seriesList.length; i++) {
|
||||
var series = seriesList[i];
|
||||
series.target = this._getSeriesName(series, target, metricTypeCount, fieldNameCount);
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.processHits = function(hits, seriesList) {
|
||||
var series = {target: 'docs', type: 'docs', datapoints: [], total: hits.total};
|
||||
var propName, hit, doc, i;
|
||||
|
||||
for (i = 0; i < hits.hits.length; i++) {
|
||||
hit = hits.hits[i];
|
||||
doc = {
|
||||
_id: hit._id,
|
||||
_type: hit._type,
|
||||
_index: hit._index
|
||||
};
|
||||
|
||||
if (hit._source) {
|
||||
for (propName in hit._source) {
|
||||
doc[propName] = hit._source[propName];
|
||||
}
|
||||
}
|
||||
|
||||
for (propName in hit.fields) {
|
||||
doc[propName] = hit.fields[propName];
|
||||
}
|
||||
series.datapoints.push(doc);
|
||||
}
|
||||
|
||||
seriesList.push(series);
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.trimDatapoints = function(aggregations, target) {
|
||||
var histogram = _.find(target.bucketAggs, { type: 'date_histogram'});
|
||||
|
||||
var shouldDropFirstAndLast = histogram && histogram.settings && histogram.settings.trimEdges;
|
||||
if (shouldDropFirstAndLast) {
|
||||
var trim = histogram.settings.trimEdges;
|
||||
for(var prop in aggregations) {
|
||||
var points = aggregations[prop];
|
||||
if (points.datapoints.length > trim * 2) {
|
||||
points.datapoints = points.datapoints.slice(trim, points.datapoints.length - trim);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.getErrorFromElasticResponse = function(response, err) {
|
||||
var result = {};
|
||||
result.data = JSON.stringify(err, null, 4);
|
||||
if (err.root_cause && err.root_cause.length > 0 && err.root_cause[0].reason) {
|
||||
result.message = err.root_cause[0].reason;
|
||||
} else {
|
||||
result.message = err.reason || 'Unknown elastic error response';
|
||||
}
|
||||
|
||||
if (response.$$config) {
|
||||
result.config = response.$$config;
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.getTimeSeries = function() {
|
||||
var seriesList = [];
|
||||
|
||||
for (var i = 0; i < this.response.responses.length; i++) {
|
||||
var response = this.response.responses[i];
|
||||
if (response.error) {
|
||||
throw this.getErrorFromElasticResponse(this.response, response.error);
|
||||
}
|
||||
|
||||
if (response.hits && response.hits.hits.length > 0) {
|
||||
this.processHits(response.hits, seriesList);
|
||||
}
|
||||
|
||||
if (response.aggregations) {
|
||||
var aggregations = response.aggregations;
|
||||
var target = this.targets[i];
|
||||
var tmpSeriesList = [];
|
||||
var docs = [];
|
||||
|
||||
this.processBuckets(aggregations, target, tmpSeriesList, docs, {}, 0);
|
||||
this.trimDatapoints(tmpSeriesList, target);
|
||||
this.nameSeries(tmpSeriesList, target);
|
||||
|
||||
for (var y = 0; y < tmpSeriesList.length; y++) {
|
||||
seriesList.push(tmpSeriesList[y]);
|
||||
}
|
||||
|
||||
if (seriesList.length === 0 && docs.length > 0) {
|
||||
seriesList.push({target: 'docs', type: 'docs', datapoints: docs});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { data: seriesList };
|
||||
};
|
||||
|
||||
return ElasticResponse;
|
||||
});
|
360
public/app/plugins/datasource/elasticsearch/elastic_response.ts
Normal file
@ -0,0 +1,360 @@
|
||||
///<reference path="../../../headers/common.d.ts" />
|
||||
|
||||
import _ from 'lodash';
|
||||
import queryDef from "./query_def";
|
||||
import TableModel from 'app/core/table_model';
|
||||
|
||||
export function ElasticResponse(targets, response) {
|
||||
this.targets = targets;
|
||||
this.response = response;
|
||||
}
|
||||
|
||||
ElasticResponse.prototype.processMetrics = function(esAgg, target, seriesList, props) {
|
||||
var metric, y, i, newSeries, bucket, value;
|
||||
|
||||
for (y = 0; y < target.metrics.length; y++) {
|
||||
metric = target.metrics[y];
|
||||
if (metric.hide) {
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (metric.type) {
|
||||
case 'count': {
|
||||
newSeries = { datapoints: [], metric: 'count', props: props};
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
value = bucket.doc_count;
|
||||
newSeries.datapoints.push([value, bucket.key]);
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
break;
|
||||
}
|
||||
case 'percentiles': {
|
||||
if (esAgg.buckets.length === 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
var firstBucket = esAgg.buckets[0];
|
||||
var percentiles = firstBucket[metric.id].values;
|
||||
|
||||
for (var percentileName in percentiles) {
|
||||
newSeries = {datapoints: [], metric: 'p' + percentileName, props: props, field: metric.field};
|
||||
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
var values = bucket[metric.id].values;
|
||||
newSeries.datapoints.push([values[percentileName], bucket.key]);
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case 'extended_stats': {
|
||||
for (var statName in metric.meta) {
|
||||
if (!metric.meta[statName]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
newSeries = {datapoints: [], metric: statName, props: props, field: metric.field};
|
||||
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
var stats = bucket[metric.id];
|
||||
|
||||
// add stats that are in nested obj to top level obj
|
||||
stats.std_deviation_bounds_upper = stats.std_deviation_bounds.upper;
|
||||
stats.std_deviation_bounds_lower = stats.std_deviation_bounds.lower;
|
||||
|
||||
newSeries.datapoints.push([stats[statName], bucket.key]);
|
||||
}
|
||||
|
||||
seriesList.push(newSeries);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
newSeries = { datapoints: [], metric: metric.type, field: metric.field, props: props};
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
|
||||
value = bucket[metric.id];
|
||||
if (value !== undefined) {
|
||||
if (value.normalized_value) {
|
||||
newSeries.datapoints.push([value.normalized_value, bucket.key]);
|
||||
} else {
|
||||
newSeries.datapoints.push([value.value, bucket.key]);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.processAggregationDocs = function(esAgg, aggDef, target, table, props) {
|
||||
// add columns
|
||||
if (table.columns.length === 0) {
|
||||
for (let propKey of _.keys(props)) {
|
||||
table.addColumn({text: propKey, filterable: true});
|
||||
}
|
||||
table.addColumn({text: aggDef.field, filterable: true});
|
||||
}
|
||||
|
||||
// helper func to add values to value array
|
||||
let addMetricValue = (values, metricName, value) => {
|
||||
table.addColumn({text: metricName});
|
||||
values.push(value);
|
||||
};
|
||||
|
||||
for (let bucket of esAgg.buckets) {
|
||||
let values = [];
|
||||
|
||||
for (let propValues of _.values(props)) {
|
||||
values.push(propValues);
|
||||
}
|
||||
|
||||
// add bucket key (value)
|
||||
values.push(bucket.key);
|
||||
|
||||
for (let metric of target.metrics) {
|
||||
switch (metric.type) {
|
||||
case "count": {
|
||||
addMetricValue(values, this._getMetricName(metric.type), bucket.doc_count);
|
||||
break;
|
||||
}
|
||||
case 'extended_stats': {
|
||||
for (var statName in metric.meta) {
|
||||
if (!metric.meta[statName]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
var stats = bucket[metric.id];
|
||||
// add stats that are in nested obj to top level obj
|
||||
stats.std_deviation_bounds_upper = stats.std_deviation_bounds.upper;
|
||||
stats.std_deviation_bounds_lower = stats.std_deviation_bounds.lower;
|
||||
|
||||
addMetricValue(values, this._getMetricName(statName), stats[statName]);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
let metricName = this._getMetricName(metric.type);
|
||||
let otherMetrics = _.filter(target.metrics, {type: metric.type});
|
||||
|
||||
// if there is more than one metric of the same type, include the field name in the property
|
||||
if (otherMetrics.length > 1) {
|
||||
metricName += ' ' + metric.field;
|
||||
}
|
||||
|
||||
addMetricValue(values, metricName, bucket[metric.id].value);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
table.rows.push(values);
|
||||
}
|
||||
};
|
||||
|
||||
// This is quite complex:
// we need to recurse down the nested buckets to build the series
|
||||
ElasticResponse.prototype.processBuckets = function(aggs, target, seriesList, table, props, depth) {
|
||||
var bucket, aggDef, esAgg, aggId;
|
||||
var maxDepth = target.bucketAggs.length-1;
|
||||
|
||||
for (aggId in aggs) {
|
||||
aggDef = _.find(target.bucketAggs, {id: aggId});
|
||||
esAgg = aggs[aggId];
|
||||
|
||||
if (!aggDef) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (depth === maxDepth) {
|
||||
if (aggDef.type === 'date_histogram') {
|
||||
this.processMetrics(esAgg, target, seriesList, props);
|
||||
} else {
|
||||
this.processAggregationDocs(esAgg, aggDef, target, table, props);
|
||||
}
|
||||
} else {
|
||||
for (var nameIndex in esAgg.buckets) {
|
||||
bucket = esAgg.buckets[nameIndex];
|
||||
props = _.clone(props);
|
||||
if (bucket.key !== void 0) {
|
||||
props[aggDef.field] = bucket.key;
|
||||
} else {
|
||||
props["filter"] = nameIndex;
|
||||
}
|
||||
if (bucket.key_as_string) {
|
||||
props[aggDef.field] = bucket.key_as_string;
|
||||
}
|
||||
this.processBuckets(bucket, target, seriesList, table, props, depth+1);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype._getMetricName = function(metric) {
|
||||
var metricDef = _.find(queryDef.metricAggTypes, {value: metric});
|
||||
if (!metricDef) {
|
||||
metricDef = _.find(queryDef.extendedStats, {value: metric});
|
||||
}
|
||||
|
||||
return metricDef ? metricDef.text : metric;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype._getSeriesName = function(series, target, metricTypeCount) {
|
||||
var metricName = this._getMetricName(series.metric);
|
||||
|
||||
if (target.alias) {
|
||||
var regex = /\{\{([\s\S]+?)\}\}/g;
|
||||
|
||||
return target.alias.replace(regex, function(match, g1, g2) {
|
||||
var group = g1 || g2;
|
||||
|
||||
if (group.indexOf('term ') === 0) { return series.props[group.substring(5)]; }
|
||||
if (series.props[group] !== void 0) { return series.props[group]; }
|
||||
if (group === 'metric') { return metricName; }
|
||||
if (group === 'field') { return series.field; }
|
||||
|
||||
return match;
|
||||
});
|
||||
}
|
||||
|
||||
if (series.field && queryDef.isPipelineAgg(series.metric)) {
|
||||
var appliedAgg = _.find(target.metrics, { id: series.field });
|
||||
if (appliedAgg) {
|
||||
metricName += ' ' + queryDef.describeMetric(appliedAgg);
|
||||
} else {
|
||||
metricName = 'Unset';
|
||||
}
|
||||
} else if (series.field) {
|
||||
metricName += ' ' + series.field;
|
||||
}
|
||||
|
||||
var propKeys = _.keys(series.props);
|
||||
if (propKeys.length === 0) {
|
||||
return metricName;
|
||||
}
|
||||
|
||||
var name = '';
|
||||
for (var propName in series.props) {
|
||||
name += series.props[propName] + ' ';
|
||||
}
|
||||
|
||||
if (metricTypeCount === 1) {
|
||||
return name.trim();
|
||||
}
|
||||
|
||||
return name.trim() + ' ' + metricName;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.nameSeries = function(seriesList, target) {
|
||||
var metricTypeCount = _.uniq(_.map(seriesList, 'metric')).length;
|
||||
var fieldNameCount = _.uniq(_.map(seriesList, 'field')).length;
|
||||
|
||||
for (var i = 0; i < seriesList.length; i++) {
|
||||
var series = seriesList[i];
|
||||
series.target = this._getSeriesName(series, target, metricTypeCount, fieldNameCount);
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.processHits = function(hits, seriesList) {
|
||||
var series = {target: 'docs', type: 'docs', datapoints: [], total: hits.total, filterable: true};
|
||||
var propName, hit, doc, i;
|
||||
|
||||
for (i = 0; i < hits.hits.length; i++) {
|
||||
hit = hits.hits[i];
|
||||
doc = {
|
||||
_id: hit._id,
|
||||
_type: hit._type,
|
||||
_index: hit._index
|
||||
};
|
||||
|
||||
if (hit._source) {
|
||||
for (propName in hit._source) {
|
||||
doc[propName] = hit._source[propName];
|
||||
}
|
||||
}
|
||||
|
||||
for (propName in hit.fields) {
|
||||
doc[propName] = hit.fields[propName];
|
||||
}
|
||||
series.datapoints.push(doc);
|
||||
}
|
||||
|
||||
seriesList.push(series);
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.trimDatapoints = function(aggregations, target) {
|
||||
var histogram = _.find(target.bucketAggs, { type: 'date_histogram'});
|
||||
|
||||
var shouldDropFirstAndLast = histogram && histogram.settings && histogram.settings.trimEdges;
|
||||
if (shouldDropFirstAndLast) {
|
||||
var trim = histogram.settings.trimEdges;
|
||||
for (var prop in aggregations) {
|
||||
var points = aggregations[prop];
|
||||
if (points.datapoints.length > trim * 2) {
|
||||
points.datapoints = points.datapoints.slice(trim, points.datapoints.length - trim);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.getErrorFromElasticResponse = function(response, err) {
|
||||
var result: any = {};
|
||||
result.data = JSON.stringify(err, null, 4);
|
||||
if (err.root_cause && err.root_cause.length > 0 && err.root_cause[0].reason) {
|
||||
result.message = err.root_cause[0].reason;
|
||||
} else {
|
||||
result.message = err.reason || 'Unknown elastic error response';
|
||||
}
|
||||
|
||||
if (response.$$config) {
|
||||
result.config = response.$$config;
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.getTimeSeries = function() {
|
||||
var seriesList = [];
|
||||
|
||||
for (var i = 0; i < this.response.responses.length; i++) {
|
||||
var response = this.response.responses[i];
|
||||
if (response.error) {
|
||||
throw this.getErrorFromElasticResponse(this.response, response.error);
|
||||
}
|
||||
|
||||
if (response.hits && response.hits.hits.length > 0) {
|
||||
this.processHits(response.hits, seriesList);
|
||||
}
|
||||
|
||||
if (response.aggregations) {
|
||||
var aggregations = response.aggregations;
|
||||
var target = this.targets[i];
|
||||
var tmpSeriesList = [];
|
||||
var table = new TableModel();
|
||||
|
||||
this.processBuckets(aggregations, target, tmpSeriesList, table, {}, 0);
|
||||
this.trimDatapoints(tmpSeriesList, target);
|
||||
this.nameSeries(tmpSeriesList, target);
|
||||
|
||||
for (var y = 0; y < tmpSeriesList.length; y++) {
|
||||
seriesList.push(tmpSeriesList[y]);
|
||||
}
|
||||
|
||||
if (table.rows.length > 0) {
|
||||
seriesList.push(table);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { data: seriesList };
|
||||
};
|
||||
|
@ -129,7 +129,10 @@ describe('ElasticDatasource', function() {
|
||||
'@timestamp': {type: 'date'},
|
||||
beat: {
|
||||
properties: {
|
||||
name: {type: 'string'},
|
||||
name: {
|
||||
fields: {raw: {type: 'keyword'}},
|
||||
type: 'string'
|
||||
},
|
||||
hostname: {type: 'string'},
|
||||
}
|
||||
},
|
||||
@ -169,6 +172,7 @@ describe('ElasticDatasource', function() {
|
||||
var fields = _.map(fieldObjects, 'text');
|
||||
expect(fields).to.eql([
|
||||
'@timestamp',
|
||||
'beat.name.raw',
|
||||
'beat.name',
|
||||
'beat.hostname',
|
||||
'system.cpu.system',
|
||||
|
@ -1,6 +1,6 @@
|
||||
|
||||
import {describe, beforeEach, it, expect} from 'test/lib/common';
|
||||
import ElasticResponse from '../elastic_response';
|
||||
import {ElasticResponse} from '../elastic_response';
|
||||
|
||||
describe('ElasticResponse', function() {
|
||||
var targets;
|
||||
@ -387,10 +387,9 @@ describe('ElasticResponse', function() {
|
||||
result = new ElasticResponse(targets, response).getTimeSeries();
|
||||
});
|
||||
|
||||
it('should return docs with byte and count', function() {
|
||||
expect(result.data[0].datapoints.length).to.be(3);
|
||||
expect(result.data[0].datapoints[0].Count).to.be(1);
|
||||
expect(result.data[0].datapoints[0].bytes).to.be(1000);
|
||||
it('should return table with byte and count', function() {
|
||||
expect(result.data[0].rows.length).to.be(3);
|
||||
expect(result.data[0].columns).to.eql([{text: 'bytes', filterable: true}, {text: 'Count'}]);
|
||||
});
|
||||
});
|
||||
|
||||
@ -530,14 +529,14 @@ describe('ElasticResponse', function() {
|
||||
|
||||
it('should return table', function() {
|
||||
expect(result.data.length).to.be(1);
|
||||
expect(result.data[0].type).to.be('docs');
|
||||
expect(result.data[0].datapoints.length).to.be(2);
|
||||
expect(result.data[0].datapoints[0].host).to.be("server-1");
|
||||
expect(result.data[0].datapoints[0].Average).to.be(1000);
|
||||
expect(result.data[0].datapoints[0].Count).to.be(369);
|
||||
expect(result.data[0].type).to.be('table');
|
||||
expect(result.data[0].rows.length).to.be(2);
|
||||
expect(result.data[0].rows[0][0]).to.be("server-1");
|
||||
expect(result.data[0].rows[0][1]).to.be(1000);
|
||||
expect(result.data[0].rows[0][2]).to.be(369);
|
||||
|
||||
expect(result.data[0].datapoints[1].host).to.be("server-2");
|
||||
expect(result.data[0].datapoints[1].Average).to.be(2000);
|
||||
expect(result.data[0].rows[1][0]).to.be("server-2");
|
||||
expect(result.data[0].rows[1][1]).to.be(2000);
|
||||
});
|
||||
});
|
||||
|
||||
@ -573,10 +572,9 @@ describe('ElasticResponse', function() {
|
||||
});
|
||||
|
||||
it('should include field in metric name', function() {
|
||||
expect(result.data[0].type).to.be('docs');
|
||||
expect(result.data[0].datapoints[0].Average).to.be(undefined);
|
||||
expect(result.data[0].datapoints[0]['Average test']).to.be(1000);
|
||||
expect(result.data[0].datapoints[0]['Average test2']).to.be(3000);
|
||||
expect(result.data[0].type).to.be('table');
|
||||
expect(result.data[0].rows[0][1]).to.be(1000);
|
||||
expect(result.data[0].rows[0][2]).to.be(3000);
|
||||
});
|
||||
});
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff.