mirror of https://github.com/grafana/grafana.git
Merge branch 'master' into ace-editor
@@ -8,12 +8,19 @@
* **GitHub OAuth**: Support for GitHub organizations with 100+ teams. [#8846](https://github.com/grafana/grafana/issues/8846), thx [@skwashd](https://github.com/skwashd)
* **Graphite**: Calls to the Graphite api /metrics/find now include the panel or dashboard time range (from & until) in most cases, [#8055](https://github.com/grafana/grafana/issues/8055)
* **Graphite**: Added new graphite 1.0 functions, available if you set version to 1.0.x in data source settings. New functions: mapSeries, reduceSeries, isNonNull, groupByNodes, offsetToZero, grep, weightedAverage, removeEmptySeries, aggregateLine, averageOutsidePercentile, delay, exponentialMovingAverage, fallbackSeries, integralByInterval, interpolate, invert, linearRegression, movingMin, movingMax, movingSum, multiplySeriesWithWildcards, pow, powSeries, removeBetweenPercentile, squareRoot, timeSlice, closes [#8261](https://github.com/grafana/grafana/issues/8261)

## Changes

* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
# 4.4.3 (unreleased)
# 4.4.4 (unreleased)

## Bug Fixes

* **MySQL/Postgres**: Fix for the max_idle_conn option default, which was wrongly set to zero. Zero does not mean unlimited; it means no idle connections are kept, which in practice disables connection pooling. Fixes [#8513](https://github.com/grafana/grafana/issues/8513)
# 4.4.3 (2017-08-07)

## Bug Fixes
@@ -76,8 +76,10 @@ password =
# Example: mysql://user:secret@host:port/database
url =

# Max idle conn setting default is 2
max_idle_conn = 2

# Max conn setting default is 0 (means "not set")
max_idle_conn =
max_open_conn =

# For "postgres", use either "disable", "require" or "verify-full"
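As a side note, these settings live in the `[database]` section; a minimal sketch of trying them out locally, assuming the usual `conf/custom.ini` override file (the values here are illustrative, not recommendations):

```bash
# Hypothetical local override: keep 2 idle connections, cap the pool at 100.
# A max_idle_conn of 0 keeps no idle connections, which effectively
# disables pooling — the bug the changelog entry above fixes.
cat >> conf/custom.ini <<'EOF'
[database]
max_idle_conn = 2
max_open_conn = 100
EOF
```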
@@ -85,8 +85,10 @@
# For "sqlite3" only, path relative to data_path setting
;path = grafana.db

# Max idle conn setting default is 2
;max_idle_conn = 2

# Max conn setting default is 0 (means "not set")
;max_idle_conn =
;max_open_conn =
@@ -32,6 +32,7 @@ add ./files/my_htpasswd /etc/nginx/.htpasswd
# Add system service config
add ./files/nginx.conf /etc/nginx/nginx.conf
add ./files/supervisord.conf /etc/supervisor/conf.d/supervisord.conf

# Nginx
#
# graphite
@@ -39,6 +40,7 @@ expose 80

# Carbon line receiver port
expose 2003

# Carbon cache query port
expose 7002
docker/blocks/graphite1/Dockerfile (new file, 93 lines)
@@ -0,0 +1,93 @@
FROM phusion/baseimage:0.9.22
MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>

RUN apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y --force-yes install vim \
  nginx \
  python-dev \
  python-flup \
  python-pip \
  python-ldap \
  expect \
  git \
  memcached \
  sqlite3 \
  libffi-dev \
  libcairo2 \
  libcairo2-dev \
  python-cairo \
  python-rrdtool \
  pkg-config \
  nodejs \
  && rm -rf /var/lib/apt/lists/*

# fix python dependencies (LTS Django and newer memcached/txAMQP)
RUN pip install django==1.8.18 \
  python-memcached==1.53 \
  txAMQP==0.6.2 \
  && pip install --upgrade pip

# install whisper
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper
WORKDIR /usr/local/src/whisper
RUN python ./setup.py install

# install carbon
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon
WORKDIR /usr/local/src/carbon
RUN pip install -r requirements.txt \
  && python ./setup.py install

# install graphite
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
WORKDIR /usr/local/src/graphite-web
RUN pip install -r requirements.txt \
  && python ./setup.py install
ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
WORKDIR /opt/graphite/webapp
RUN mkdir -p /var/log/graphite/ \
  && PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings

# install statsd
RUN git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
ADD conf/opt/statsd/config.js /opt/statsd/config.js

# config nginx
RUN rm /etc/nginx/sites-enabled/default
ADD conf/etc/nginx/nginx.conf /etc/nginx/nginx.conf
ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/graphite-statsd.conf

# init django admin
ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
RUN chmod +x /usr/local/bin/manage.sh \
  && /usr/local/bin/django_admin_init.exp

# logging support
RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
ADD conf/etc/logrotate.d/graphite-statsd /etc/logrotate.d/graphite-statsd

# daemons
ADD conf/etc/service/carbon/run /etc/service/carbon/run
ADD conf/etc/service/carbon-aggregator/run /etc/service/carbon-aggregator/run
ADD conf/etc/service/graphite/run /etc/service/graphite/run
ADD conf/etc/service/statsd/run /etc/service/statsd/run
ADD conf/etc/service/nginx/run /etc/service/nginx/run

# default conf setup
ADD conf /etc/graphite-statsd/conf
ADD conf/etc/my_init.d/01_conf_init.sh /etc/my_init.d/01_conf_init.sh

# cleanup
RUN apt-get clean \
  && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# defaults
EXPOSE 80 2003-2004 2023-2024 8125/udp 8126
VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
WORKDIR /
ENV HOME /root
CMD ["/sbin/my_init"]
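For a quick standalone test of this image, something like the following should work, assuming the port mappings used by the fig file later in this diff (8080 → nginx, 2003 → carbon line receiver):

```bash
# Build and run the graphite1 block by itself (a sketch; the repo's docker
# tooling normally composes these blocks together)
docker build -t graphite1 docker/blocks/graphite1
docker run -d --name graphite1 -p 8080:80 -p 2003:2003 graphite1
```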
docker/blocks/graphite1/conf/etc/logrotate.d/graphite-statsd (new file, 11 lines)
@@ -0,0 +1,11 @@
/var/log/*.log /var/log/*/*.log {
  weekly
  size 50M
  missingok
  rotate 10
  compress
  delaycompress
  notifempty
  copytruncate
  su root syslog
}
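Since copytruncate and the su directive are easy to get subtly wrong, a debug pass is a cheap sanity check; logrotate's -d flag parses the config and reports what it would do without rotating anything:

```bash
# Dry run: validate the rules above against the live log directories
logrotate -d /etc/logrotate.d/graphite-statsd
```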
docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
#!/bin/bash

conf_dir=/etc/graphite-statsd/conf

# auto setup graphite with default configs if /opt/graphite is missing
# needed for the use case when a docker host volume is mounted at any of the following:
# - /opt/graphite
# - /opt/graphite/conf
# - /opt/graphite/webapp/graphite
graphite_dir_contents=$(find /opt/graphite -mindepth 1 -print -quit)
graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit)
graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit)
graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit)
if [[ -z $graphite_dir_contents ]]; then
  git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
  cd /usr/local/src/graphite-web && python ./setup.py install
fi
if [[ -z $graphite_storage_dir_contents ]]; then
  /usr/local/bin/django_admin_init.exp
fi
if [[ -z $graphite_conf_dir_contents ]]; then
  cp -R $conf_dir/opt/graphite/conf/*.conf /opt/graphite/conf/
fi
if [[ -z $graphite_webapp_dir_contents ]]; then
  cp $conf_dir/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
fi

# auto setup statsd with default config if /opt/statsd is missing
# needed for the use case when a docker host volume is mounted at any of the following:
# - /opt/statsd
statsd_dir_contents=$(find /opt/statsd -mindepth 1 -print -quit)
if [[ -z $statsd_dir_contents ]]; then
  git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
  cp $conf_dir/opt/statsd/config.js /opt/statsd/config.js
fi
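The emptiness tests above lean on find's -quit action, which stops after printing a single entry, so the check stays cheap even on a large whisper tree. A minimal standalone version of the idiom:

```bash
# Empty output from find means the directory has no contents at all
if [[ -z $(find /opt/graphite -mindepth 1 -print -quit) ]]; then
  echo "/opt/graphite is empty - seeding defaults"
fi
```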
docker/blocks/graphite1/conf/etc/nginx/nginx.conf (new file, 96 lines)
@@ -0,0 +1,96 @@
user www-data;
worker_processes 4;
pid /run/nginx.pid;
daemon off;

events {
  worker_connections 768;
  # multi_accept on;
}

http {

  ##
  # Basic Settings
  ##

  sendfile on;
  tcp_nopush on;
  tcp_nodelay on;
  keepalive_timeout 65;
  types_hash_max_size 2048;
  # server_tokens off;

  # server_names_hash_bucket_size 64;
  # server_name_in_redirect off;

  include /etc/nginx/mime.types;
  default_type application/octet-stream;

  ##
  # Logging Settings
  ##

  access_log /var/log/nginx/access.log;
  error_log /var/log/nginx/error.log;

  ##
  # Gzip Settings
  ##

  gzip on;
  gzip_disable "msie6";

  # gzip_vary on;
  # gzip_proxied any;
  # gzip_comp_level 6;
  # gzip_buffers 16 8k;
  # gzip_http_version 1.1;
  # gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;

  ##
  # nginx-naxsi config
  ##
  # Uncomment it if you installed nginx-naxsi
  ##

  #include /etc/nginx/naxsi_core.rules;

  ##
  # nginx-passenger config
  ##
  # Uncomment it if you installed nginx-passenger
  ##

  #passenger_root /usr;
  #passenger_ruby /usr/bin/ruby;

  ##
  # Virtual Host Configs
  ##

  include /etc/nginx/conf.d/*.conf;
  include /etc/nginx/sites-enabled/*;
}


#mail {
#  # See sample authentication script at:
#  # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
#  # auth_http localhost/auth.php;
#  # pop3_capabilities "TOP" "USER";
#  # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
#  server {
#    listen localhost:110;
#    protocol pop3;
#    proxy on;
#  }
#
#  server {
#    listen localhost:143;
#    protocol imap;
#    proxy on;
#  }
#}
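Because daemon off; is set here (nginx runs under runit in this image), a config typo would crash-loop the service, so validating before a restart is worthwhile:

```bash
# Parse and test the configuration without starting the server
nginx -t -c /etc/nginx/nginx.conf
```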
docker/blocks/graphite1/conf/etc/nginx/sites-enabled/graphite-statsd.conf (new file, 31 lines)
@@ -0,0 +1,31 @@
server {
  listen 80;
  root /opt/graphite/static;
  index index.html;

  location /media {
    # django admin static files
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/media/;
  }

  location /admin/auth/admin {
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
  }

  location /admin/auth/user/admin {
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
  }

  location / {
    proxy_pass http://localhost:8080;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

    add_header 'Access-Control-Allow-Origin' '*';
    add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
    add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type';
    add_header 'Access-Control-Allow-Credentials' 'true';
  }

}
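With this vhost in place graphite-web is reachable through port 80; one way to smoke-test the proxy is the standard graphite render API (the carbon self-metrics queried here exist once carbon-cache has run for a minute or so):

```bash
# Ask graphite-web, via nginx, for carbon's own throughput metric as JSON
curl 'http://localhost/render?target=carbon.agents.*.metricsReceived&format=json&from=-5min'
```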
docker/blocks/graphite1/conf/etc/service/carbon-aggregator/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-aggregator-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-aggregator.py start --debug 2>&1 >> /var/log/carbon-aggregator.log
docker/blocks/graphite1/conf/etc/service/carbon/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-cache-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-cache.py start --debug 2>&1 >> /var/log/carbon.log
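Once carbon-cache is up, the plaintext protocol on port 2003 accepts one datapoint per line: metric path, value, unix timestamp. A minimal test, assuming a netcat build that supports -q:

```bash
# Send one datapoint; carbon creates test/foo.wsp per storage-schemas.conf
echo "test.foo 42 $(date +%s)" | nc -q0 localhost 2003
```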
docker/blocks/graphite1/conf/etc/service/graphite/run (new executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/bash

export PYTHONPATH=/opt/graphite/webapp && exec /usr/local/bin/gunicorn wsgi --workers=4 --bind=127.0.0.1:8080 --log-file=/var/log/gunicorn.log --preload --pythonpath=/opt/graphite/webapp/graphite
docker/blocks/graphite1/conf/etc/service/nginx/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

mkdir -p /var/log/nginx
exec /usr/sbin/nginx -c /etc/nginx/nginx.conf
docker/blocks/graphite1/conf/etc/service/statsd/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

exec /usr/bin/nodejs /opt/statsd/stats.js /opt/statsd/config.js >> /var/log/statsd.log 2>&1
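statsd listens for its own text protocol on UDP 8125 (per config.js later in this diff) and flushes aggregates to carbon every 10 seconds; a quick counter increment for testing:

```bash
# "name:value|type" - here a counter (c) bumped by 1
echo "site.hits:1|c" | nc -u -w1 localhost 8125
```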
docker/blocks/graphite1/conf/opt/graphite/conf/aggregation-rules.conf (new file, 35 lines)
@@ -0,0 +1,35 @@
# The form of each line in this file should be as follows:
#
# output_template (frequency) = method input_pattern
#
# This will capture any received metrics that match 'input_pattern'
# for calculating an aggregate metric. The calculation will occur
# every 'frequency' seconds and the 'method' can specify 'sum' or
# 'avg'. The name of the aggregate metric will be derived from
# 'output_template' filling in any captured fields from 'input_pattern'.
#
# For example, if your metric naming scheme is:
#
#   <env>.applications.<app>.<server>.<metric>
#
# You could configure some aggregations like so:
#
#   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
#   <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
#
# As an example, if the following metrics are received:
#
#   prod.applications.apache.www01.requests
#   prod.applications.apache.www02.requests
#
# They would all go into the same aggregation buffer and after 60 seconds the
# aggregate metric 'prod.applications.apache.all.requests' would be calculated
# by summing their values.
#
# Template components such as <env> will match everything up to the next dot.
# To match multiple metric components including the dots, use <<metric>> in the
# input template:
#
#   <env>.applications.<app>.all.<app_metric> (60) = sum <env>.applications.<app>.*.<<app_metric>>
#
# Note that any time this file is modified, it will be re-read automatically.
docker/blocks/graphite1/conf/opt/graphite/conf/blacklist.conf (new file, 5 lines)
@@ -0,0 +1,5 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, any metrics received which
# match one of these expressions will be dropped
# This file is reloaded automatically when changes are made
^some\.noisy\.metric\.prefix\..*
docker/blocks/graphite1/conf/opt/graphite/conf/carbon.amqp.conf (new file, 75 lines)
@@ -0,0 +1,75 @@
# This is a configuration file with AMQP enabled

[cache]
LOCAL_DATA_DIR =

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries gets more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

# Enable AMQP if you want to receive metrics using your amqp broker
ENABLE_AMQP = True

# Verbose means a line will be logged for every metric received
# useful for testing
AMQP_VERBOSE = True

# your credentials for the amqp server
# AMQP_USER = guest
# AMQP_PASSWORD = guest

# the network settings for the amqp server
# AMQP_HOST = localhost
# AMQP_PORT = 5672

# if you want to include the metric name as part of the message body
# instead of as the routing key, set this to True
# AMQP_METRIC_NAME_IN_BODY = False

# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration; you have to specify distinct
# interfaces and ports for the listeners.

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf (new file, 359 lines)
@@ -0,0 +1,359 @@
[cache]
# Configure carbon directories.
#
# OS environment variables can be used to tell carbon where graphite is
# installed, where to read configuration from and where to write data.
#
#   GRAPHITE_ROOT        - Root directory of the graphite installation.
#                          Defaults to ../
#   GRAPHITE_CONF_DIR    - Configuration directory (where this file lives).
#                          Defaults to $GRAPHITE_ROOT/conf/
#   GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
#                          Defaults to $GRAPHITE_ROOT/storage/
#
# To change other directory paths, add settings to this file. The following
# configuration variables are available with these default values:
#
#   STORAGE_DIR    = $GRAPHITE_STORAGE_DIR
#   LOCAL_DATA_DIR = STORAGE_DIR/whisper/
#   WHITELISTS_DIR = STORAGE_DIR/lists/
#   CONF_DIR       = STORAGE_DIR/conf/
#   LOG_DIR        = STORAGE_DIR/log/
#   PID_DIR        = STORAGE_DIR/
#
# For FHS style directory structures, use:
#
#   STORAGE_DIR = /var/lib/carbon/
#   CONF_DIR    = /etc/carbon/
#   LOG_DIR     = /var/log/carbon/
#   PID_DIR     = /var/run/
#
#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/

# Enable daily log rotation. If disabled, a kill -HUP can be used after a manual rotate
ENABLE_LOGROTATION = True

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =
#
# NOTE: The above settings must be set under [relay] and [aggregator]
#       to take effect for those daemons as well

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries gets more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 500

# If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a
# stop/shutdown is initiated. This helps when MAX_UPDATES_PER_SECOND is
# relatively low and carbon has cached a lot of updates; it enables the carbon
# daemon to shutdown more quickly.
# MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = 50

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

# Set this to True to enable the UDP listener. By default this is off
# because it is very common to run multiple carbon daemons and managing
# another (rarely used) port for every carbon instance is not fun.
ENABLE_UDP_LISTENER = False
UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# Per security concerns outlined in Bug #817247 the pickle receiver
# will use a more secure and slightly less efficient unpickler.
# Set this to True to revert to the old-fashioned insecure unpickler.
USE_INSECURE_UNPICKLER = False

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

# Set this to False to drop datapoints received after the cache
# reaches MAX_CACHE_SIZE. If this is True (the default) then sockets
# over which metrics are received will temporarily stop accepting
# data until the cache size falls below 95% MAX_CACHE_SIZE.
USE_FLOW_CONTROL = True

# By default, carbon-cache will log every whisper update and cache hit. This can be excessive and
# degrade performance if logging on the same volume as the whisper data is stored.
LOG_UPDATES = False
LOG_CACHE_HITS = False
LOG_CACHE_QUEUE_SORTS = True

# The thread that writes metrics to disk can use one of the following strategies
# determining the order in which metrics are removed from cache and flushed to
# disk. The default option preserves the same behavior as has been historically
# available in version 0.9.10.
#
# sorted - All metrics in the cache will be counted and an ordered list of
# them will be sorted according to the number of datapoints in the cache at the
# moment of the list's creation. Metrics will then be flushed from the cache to
# disk in that order.
#
# max - The writer thread will always pop and flush the metric from cache
# that has the most datapoints. This will give a strong flush preference to
# frequently updated metrics and will also reduce random file-io. Infrequently
# updated metrics may only ever be persisted to disk at daemon shutdown if
# there are a large number of metrics which receive very frequent updates OR if
# disk i/o is very slow.
#
# naive - Metrics will be flushed from the cache to disk in an unordered
# fashion. This strategy may be desirable in situations where the storage for
# whisper files is solid state, CPU resources are very limited or deference to
# the OS's i/o scheduler is expected to compensate for the random write
# pattern.
#
CACHE_WRITE_STRATEGY = sorted

# On some systems it is desirable for whisper to write synchronously.
# Set this option to True if you'd like to try this. Basically it will
# shift the onus of buffering writes from the kernel into carbon's cache.
WHISPER_AUTOFLUSH = False

# By default new Whisper files are created pre-allocated with the data region
# filled with zeros to prevent fragmentation and speed up contiguous reads and
# writes (which are common). Enabling this option will cause Whisper to create
# the file sparsely instead. Enabling this option may allow a large increase of
# MAX_CREATES_PER_MINUTE but may have longer term performance implications
# depending on the underlying storage configuration.
# WHISPER_SPARSE_CREATE = False

# Only beneficial on linux filesystems that support the fallocate system call.
# It maintains the benefits of contiguous reads/writes, but with a potentially
# much faster creation speed, by allowing the kernel to handle the block
# allocation and zero-ing. Enabling this option may allow a large increase of
# MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported
# this option will gracefully fallback to standard POSIX file access methods.
WHISPER_FALLOCATE_CREATE = True

# Enabling this option will cause Whisper to lock each Whisper file it writes
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files
# WHISPER_LOCK_WRITES = False

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60

# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False

# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False

# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite
# AMQP_METRIC_NAME_IN_BODY = False

# The manhole interface allows you to SSH into the carbon daemon
# and get a python interpreter. BE CAREFUL WITH THIS! If you do
# something like time.sleep() in the interpreter, the whole process
# will sleep! This is *extremely* helpful in debugging, assuming
# you are familiar with the code. If you are not, please don't
# mess with this, you are asking for trouble :)
#
# ENABLE_MANHOLE = False
# MANHOLE_INTERFACE = 127.0.0.1
# MANHOLE_PORT = 7222
# MANHOLE_USER = admin
# MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE=

# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #

# To configure special settings for the carbon-cache instance 'b', uncomment this:
#[cache:b]
#LINE_RECEIVER_PORT = 2103
#PICKLE_RECEIVER_PORT = 2104
#CACHE_QUERY_PORT = 7102
# and any other settings you want to customize, defaults are inherited
# from [carbon] section.
# You can then specify the --instance=b option to manage this instance


[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2013
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2014

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# Carbon-relay has several options for metric routing controlled by RELAY_METHOD
#
# Use relay-rules.conf to route metrics to destinations based on pattern rules
#RELAY_METHOD = rules
#
# Use consistent-hashing for even distribution of metrics between destinations
#RELAY_METHOD = consistent-hashing
#
# Use consistent-hashing but take into account an aggregation-rules.conf shared
# by downstream carbon-aggregator daemons. This will ensure that all metrics
# that map to a given aggregation rule are sent to the same carbon-aggregator
# instance.
# Enable this for carbon-relays that send to a group of carbon-aggregators
#RELAY_METHOD = aggregated-consistent-hashing
RELAY_METHOD = rules

# If you use consistent-hashing you can add redundancy by replicating every
# datapoint to more than one machine.
REPLICATION_FACTOR = 1

# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The general form is IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
#
# If using RELAY_METHOD = rules, all destinations used in relay-rules.conf
# must be defined in this list
DESTINATIONS = 127.0.0.1:2004

# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500
MAX_QUEUE_SIZE = 10000

# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60


[aggregator]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2023

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2024

# Set to false to disable logging of successful connections
LOG_LISTENER_CONNECTIONS = True

# If set true, metrics received will be forwarded to DESTINATIONS in addition to
# the output of the aggregation rules. If set false the carbon-aggregator will
# only ever send the output of aggregation.
FORWARD_ALL = True

# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
DESTINATIONS = 127.0.0.1:2004

# If you want to add redundancy to your data by replicating every
# datapoint to more than one machine, increase this.
REPLICATION_FACTOR = 1

# This is the maximum number of datapoints that can be queued up
# for a single destination. Once this limit is hit, we will
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
# we will drop any subsequently received datapoints.
MAX_QUEUE_SIZE = 10000

# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True

# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500

# This defines how many datapoints the aggregator remembers for
# each metric. Aggregation only happens for datapoints that fall in
# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
MAX_AGGREGATION_INTERVALS = 5

# By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back
# aggregated data points once every rule.frequency seconds, on a per-rule basis.
# Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points
# every N seconds, independent of rule frequency. This is useful, for example,
# to be able to query partially aggregated metrics from carbon-cache without
# having to first wait rule.frequency seconds.
# WRITE_BACK_FREQUENCY = 0

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
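The commented [cache:b] block above is how extra cache instances are declared: they inherit the defaults and override only ports. Per the note in the file, each instance is then managed with --instance; a sketch:

```bash
# Start the default cache instance plus a second one using [cache:b] overrides
/opt/graphite/bin/carbon-cache.py start
/opt/graphite/bin/carbon-cache.py --instance=b start
```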
docker/blocks/graphite1/conf/opt/graphite/conf/dashboard.conf (new file, 57 lines)
@@ -0,0 +1,57 @@
# This configuration file controls the behavior of the Dashboard UI, available
# at http://my-graphite-server/dashboard/.
#
# This file must contain a [ui] section that defines values for all of the
# following settings.
[ui]
default_graph_width = 400
default_graph_height = 250
automatic_variants = true
refresh_interval = 60
autocomplete_delay = 375
merge_hover_delay = 750

# You can set this to 'default', 'white', or a custom theme name.
# To create a custom theme, copy the dashboard-default.css file
# to dashboard-myThemeName.css in the content/css directory and
# modify it to your liking.
theme = default

[keyboard-shortcuts]
toggle_toolbar = ctrl-z
toggle_metrics_panel = ctrl-space
erase_all_graphs = alt-x
save_dashboard = alt-s
completer_add_metrics = alt-enter
completer_del_metrics = alt-backspace
give_completer_focus = shift-space

# These settings apply to the UI as a whole, all other sections in this file
# pertain only to specific metric types.
#
# The dashboard presents only metrics that fall into specified naming schemes
# defined in this file. This creates a simpler, more targeted view of the
# data. The general form for defining a naming scheme is as follows:
#
#[Metric Type]
#scheme = basis.path.<field1>.<field2>.<fieldN>
#field1.label = Foo
#field2.label = Bar
#
#
# Where each <field> will be displayed as a dropdown box
# in the UI and the remaining portion of the namespace
# shown in the Metric Selector panel. The .label options set the labels
# displayed for each dropdown.
#
# For example:
#
#[Sales]
#scheme = sales.<channel>.<type>.<brand>
#channel.label = Channel
#type.label = Product Type
#brand.label = Brand
#
# This defines a 'Sales' metric type that uses 3 dropdowns in the Context Selector
# (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc)
# will be available in the Metric Selector (upper-right panel).
docker/blocks/graphite1/conf/opt/graphite/conf/graphTemplates.conf (new file, 38 lines)
@@ -0,0 +1,38 @@
[default]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False

[noc]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,yellow,purple,brown,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False

[plain]
background = white
foreground = black
minorLine = grey
majorLine = rose

[summary]
background = black
lineColors = #6666ff, #66ff66, #ff6666

[alphas]
background = white
foreground = black
majorLine = grey
minorLine = rose
lineColors = 00ff00aa,ff000077,00337799
docker/blocks/graphite1/conf/opt/graphite/conf/relay-rules.conf (new file, 21 lines)
@@ -0,0 +1,21 @@
# Relay destination rules for carbon-relay. Entries are scanned in order,
# and the first pattern a metric matches will cause processing to cease after sending
# unless `continue` is set to true
#
#  [name]
#  pattern = <regex>
#  destinations = <list of destination addresses>
#  continue = <boolean>  # default: False
#
#  name: Arbitrary unique name to identify the rule
#  pattern: Regex pattern to match against the metric name
#  destinations: Comma-separated list of destinations.
#    ex: 127.0.0.1, 10.1.2.3:2004, 10.1.2.4:2004:a, myserver.mydomain.com
#  continue: Continue processing rules if this rule matches (default: False)

# You must have exactly one section with 'default = true'
# Note that all destinations listed must also exist in carbon.conf
# in the DESTINATIONS setting in the [relay] section
[default]
default = true
destinations = 127.0.0.1:2004:a, 127.0.0.1:2104:b
docker/blocks/graphite1/conf/opt/graphite/conf/rewrite-rules.conf (new file, 18 lines)
@@ -0,0 +1,18 @@
# This file defines regular expression patterns that can be used to
# rewrite metric names in a search & replace fashion. It consists of two
# sections, [pre] and [post]. The rules in the pre section are applied to
# metric names as soon as they are received. The post rules are applied
# after aggregation has taken place.
#
# The general form of each rule is as follows:
#
# regex-pattern = replacement-text
#
# For example:
#
# [post]
# _sum$ =
# _avg$ =
#
# These rules would strip off a suffix of _sum or _avg from any metric names
# after aggregation.
docker/blocks/graphite1/conf/opt/graphite/conf/storage-aggregation.conf (new file, 43 lines)
@@ -0,0 +1,43 @@
# Aggregation methods for whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds
#
#  [name]
#  pattern = <regex>
#  xFilesFactor = <float between 0 and 1>
#  aggregationMethod = <average|sum|last|max|min>
#
#  name: Arbitrary unique name for the rule
#  pattern: Regex pattern to match against the metric name
#  xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
#  aggregationMethod: function to apply to data points for aggregation
#
[min]
pattern = \.lower$
xFilesFactor = 0.1
aggregationMethod = min

[max]
pattern = \.upper(_\d+)?$
xFilesFactor = 0.1
aggregationMethod = max

[sum]
pattern = \.sum$
xFilesFactor = 0
aggregationMethod = sum

[count]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum

[count_legacy]
pattern = ^stats_counts.*
xFilesFactor = 0
aggregationMethod = sum

[default_average]
pattern = .*
xFilesFactor = 0.3
aggregationMethod = average
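Since the first matching rule wins, order matters here; a quick way to check which section a metric name would hit is to replay the regex by hand:

```bash
# stats.timers.api.upper_90 ends in .upper_90, so it hits [max],
# not the catch-all [default_average]
echo "stats.timers.api.upper_90" | grep -qE '\.upper(_\d+)?$' && echo "matches [max]"
```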
docker/blocks/graphite1/conf/opt/graphite/conf/storage-schemas.conf (new file, 17 lines)
@@ -0,0 +1,17 @@
# Schema definitions for Whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds.
[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y

[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d

[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y

[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y
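Retentions read as precision:duration, so the [default] rule keeps 10-second points for a day, 1-minute points for a week, and 10-minute points for a year. The whisper package installed by the Dockerfile ships a small inspector to confirm what a created file actually got:

```bash
# Print the archive precision/retention of a metric created under [default]
whisper-info.py /opt/graphite/storage/whisper/test/foo.wsp
```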
docker/blocks/graphite1/conf/opt/graphite/conf/whitelist.conf (new file, 6 lines)
@@ -0,0 +1,6 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, only metrics received which
# match one of these expressions will be persisted. If this file is empty or
# missing, all metrics will pass through.
# This file is reloaded automatically when changes are made
.*
docker/blocks/graphite1/conf/opt/graphite/webapp/graphite/app_settings.py (new file, 94 lines)
@@ -0,0 +1,94 @@
"""Copyright 2008 Orbitz WorldWide

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""

# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
from os.path import dirname, join, abspath


# Django settings below, do not touch!
APPEND_SLASH = False
TEMPLATE_DEBUG = False

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(dirname(abspath(__file__)), 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'

# Absolute path to the directory that holds media.
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''

MIDDLEWARE_CLASSES = (
    'graphite.middleware.LogExceptionsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.gzip.GZipMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'graphite.urls'

INSTALLED_APPS = (
    'graphite.metrics',
    'graphite.render',
    'graphite.browser',
    'graphite.composer',
    'graphite.account',
    'graphite.dashboard',
    'graphite.whitelist',
    'graphite.events',
    'graphite.url_shortener',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.staticfiles',
    'tagging',
)

AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']

GRAPHITE_WEB_APP_SETTINGS_LOADED = True

STATIC_URL = '/static/'

STATIC_ROOT = '/opt/graphite/static/'
@@ -0,0 +1,215 @@
|
||||
## Graphite local_settings.py
|
||||
# Edit this file to customize the default Graphite webapp settings
|
||||
#
|
||||
# Additional customizations to Django settings can be added to this file as well
|
||||
|
||||
#####################################
|
||||
# General Configuration #
|
||||
#####################################
|
||||
# Set this to a long, random unique string to use as a secret key for this
|
||||
# install. This key is used for salting of hashes used in auth tokens,
|
||||
# CRSF middleware, cookie storage, etc. This should be set identically among
|
||||
# instances if used behind a load balancer.
|
||||
#SECRET_KEY = 'UNSAFE_DEFAULT'
|
||||
|
||||
# In Django 1.5+ set this to the list of hosts your graphite instances is
|
||||
# accessible as. See:
|
||||
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
|
||||
#ALLOWED_HOSTS = [ '*' ]
|
||||
|
||||
# Set your local timezone (Django's default is America/Chicago)
|
||||
# If your graphs appear to be offset by a couple hours then this probably
|
||||
# needs to be explicitly set to your local timezone.
|
||||
#TIME_ZONE = 'America/Los_Angeles'
|
||||
|
||||
# Override this to provide documentation specific to your Graphite deployment
|
||||
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
|
||||
|
||||
# Logging
|
||||
#LOG_RENDERING_PERFORMANCE = True
|
||||
#LOG_CACHE_PERFORMANCE = True
|
||||
#LOG_METRIC_ACCESS = True
|
||||
|
||||
# Enable full debug page display on exceptions (Internal Server Error pages)
|
||||
#DEBUG = True
|
||||
|
||||
# If using RRD files and rrdcached, set to the address or socket of the daemon
|
||||
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
|
||||
|
||||
# This lists the memcached servers that will be used by this webapp.
|
||||
# If you have a cluster of webapps you should ensure all of them
|
||||
# have the *exact* same value for this setting. That will maximize cache
|
||||
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
|
||||
# memcached entirely.
|
||||
#
|
||||
# You should not use the loopback address (127.0.0.1) here if using clustering
|
||||
# as every webapp in the cluster should use the exact same values to prevent
|
||||
# unneeded cache misses. Set to [] to disable caching of images and fetched data
|
||||
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
|
||||
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute
|
||||
|
||||
|
||||
#####################################
|
||||
# Filesystem Paths #
|
||||
#####################################
|
||||
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
|
||||
# to somewhere else
|
||||
#GRAPHITE_ROOT = '/opt/graphite'
|
||||
|
||||
# Most installs done outside of a separate tree such as /opt/graphite will only
|
||||
# need to change these three settings. Note that the default settings for each
|
||||
# of these is relative to GRAPHITE_ROOT
|
||||
#CONF_DIR = '/opt/graphite/conf'
|
||||
#STORAGE_DIR = '/opt/graphite/storage'
|
||||
#CONTENT_DIR = '/opt/graphite/webapp/content'
|
||||
|
||||
# To further or fully customize the paths, modify the following. Note that the
|
||||
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
|
||||
#
|
||||
## Webapp config files
|
||||
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
|
||||
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'
|
||||
|
||||
## Data directories
|
||||
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
|
||||
#WHISPER_DIR = '/opt/graphite/storage/whisper'
|
||||
#RRD_DIR = '/opt/graphite/storage/rrd'
|
||||
#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
|
||||
#LOG_DIR = '/opt/graphite/storage/log/webapp'
|
||||
#INDEX_FILE = '/opt/graphite/storage/index' # Search index file
|
||||
|
||||
|
||||
#####################################
|
||||
# Email Configuration #
|
||||
#####################################
|
||||
# This is used for emailing rendered Graphs
|
||||
# Default backend is SMTP
|
||||
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
|
||||
#EMAIL_HOST = 'localhost'
|
||||
#EMAIL_PORT = 25
|
||||
#EMAIL_HOST_USER = ''
|
||||
#EMAIL_HOST_PASSWORD = ''
|
||||
#EMAIL_USE_TLS = False
|
||||
# To drop emails on the floor, enable the Dummy backend:
|
||||
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
|
||||
|
||||
|
||||
#####################################
|
||||
# Authentication Configuration #
|
||||
#####################################
|
||||
## LDAP / ActiveDirectory authentication setup
|
||||
#USE_LDAP_AUTH = True
|
||||
#LDAP_SERVER = "ldap.mycompany.com"
|
||||
#LDAP_PORT = 389
|
||||
# OR
|
||||
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
|
||||
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_PASS = "readonly_account_password"
|
||||
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
|
||||
#
|
||||
# If you want to further customize the ldap connection options you should
|
||||
# directly use ldap.set_option to set the ldap module's global options.
|
||||
# For example:
|
||||
#
|
||||
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
# See http://www.python-ldap.org/ for further details on these options.

## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
#USE_REMOTE_USER_AUTHENTICATION = True

# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'


##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to setup an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
#
# The following built-in database engines are available:
#  django.db.backends.postgresql          # Removed in Django 1.4
#  django.db.backends.postgresql_psycopg2
#  django.db.backends.mysql
#  django.db.backends.sqlite3
#  django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
#DATABASES = {
#    'default': {
#        'NAME': '/opt/graphite/storage/graphite.db',
#        'ENGINE': 'django.db.backends.sqlite3',
#        'USER': '',
#        'PASSWORD': '',
#        'HOST': '',
#        'PORT': ''
#    }
#}
#


#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]

## These are timeout values (in seconds) for requests to remote webapps
#REMOTE_STORE_FETCH_TIMEOUT = 6   # Timeout to fetch series data
#REMOTE_STORE_FIND_TIMEOUT = 2.5  # Timeout for metric find requests
#REMOTE_STORE_RETRY_DELAY = 60    # Time before retrying a failed remote webapp
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results

## Remote rendering settings
# Set to True to enable rendering of Graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0

# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use 127.0.0.1 here in most cases
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
#CARBONLINK_TIMEOUT = 1.0

#####################################
# Additional Django Settings #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *

import os

LOG_DIR = '/var/log/graphite'
SECRET_KEY = '$(date +%s | sha256sum | base64 | head -c 64)'

if (os.getenv("MEMCACHE_HOST") is not None):
|
||||
MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOST").split(",")
|
||||
|
||||
if (os.getenv("DEFAULT_CACHE_DURATION") is not None):
|
||||
DEFAULT_CACHE_DURATION = int(os.getenv("CACHE_DURATION"))
|
||||
|
||||
6
docker/blocks/graphite1/conf/opt/statsd/config.js
Normal file
@@ -0,0 +1,6 @@
{
  "graphiteHost": "127.0.0.1",
  "graphitePort": 2003,
  "port": 8125,
  "flushInterval": 10000
}
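For a quick sanity check of this statsd block, a client only needs to send one UDP datagram in the statsd line format to port 8125; statsd then flushes the aggregate to carbon every `flushInterval` (10s here). A minimal sketch, assuming statsd is reachable on localhost (the metric name `test.deploys` is invented for illustration):

```go
package main

import (
    "fmt"
    "net"
)

func main() {
    // statsd listens on UDP port 8125 per the config above.
    conn, err := net.Dial("udp", "127.0.0.1:8125")
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    // statsd line format: <bucket>:<value>|<type>; "c" marks a counter.
    fmt.Fprint(conn, "test.deploys:1|c")
}
```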
26
docker/blocks/graphite1/conf/usr/local/bin/django_admin_init.exp
Executable file
@@ -0,0 +1,26 @@
#!/usr/bin/env expect

set timeout -1
spawn /usr/local/bin/manage.sh

expect "Would you like to create one now" {
    send "yes\r"
}

expect "Username" {
    send "root\r"
}

expect "Email address:" {
    send "root.graphite@mailinator.com\r"
}

expect "Password:" {
    send "root\r"
}

expect "Password *:" {
    send "root\r"
}

expect "Superuser created successfully"
3
docker/blocks/graphite1/conf/usr/local/bin/manage.sh
Normal file
@@ -0,0 +1,3 @@
#!/bin/bash
PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings
PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
16
docker/blocks/graphite1/fig
Normal file
@@ -0,0 +1,16 @@
graphite:
  build: blocks/graphite1
  ports:
    - "8080:80"
    - "2003:2003"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro

fake-graphite-data:
  image: grafana/fake-data-gen
  net: bridge
  environment:
    FD_DATASOURCE: graphite
    FD_PORT: 2003

76
docker/blocks/graphite1/files/carbon.conf
Normal file
@@ -0,0 +1,76 @@
[cache]
LOCAL_DATA_DIR = /opt/graphite/storage/whisper/

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries get more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

LOG_UPDATES = False

# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False

# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False

# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite

# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #

# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration; you have to specify distinct
# interfaces and ports for the listeners.

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
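The line receiver configured above speaks carbon's plaintext protocol on TCP port 2003: one `metric.path value unix-timestamp` triple per line. A minimal sketch of a sender, assuming carbon is reachable on localhost (the metric name is invented for illustration):

```go
package main

import (
    "fmt"
    "net"
    "time"
)

func main() {
    // Carbon's plaintext line receiver (LINE_RECEIVER_PORT above).
    conn, err := net.Dial("tcp", "127.0.0.1:2003")
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    // One datapoint per line: "<path> <value> <timestamp>\n".
    fmt.Fprintf(conn, "test.example.value 42 %d\n", time.Now().Unix())
}
```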
102
docker/blocks/graphite1/files/events_views.py
Normal file
@@ -0,0 +1,102 @@
import datetime
import time

from django.utils.timezone import get_current_timezone
from django.core.urlresolvers import get_script_prefix
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from pytz import timezone

from graphite.util import json
from graphite.events import models
from graphite.render.attime import parseATTime


def to_timestamp(dt):
    return time.mktime(dt.timetuple())


class EventEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return to_timestamp(obj)
        return json.JSONEncoder.default(self, obj)


def view_events(request):
    if request.method == "GET":
        context = { 'events' : fetch(request),
                    'slash' : get_script_prefix()
                  }
        return render_to_response("events.html", context)
    else:
        return post_event(request)

def detail(request, event_id):
    e = get_object_or_404(models.Event, pk=event_id)
    context = { 'event' : e,
                'slash' : get_script_prefix()
              }
    return render_to_response("event.html", context)


def post_event(request):
    if request.method == 'POST':
        event = json.loads(request.body)
        assert isinstance(event, dict)

        values = {}
        values["what"] = event["what"]
        values["tags"] = event.get("tags", None)
        values["when"] = datetime.datetime.fromtimestamp(
            event.get("when", time.time()))
        if "data" in event:
            values["data"] = event["data"]

        e = models.Event(**values)
        e.save()

        return HttpResponse(status=200)
    else:
        return HttpResponse(status=405)

def get_data(request):
    if 'jsonp' in request.REQUEST:
        response = HttpResponse(
            "%s(%s)" % (request.REQUEST.get('jsonp'),
                        json.dumps(fetch(request), cls=EventEncoder)),
            mimetype='text/javascript')
    else:
        response = HttpResponse(
            json.dumps(fetch(request), cls=EventEncoder),
            mimetype="application/json")
    return response

def fetch(request):
    #XXX we need to move to USE_TZ=True to get rid of naive-time conversions
    def make_naive(dt):
        if 'tz' in request.GET:
            tz = timezone(request.GET['tz'])
        else:
            tz = get_current_timezone()
        local_dt = dt.astimezone(tz)
        if hasattr(local_dt, 'normalize'):
            local_dt = local_dt.normalize()
        return local_dt.replace(tzinfo=None)

    if request.GET.get("from", None) is not None:
        time_from = make_naive(parseATTime(request.GET["from"]))
    else:
        time_from = datetime.datetime.fromtimestamp(0)

    if request.GET.get("until", None) is not None:
        time_until = make_naive(parseATTime(request.GET["until"]))
    else:
        time_until = datetime.datetime.now()

    tags = request.GET.get("tags", None)
    if tags is not None:
        tags = request.GET.get("tags").split(" ")

    return [x.as_dict() for x in
            models.Event.find_events(time_from, time_until, tags=tags)]
20
docker/blocks/graphite1/files/initial_data.json
Normal file
@@ -0,0 +1,20 @@
[
  {
    "pk": 1,
    "model": "auth.user",
    "fields": {
      "username": "admin",
      "first_name": "",
      "last_name": "",
      "is_active": true,
      "is_superuser": true,
      "is_staff": true,
      "last_login": "2011-09-20 17:02:14",
      "groups": [],
      "user_permissions": [],
      "password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236",
      "email": "root@example.com",
      "date_joined": "2011-09-20 17:02:14"
    }
  }
]
42
docker/blocks/graphite1/files/local_settings.py
Normal file
@@ -0,0 +1,42 @@
# Edit this file to override the default graphite settings, do not edit settings.py

# Turn on debugging and restart apache if you ever see an "Internal Server Error" page
#DEBUG = True

# Set your local timezone (django will try to figure this out automatically)
TIME_ZONE = 'UTC'

# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely
#MEMCACHE_HOSTS = ['127.0.0.1:11211']

# Sometimes you need to do a lot of rendering work but cannot share your storage mount
#REMOTE_RENDERING = True
#RENDERING_HOSTS = ['fastserver01','fastserver02']
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True

# If you've got more than one backend server they should all be listed here
#CLUSTER_SERVERS = []

# Override this if you need to provide documentation specific to your graphite deployment
#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite"

# Enable email-related features
#SMTP_SERVER = "mail.mycompany.com"

# LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"

# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!)
#DATABASE_ENGINE = 'mysql' # or 'postgres'
#DATABASE_NAME = 'graphite'
#DATABASE_USER = 'graphite'
#DATABASE_PASSWORD = 'graphite-is-awesome'
#DATABASE_HOST = 'mysql.mycompany.com'
#DATABASE_PORT = '3306'
1
docker/blocks/graphite1/files/my_htpasswd
Normal file
@@ -0,0 +1 @@
grafana:$apr1$4R/20xhC$8t37jPP5dbcLr48btdkU//
70
docker/blocks/graphite1/files/nginx.conf
Normal file
@@ -0,0 +1,70 @@
daemon off;
user www-data;
worker_processes 1;
pid /var/run/nginx.pid;

events {
  worker_connections 1024;
}

http {
  sendfile on;
  tcp_nopush on;
  tcp_nodelay on;
  keepalive_timeout 65;
  types_hash_max_size 2048;
  server_tokens off;

  server_names_hash_bucket_size 32;

  include /etc/nginx/mime.types;
  default_type application/octet-stream;

  access_log /var/log/nginx/access.log;
  error_log /var/log/nginx/error.log;

  gzip on;
  gzip_disable "msie6";

  server {
    listen 80 default_server;
    server_name _;

    open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m;

    location / {
      proxy_pass http://127.0.0.1:8000;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
      proxy_set_header X-Forwarded-Server $host;
      proxy_set_header X-Forwarded-Host $host;
      proxy_set_header Host $host;

      client_max_body_size 10m;
      client_body_buffer_size 128k;

      proxy_connect_timeout 90;
      proxy_send_timeout 90;
      proxy_read_timeout 90;

      proxy_buffer_size 4k;
      proxy_buffers 4 32k;
      proxy_busy_buffers_size 64k;
      proxy_temp_file_write_size 64k;
    }

    add_header Access-Control-Allow-Origin "*";
    add_header Access-Control-Allow-Methods "GET, OPTIONS";
    add_header Access-Control-Allow-Headers "origin, authorization, accept";

    location /content {
      alias /opt/graphite/webapp/content;
    }

    location /media {
      alias /usr/share/pyshared/django/contrib/admin/media;
    }
  }
}
8
docker/blocks/graphite1/files/statsd_config.js
Normal file
@@ -0,0 +1,8 @@
{
  graphitePort: 2003,
  graphiteHost: "127.0.0.1",
  port: 8125,
  mgmt_port: 8126,
  backends: ['./backends/graphite'],
  debug: true
}
19
docker/blocks/graphite1/files/storage-aggregation.conf
Normal file
@@ -0,0 +1,19 @@
[min]
pattern = \.min$
xFilesFactor = 0.1
aggregationMethod = min

[max]
pattern = \.max$
xFilesFactor = 0.1
aggregationMethod = max

[sum]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum

[default_average]
pattern = .*
xFilesFactor = 0.5
aggregationMethod = average
16
docker/blocks/graphite1/files/storage-schemas.conf
Normal file
@@ -0,0 +1,16 @@
[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y

[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d

[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y

[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y

26
docker/blocks/graphite1/files/supervisord.conf
Normal file
@@ -0,0 +1,26 @@
[supervisord]
nodaemon = true
environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf'

[program:nginx]
command = /usr/sbin/nginx
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true

[program:carbon-cache]
;user = www-data
command = /opt/graphite/bin/carbon-cache.py --debug start
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true

[program:graphite-webapp]
;user = www-data
directory = /opt/graphite/webapp
environment = PYTHONPATH='/opt/graphite/webapp'
command = /usr/bin/gunicorn_django -b127.0.0.1:8000 -w2 graphite/settings.py
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true

@@ -6,3 +6,4 @@ postgrestest:
    POSTGRES_DATABASE: grafana
  ports:
    - "5432:5432"
  command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql

@@ -15,7 +15,7 @@ weight = 1

Description | Download
------------ | -------------
Stable for Debian-based Linux | [grafana_4.4.2_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.2_amd64.deb)
Stable for Debian-based Linux | [grafana_4.4.3_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@@ -23,9 +23,9 @@ installation.
## Install Stable

```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.2_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_4.4.2_amd64.deb
sudo dpkg -i grafana_4.4.3_amd64.deb
```

<!--

@@ -15,7 +15,7 @@ weight = 2

Description | Download
------------ | -------------
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.4.2 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.2-1.x86_64.rpm)
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.4.3 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.
@@ -24,19 +24,19 @@ installation.

You can install Grafana using Yum directly.

    $ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.2-1.x86_64.rpm
    $ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm

Or install manually using `rpm`.

#### On CentOS / Fedora / Redhat:

    $ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.2-1.x86_64.rpm
    $ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm
    $ sudo yum install initscripts fontconfig
    $ sudo rpm -Uvh grafana-4.4.2-1.x86_64.rpm
    $ sudo rpm -Uvh grafana-4.4.3-1.x86_64.rpm

#### On OpenSuse:

    $ sudo rpm -i --nodeps grafana-4.4.1-1.x86_64.rpm
    $ sudo rpm -i --nodeps grafana-4.4.3-1.x86_64.rpm

## Install via YUM Repository


@@ -13,7 +13,7 @@ weight = 3

Description | Download
------------ | -------------
Latest stable package for Windows | [grafana.4.4.1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.1.windows-x64.zip)
Latest stable package for Windows | [grafana.4.4.3.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3.windows-x64.zip)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.

@@ -43,7 +43,6 @@ var pidFile = flag.String("pidfile", "", "path to pid file")
var exitChan = make(chan int)

func init() {
    runtime.GOMAXPROCS(runtime.NumCPU())
}

func main() {

@@ -4,6 +4,7 @@ import (
    "bytes"
    "encoding/json"
    "net/http"
    "runtime"
    "strings"
    "time"

@@ -66,10 +67,10 @@ func updateTotalStats() {
            return
        }

        M_StatTotal_Dashboards.Update(statsQuery.Result.DashboardCount)
        M_StatTotal_Users.Update(statsQuery.Result.UserCount)
        M_StatTotal_Playlists.Update(statsQuery.Result.PlaylistCount)
        M_StatTotal_Orgs.Update(statsQuery.Result.OrgCount)
        M_StatTotal_Dashboards.Update(statsQuery.Result.Dashboards)
        M_StatTotal_Users.Update(statsQuery.Result.Users)
        M_StatTotal_Playlists.Update(statsQuery.Result.Playlists)
        M_StatTotal_Orgs.Update(statsQuery.Result.Orgs)
    }
}

@@ -86,6 +87,8 @@ func sendUsageStats() {
    report := map[string]interface{}{
        "version": version,
        "metrics": metrics,
        "os":      runtime.GOOS,
        "arch":    runtime.GOARCH,
    }

    statsQuery := m.GetSystemStatsQuery{}
@@ -94,14 +97,16 @@ func sendUsageStats() {
        return
    }

    metrics["stats.dashboards.count"] = statsQuery.Result.DashboardCount
    metrics["stats.users.count"] = statsQuery.Result.UserCount
    metrics["stats.orgs.count"] = statsQuery.Result.OrgCount
    metrics["stats.playlist.count"] = statsQuery.Result.PlaylistCount
    metrics["stats.dashboards.count"] = statsQuery.Result.Dashboards
    metrics["stats.users.count"] = statsQuery.Result.Users
    metrics["stats.orgs.count"] = statsQuery.Result.Orgs
    metrics["stats.playlist.count"] = statsQuery.Result.Playlists
    metrics["stats.plugins.apps.count"] = len(plugins.Apps)
    metrics["stats.plugins.panels.count"] = len(plugins.Panels)
    metrics["stats.plugins.datasources.count"] = len(plugins.DataSources)
    metrics["stats.alerts.count"] = statsQuery.Result.AlertCount
    metrics["stats.alerts.count"] = statsQuery.Result.Alerts
    metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers
    metrics["stats.datasources.count"] = statsQuery.Result.Datasources

    dsStats := m.GetDataSourceStatsQuery{}
    if err := bus.Dispatch(&dsStats); err != nil {

@@ -62,6 +62,15 @@ func GetContextHandler() macaron.Handler {
        ctx.Data["ctx"] = ctx

        c.Map(ctx)

        // update last seen at
        // update last seen every 5min
        if ctx.ShouldUpdateLastSeenAt() {
            ctx.Logger.Debug("Updating last user_seen_at", "user_id", ctx.UserId)
            if err := bus.Dispatch(&m.UpdateUserLastSeenAtCommand{UserId: ctx.UserId}); err != nil {
                ctx.Logger.Error("Failed to update last_seen_at", "error", err)
            }
        }
    }
}

@@ -99,7 +108,7 @@ func initContextWithUserSessionCookie(ctx *Context, orgId int64) bool {

    query := m.GetSignedInUserQuery{UserId: userId, OrgId: orgId}
    if err := bus.Dispatch(&query); err != nil {
        ctx.Logger.Error("Failed to get user with id", "userId", userId)
        ctx.Logger.Error("Failed to get user with id", "userId", userId, "error", err)
        return false
    }


@@ -108,4 +108,6 @@ type OrgUserDTO struct {
    Email         string    `json:"email"`
    Login         string    `json:"login"`
    Role          string    `json:"role"`
    LastSeenAt    time.Time `json:"lastSeenAt"`
    LastSeenAtAge string    `json:"lastSeenAtAge"`
}

@@ -1,11 +1,13 @@
package models

type SystemStats struct {
    DashboardCount int64
    UserCount      int64
    OrgCount       int64
    PlaylistCount  int64
    AlertCount     int64
    Dashboards  int64
    Datasources int64
    Users       int64
    ActiveUsers int64
    Orgs        int64
    Playlists   int64
    Alerts      int64
}

type DataSourceStats struct {
@@ -22,15 +24,16 @@ type GetDataSourceStatsQuery struct {
}

type AdminStats struct {
    UserCount       int `json:"user_count"`
    OrgCount        int `json:"org_count"`
    DashboardCount  int `json:"dashboard_count"`
    DbSnapshotCount int `json:"db_snapshot_count"`
    DbTagCount      int `json:"db_tag_count"`
    DataSourceCount int `json:"data_source_count"`
    PlaylistCount   int `json:"playlist_count"`
    StarredDbCount  int `json:"starred_db_count"`
    AlertCount      int `json:"alert_count"`
    Users       int `json:"users"`
    Orgs        int `json:"orgs"`
    Dashboards  int `json:"dashboards"`
    Snapshots   int `json:"snapshots"`
    Tags        int `json:"tags"`
    Datasources int `json:"datasources"`
    Playlists   int `json:"playlists"`
    Stars       int `json:"stars"`
    Alerts      int `json:"alerts"`
    ActiveUsers int `json:"activeUsers"`
}

type GetAdminStatsQuery struct {

@@ -35,6 +35,7 @@ type User struct {

    Created    time.Time
    Updated    time.Time
    LastSeenAt time.Time
}

func (u *User) NameOrFallback() string {
@@ -127,6 +128,7 @@ type GetUserProfileQuery struct {
}

type SearchUsersQuery struct {
    OrgId int64
    Query string
    Page  int
    Limit int
@@ -160,6 +162,15 @@ type SignedInUser struct {
    ApiKeyId       int64
    IsGrafanaAdmin bool
    HelpFlags1     HelpFlags1
    LastSeenAt     time.Time
}

func (u *SignedInUser) ShouldUpdateLastSeenAt() bool {
    return u.UserId > 0 && time.Since(u.LastSeenAt) > time.Minute*5
}

type UpdateUserLastSeenAtCommand struct {
    UserId int64
}

type UserProfileDTO struct {
@@ -178,6 +189,8 @@ type UserSearchHitDTO struct {
    Login         string    `json:"login"`
    Email         string    `json:"email"`
    IsAdmin       bool      `json:"isAdmin"`
    LastSeenAt    time.Time `json:"lastSeenAt"`
    LastSeenAtAge string    `json:"lastSeenAtAge"`
}

type UserIdDTO struct {

@@ -103,4 +103,8 @@ func addUserMigrations(mg *Migrator) {
        {Name: "company", Type: DB_NVarchar, Length: 255, Nullable: true},
        {Name: "theme", Type: DB_NVarchar, Length: 255, Nullable: true},
    }))

    mg.AddMigration("Add last_seen_at column to user", NewAddColumnMigration(userV2, &Column{
        Name: "last_seen_at", Type: DB_DateTime, Nullable: true,
    }))
}

@@ -6,6 +6,7 @@ import (

    "github.com/grafana/grafana/pkg/bus"
    m "github.com/grafana/grafana/pkg/models"
    "github.com/grafana/grafana/pkg/util"
)

func init() {
@@ -71,11 +72,18 @@ func GetOrgUsers(query *m.GetOrgUsersQuery) error {
    sess := x.Table("org_user")
    sess.Join("INNER", "user", fmt.Sprintf("org_user.user_id=%s.id", x.Dialect().Quote("user")))
    sess.Where("org_user.org_id=?", query.OrgId)
    sess.Cols("org_user.org_id", "org_user.user_id", "user.email", "user.login", "org_user.role")
    sess.Cols("org_user.org_id", "org_user.user_id", "user.email", "user.login", "org_user.role", "user.last_seen_at")
    sess.Asc("user.email", "user.login")

    err := sess.Find(&query.Result)
    if err := sess.Find(&query.Result); err != nil {
        return err
    }

    for _, user := range query.Result {
        user.LastSeenAtAge = util.GetAgeString(user.LastSeenAt)
    }

    return nil
}

func RemoveOrgUser(cmd *m.RemoveOrgUserCommand) error {

@@ -53,7 +53,7 @@ func EnsureAdminUser() {
        return
    }

    if statsQuery.Result.UserCount > 0 {
    if statsQuery.Result.Users > 0 {
        return
    }

@@ -146,7 +146,7 @@ func getEngine() (*xorm.Engine, error) {
            DbCfg.Path = filepath.Join(setting.DataPath, DbCfg.Path)
        }
        os.MkdirAll(path.Dir(DbCfg.Path), os.ModePerm)
        cnnstr = "file:" + DbCfg.Path + "?cache=shared&mode=rwc&_loc=Local"
        cnnstr = "file:" + DbCfg.Path + "?cache=shared&mode=rwc"
    default:
        return nil, fmt.Errorf("Unknown database type: %s", DbCfg.Type)
    }

@@ -1,6 +1,8 @@
package sqlstore

import (
    "time"

    "github.com/grafana/grafana/pkg/bus"
    m "github.com/grafana/grafana/pkg/models"
)
@@ -11,6 +13,8 @@ func init() {
    bus.AddHandler("sql", GetAdminStats)
}

var activeUserTimeLimit time.Duration = time.Hour * 24 * 14

func GetDataSourceStats(query *m.GetDataSourceStatsQuery) error {
    var rawSql = `SELECT COUNT(*) as count, type FROM data_source GROUP BY type`
    query.Result = make([]*m.DataSourceStats, 0)
@@ -27,27 +31,35 @@ func GetSystemStats(query *m.GetSystemStatsQuery) error {
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("user") + `
        ) AS user_count,
        ) AS users,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("org") + `
        ) AS org_count,
        ) AS orgs,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("dashboard") + `
        ) AS dashboard_count,
        ) AS dashboards,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("data_source") + `
        ) AS datasources,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("playlist") + `
        ) AS playlist_count,
        ) AS playlists,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("alert") + `
        ) AS alert_count
        ) AS alerts,
        (
            SELECT COUNT(*) FROM ` + dialect.Quote("user") + ` where last_seen_at > ?
        ) as active_users
        `

    activeUserDeadlineDate := time.Now().Add(-activeUserTimeLimit)
    var stats m.SystemStats
    _, err := x.Sql(rawSql).Get(&stats)
    _, err := x.Sql(rawSql, activeUserDeadlineDate).Get(&stats)
    if err != nil {
        return err
    }
@@ -61,43 +73,48 @@ func GetAdminStats(query *m.GetAdminStatsQuery) error {
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("user") + `
        ) AS user_count,
        ) AS users,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("org") + `
        ) AS org_count,
        ) AS orgs,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("dashboard") + `
        ) AS dashboard_count,
        ) AS dashboards,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("dashboard_snapshot") + `
        ) AS db_snapshot_count,
        ) AS snapshots,
        (
            SELECT COUNT( DISTINCT ( ` + dialect.Quote("term") + ` ))
            FROM ` + dialect.Quote("dashboard_tag") + `
        ) AS db_tag_count,
        ) AS tags,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("data_source") + `
        ) AS data_source_count,
        ) AS datasources,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("playlist") + `
        ) AS playlist_count,
        ) AS playlists,
        (
            SELECT COUNT(DISTINCT ` + dialect.Quote("dashboard_id") + ` )
            FROM ` + dialect.Quote("star") + `
        ) AS starred_db_count,
            SELECT COUNT(*) FROM ` + dialect.Quote("star") + `
        ) AS stars,
        (
            SELECT COUNT(*)
            FROM ` + dialect.Quote("alert") + `
        ) AS alert_count
        ) AS alerts,
        (
            SELECT COUNT(*)
            from ` + dialect.Quote("user") + ` where last_seen_at > ?
        ) as active_users
        `

    activeUserDeadlineDate := time.Now().Add(-activeUserTimeLimit)

    var stats m.AdminStats
    _, err := x.Sql(rawSql).Get(&stats)
    _, err := x.Sql(rawSql, activeUserDeadlineDate).Get(&stats)
    if err != nil {
        return err
    }

@@ -22,6 +22,7 @@ func init() {
    bus.AddHandler("sql", GetUserByLogin)
    bus.AddHandler("sql", GetUserByEmail)
    bus.AddHandler("sql", SetUsingOrg)
    bus.AddHandler("sql", UpdateUserLastSeenAt)
    bus.AddHandler("sql", GetUserProfile)
    bus.AddHandler("sql", GetSignedInUser)
    bus.AddHandler("sql", SearchUsers)
@@ -260,6 +261,24 @@ func ChangeUserPassword(cmd *m.ChangeUserPasswordCommand) error {
    })
}

func UpdateUserLastSeenAt(cmd *m.UpdateUserLastSeenAtCommand) error {
    return inTransaction(func(sess *DBSession) error {
        if cmd.UserId <= 0 {
            return m.ErrUserNotFound
        }

        user := m.User{
            Id:         cmd.UserId,
            LastSeenAt: time.Now(),
        }

        if _, err := sess.Id(cmd.UserId).Update(&user); err != nil {
            return err
        }

        return nil
    })
}

func SetUsingOrg(cmd *m.SetUsingOrgCommand) error {
    getOrgsForUserCmd := &m.GetUserOrgListQuery{UserId: cmd.UserId}
    GetUserOrgList(getOrgsForUserCmd)
@@ -330,6 +349,7 @@ func GetSignedInUser(query *m.GetSignedInUserQuery) error {
        u.login as login,
        u.name as name,
        u.help_flags1 as help_flags1,
        u.last_seen_at as last_seen_at,
        org.name as org_name,
        org_user.role as org_role,
        org.id as org_id
@@ -367,27 +387,49 @@ func SearchUsers(query *m.SearchUsersQuery) error {
    query.Result = m.SearchUserQueryResult{
        Users: make([]*m.UserSearchHitDTO, 0),
    }

    queryWithWildcards := "%" + query.Query + "%"

    whereConditions := make([]string, 0)
    whereParams := make([]interface{}, 0)
    sess := x.Table("user")
    if query.Query != "" {
        sess.Where("email LIKE ? OR name LIKE ? OR login like ?", queryWithWildcards, queryWithWildcards, queryWithWildcards)

    if query.OrgId > 0 {
        whereConditions = append(whereConditions, "org_id = ?")
        whereParams = append(whereParams, query.OrgId)
    }

    if query.Query != "" {
        whereConditions = append(whereConditions, "(email LIKE ? OR name LIKE ? OR login like ?)")
        whereParams = append(whereParams, queryWithWildcards, queryWithWildcards, queryWithWildcards)
    }

    if len(whereConditions) > 0 {
        sess.Where(strings.Join(whereConditions, " AND "), whereParams...)
    }

    offset := query.Limit * (query.Page - 1)
    sess.Limit(query.Limit, offset)
    sess.Cols("id", "email", "name", "login", "is_admin")
    sess.Cols("id", "email", "name", "login", "is_admin", "last_seen_at")
    if err := sess.Find(&query.Result.Users); err != nil {
        return err
    }

    // get total
    user := m.User{}

    countSess := x.Table("user")
    if query.Query != "" {
        countSess.Where("email LIKE ? OR name LIKE ? OR login like ?", queryWithWildcards, queryWithWildcards, queryWithWildcards)

    if len(whereConditions) > 0 {
        countSess.Where(strings.Join(whereConditions, " AND "), whereParams...)
    }

    count, err := countSess.Count(&user)
    query.Result.TotalCount = count

    for _, user := range query.Result.Users {
        user.LastSeenAtAge = util.GetAgeString(user.LastSeenAt)
    }

    return err
}

@@ -74,6 +74,19 @@ func (m *MySqlMacroEngine) EvaluateMacro(name string, args []string) (string, er
            return "", fmt.Errorf("missing time column argument for macro %v", name)
        }
        return fmt.Sprintf("%s >= FROM_UNIXTIME(%d) AND %s <= FROM_UNIXTIME(%d)", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    case "__timeFrom":
        return fmt.Sprintf("FROM_UNIXTIME(%d)", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
    case "__timeTo":
        return fmt.Sprintf("FROM_UNIXTIME(%d)", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    case "__unixEpochFilter":
        if len(args) == 0 {
            return "", fmt.Errorf("missing time column argument for macro %v", name)
        }
        return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    case "__unixEpochFrom":
        return fmt.Sprintf("%d", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
    case "__unixEpochTo":
        return fmt.Sprintf("%d", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    default:
        return "", fmt.Errorf("Unknown macro %v", name)
    }

@@ -39,5 +39,60 @@ func TestMacroEngine(t *testing.T) {
            So(sql, ShouldEqual, "WHERE time_column >= FROM_UNIXTIME(18446744066914186738) AND time_column <= FROM_UNIXTIME(18446744066914187038)")
        })

        Convey("interpolate __timeFrom function", func() {
            engine := &MySqlMacroEngine{
                TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
            }

            sql, err := engine.Interpolate("select $__timeFrom(time_column)")
            So(err, ShouldBeNil)

            So(sql, ShouldEqual, "select FROM_UNIXTIME(18446744066914186738)")
        })

        Convey("interpolate __timeTo function", func() {
            engine := &MySqlMacroEngine{
                TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
            }

            sql, err := engine.Interpolate("select $__timeTo(time_column)")
            So(err, ShouldBeNil)

            So(sql, ShouldEqual, "select FROM_UNIXTIME(18446744066914187038)")
        })

        Convey("interpolate __unixEpochFilter function", func() {
            engine := &MySqlMacroEngine{
                TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
            }

            sql, err := engine.Interpolate("select $__unixEpochFilter(18446744066914186738)")
            So(err, ShouldBeNil)

            So(sql, ShouldEqual, "select 18446744066914186738 >= 18446744066914186738 AND 18446744066914186738 <= 18446744066914187038")
        })

        Convey("interpolate __unixEpochFrom function", func() {
            engine := &MySqlMacroEngine{
                TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
            }

            sql, err := engine.Interpolate("select $__unixEpochFrom()")
            So(err, ShouldBeNil)

            So(sql, ShouldEqual, "select 18446744066914186738")
        })

        Convey("interpolate __unixEpochTo function", func() {
            engine := &MySqlMacroEngine{
                TimeRange: &tsdb.TimeRange{From: "5m", To: "now"},
            }

            sql, err := engine.Interpolate("select $__unixEpochTo()")
            So(err, ShouldBeNil)

            So(sql, ShouldEqual, "select 18446744066914187038")
        })

    })
}

@@ -205,6 +205,8 @@ func (e MysqlExecutor) getTypedRowData(types []*sql.ColumnType, rows *core.Rows)
            values[i] = new(float32)
        case mysql.FieldTypeNameNewDecimal:
            values[i] = new(float64)
        case mysql.FieldTypeNameFloat:
            values[i] = new(float64)
        case mysql.FieldTypeNameTimestamp:
            values[i] = new(time.Time)
        case mysql.FieldTypeNameDateTime:
@@ -215,6 +217,20 @@ func (e MysqlExecutor) getTypedRowData(types []*sql.ColumnType, rows *core.Rows)
            values[i] = new(int16)
        case mysql.FieldTypeNameNULL:
            values[i] = nil
        case mysql.FieldTypeNameBit:
            values[i] = new([]byte)
        case mysql.FieldTypeNameBLOB:
            values[i] = new(string)
        case mysql.FieldTypeNameTinyBLOB:
            values[i] = new(string)
        case mysql.FieldTypeNameMediumBLOB:
            values[i] = new(string)
        case mysql.FieldTypeNameLongBLOB:
            values[i] = new(string)
        case mysql.FieldTypeNameString:
            values[i] = new(string)
        case mysql.FieldTypeNameDate:
            values[i] = new(string)
        default:
            return nil, fmt.Errorf("Database type %s not supported", stype.DatabaseTypeName())
        }

124
pkg/tsdb/mysql/mysql_test.go
Normal file
@@ -0,0 +1,124 @@
package mysql

import (
    "testing"
    "time"

    "github.com/go-xorm/xorm"
    "github.com/grafana/grafana/pkg/components/simplejson"
    "github.com/grafana/grafana/pkg/log"
    "github.com/grafana/grafana/pkg/services/sqlstore/sqlutil"
    "github.com/grafana/grafana/pkg/tsdb"
    . "github.com/smartystreets/goconvey/convey"
)

// To run this test, remove the Skip from SkipConvey
// and set up a MySQL db named grafana_tests and a user/password grafana/password
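// For reference, a minimal setup for that database could look like the
// following SQL (an illustrative sketch inferred from the comment above,
// not shipped with the repo):
//
//   CREATE DATABASE grafana_tests;
//   CREATE USER 'grafana'@'%' IDENTIFIED BY 'password';
//   GRANT ALL PRIVILEGES ON grafana_tests.* TO 'grafana'@'%';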
func TestMySQL(t *testing.T) {
    SkipConvey("MySQL", t, func() {
        x := InitMySQLTestDB(t)

        executor := &MysqlExecutor{
            engine: x,
            log:    log.New("tsdb.mysql"),
        }

        sess := x.NewSession()
        defer sess.Close()
        db := sess.DB()

        sql := "CREATE TABLE `mysql_types` ("
        sql += "`atinyint` tinyint(1),"
        sql += "`avarchar` varchar(3),"
        sql += "`achar` char(3),"
        sql += "`amediumint` mediumint,"
        sql += "`asmallint` smallint,"
        sql += "`abigint` bigint,"
        sql += "`aint` int(11),"
        sql += "`adouble` double(10,2),"
        sql += "`anewdecimal` decimal(10,2),"
        sql += "`afloat` float(10,2),"
        sql += "`atimestamp` timestamp NOT NULL,"
        sql += "`adatetime` datetime,"
        sql += "`atime` time,"
        // sql += "`ayear` year," // Crashes xorm when running cleandb
        sql += "`abit` bit(1),"
        sql += "`atinytext` tinytext,"
        sql += "`atinyblob` tinyblob,"
        sql += "`atext` text,"
        sql += "`ablob` blob,"
        sql += "`amediumtext` mediumtext,"
        sql += "`amediumblob` mediumblob,"
        sql += "`alongtext` longtext,"
        sql += "`alongblob` longblob,"
        sql += "`aenum` enum('val1', 'val2'),"
        sql += "`aset` set('a', 'b', 'c', 'd'),"
        sql += "`adate` date"
        sql += ") ENGINE=InnoDB DEFAULT CHARSET=latin1;"
        _, err := sess.Exec(sql)
        So(err, ShouldBeNil)

        sql = "INSERT INTO `mysql_types` "
        sql += "(`atinyint`, `avarchar`, `achar`, `amediumint`, `asmallint`, `abigint`, `aint`, `adouble`, "
        sql += "`anewdecimal`, `afloat`, `adatetime`, `atimestamp`, `atime`, `abit`, `atinytext`, "
        sql += "`atinyblob`, `atext`, `ablob`, `amediumtext`, `amediumblob`, `alongtext`, `alongblob`, "
        sql += "`aenum`, `aset`, `adate`) "
        sql += "VALUES(1, 'abc', 'def', 1, 10, 100, 1420070400, 1.11, "
        sql += "2.22, 3.33, now(), current_timestamp(), '11:11:11', 1, 'tinytext', "
        sql += "'tinyblob', 'text', 'blob', 'mediumtext', 'mediumblob', 'longtext', 'longblob', "
        sql += "'val2', 'a,b', curdate());"
        _, err = sess.Exec(sql)
        So(err, ShouldBeNil)

        Convey("TransformToTable should map MySQL column types to Go types", func() {
            rows, err := db.Query("SELECT * FROM mysql_types")
            defer rows.Close()
            So(err, ShouldBeNil)

            queryResult := &tsdb.QueryResult{Meta: simplejson.New()}
            err = executor.TransformToTable(nil, rows, queryResult)
            So(err, ShouldBeNil)
            column := queryResult.Tables[0].Rows[0]
            So(*column[0].(*int8), ShouldEqual, 1)
            So(*column[1].(*string), ShouldEqual, "abc")
            So(*column[2].(*string), ShouldEqual, "def")
            So(*column[3].(*int32), ShouldEqual, 1)
            So(*column[4].(*int16), ShouldEqual, 10)
            So(*column[5].(*int64), ShouldEqual, 100)
            So(*column[6].(*int), ShouldEqual, 1420070400)
            So(*column[7].(*float64), ShouldEqual, 1.11)
            So(*column[8].(*float64), ShouldEqual, 2.22)
            So(*column[9].(*float64), ShouldEqual, 3.33)
            _, offset := time.Now().Zone()
            So((*column[10].(*time.Time)), ShouldHappenWithin, time.Duration(10*time.Second), time.Now().Add(time.Duration(offset)*time.Second))
            So(*column[11].(*time.Time), ShouldHappenWithin, time.Duration(10*time.Second), time.Now().Add(time.Duration(offset)*time.Second))
            So(*column[12].(*string), ShouldEqual, "11:11:11")
            So(*column[13].(*[]byte), ShouldHaveSameTypeAs, []byte{1})
            So(*column[14].(*string), ShouldEqual, "tinytext")
            So(*column[15].(*string), ShouldEqual, "tinyblob")
            So(*column[16].(*string), ShouldEqual, "text")
            So(*column[17].(*string), ShouldEqual, "blob")
            So(*column[18].(*string), ShouldEqual, "mediumtext")
            So(*column[19].(*string), ShouldEqual, "mediumblob")
            So(*column[20].(*string), ShouldEqual, "longtext")
            So(*column[21].(*string), ShouldEqual, "longblob")
            So(*column[22].(*string), ShouldEqual, "val2")
            So(*column[23].(*string), ShouldEqual, "a,b")
            So(*column[24].(*string), ShouldEqual, time.Now().Format("2006-01-02T00:00:00Z"))
        })
    })
}

func InitMySQLTestDB(t *testing.T) *xorm.Engine {
    x, err := xorm.NewEngine(sqlutil.TestDB_Mysql.DriverName, sqlutil.TestDB_Mysql.ConnStr+"&parseTime=true")

    // x.ShowSQL()

    if err != nil {
        t.Fatalf("Failed to init mysql db %v", err)
    }

    sqlutil.CleanDB(x)

    return x
}

@@ -1,7 +1,10 @@
package util

import (
    "fmt"
    "math"
    "regexp"
    "time"
)

func StringsFallback2(val1 string, val2 string) string {
@@ -28,3 +31,34 @@ func SplitString(str string) []string {

    return regexp.MustCompile("[, ]+").Split(str, -1)
}

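// GetAgeString renders the time since t as a compact age such as "2h",
// "3d" or "2M", picking the largest unit that fits (the divisors below are
// the minutes in a year, month, day and hour); a zero time renders as "?"
// and anything under a minute as "< 1m".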
func GetAgeString(t time.Time) string {
    if t.IsZero() {
        return "?"
    }

    sinceNow := time.Since(t)
    minutes := sinceNow.Minutes()
    years := int(math.Floor(minutes / 525600))
    months := int(math.Floor(minutes / 43800))
    days := int(math.Floor(minutes / 1440))
    hours := int(math.Floor(minutes / 60))

    if years > 0 {
        return fmt.Sprintf("%dy", years)
    }
    if months > 0 {
        return fmt.Sprintf("%dM", months)
    }
    if days > 0 {
        return fmt.Sprintf("%dd", days)
    }
    if hours > 0 {
        return fmt.Sprintf("%dh", hours)
    }
    if int(minutes) > 0 {
        return fmt.Sprintf("%dm", int(minutes))
    }

    return "< 1m"
}

@@ -2,6 +2,7 @@ package util

import (
    "testing"
    "time"

    . "github.com/smartystreets/goconvey/convey"
)
@@ -24,3 +25,15 @@ func TestSplitString(t *testing.T) {
        So(SplitString("test1 , test2 test3"), ShouldResemble, []string{"test1", "test2", "test3"})
    })
}

func TestDateAge(t *testing.T) {
    Convey("GetAgeString", t, func() {
        So(GetAgeString(time.Time{}), ShouldEqual, "?")
        So(GetAgeString(time.Now().Add(-time.Second*2)), ShouldEqual, "< 1m")
        So(GetAgeString(time.Now().Add(-time.Minute*2)), ShouldEqual, "2m")
        So(GetAgeString(time.Now().Add(-time.Hour*2)), ShouldEqual, "2h")
        So(GetAgeString(time.Now().Add(-time.Hour*24*3)), ShouldEqual, "3d")
        So(GetAgeString(time.Now().Add(-time.Hour*24*67)), ShouldEqual, "2M")
        So(GetAgeString(time.Now().Add(-time.Hour*24*409)), ShouldEqual, "1y")
    })
}

@@ -129,6 +129,10 @@ function (angular, _, coreModule, config) {
      }

      var first = variable.current.value;
      if (first === 'default') {
        first = config.defaultDatasource;
      }

      var ds = config.datasources[first];

      if (ds) {

@@ -3,9 +3,11 @@ export default class TableModel {
  columns: any[];
  rows: any[];
  type: string;
  columnMap: any;

  constructor() {
    this.columns = [];
    this.columnMap = {};
    this.rows = [];
    this.type = 'table';
  }
@@ -36,4 +38,11 @@ export default class TableModel {
      this.columns[options.col].desc = false;
    }
  }

  addColumn(col) {
    if (!this.columnMap[col.text]) {
      this.columns.push(col);
      this.columnMap[col.text] = col;
    }
  }
}

@@ -15,39 +15,43 @@
  <tbody>
    <tr>
      <td>Total dashboards</td>
      <td>{{ctrl.stats.dashboard_count}}</td>
      <td>{{ctrl.stats.dashboards}}</td>
    </tr>
    <tr>
      <td>Total users</td>
      <td>{{ctrl.stats.user_count}}</td>
      <td>{{ctrl.stats.users}}</td>
    </tr>
    <tr>
      <td>Active users (seen last 14 days)</td>
      <td>{{ctrl.stats.activeUsers}}</td>
    </tr>
    <tr>
      <td>Total organizations</td>
      <td>{{ctrl.stats.org_count}}</td>
      <td>{{ctrl.stats.orgs}}</td>
    </tr>
    <tr>
      <td>Total datasources</td>
      <td>{{ctrl.stats.data_source_count}}</td>
      <td>{{ctrl.stats.datasources}}</td>
    </tr>
    <tr>
      <td>Total playlists</td>
      <td>{{ctrl.stats.playlist_count}}</td>
      <td>{{ctrl.stats.playlists}}</td>
    </tr>
    <tr>
      <td>Total snapshots</td>
      <td>{{ctrl.stats.db_snapshot_count}}</td>
      <td>{{ctrl.stats.snapshots}}</td>
    </tr>
    <tr>
      <td>Total dashboard tags</td>
      <td>{{ctrl.stats.db_tag_count}}</td>
      <td>{{ctrl.stats.tags}}</td>
    </tr>
    <tr>
      <td>Total starred dashboards</td>
      <td>{{ctrl.stats.starred_db_count}}</td>
      <td>{{ctrl.stats.stars}}</td>
    </tr>
    <tr>
      <td>Total alerts</td>
      <td>{{ctrl.stats.alert_count}}</td>
      <td>{{ctrl.stats.alerts}}</td>
    </tr>
  </tbody>
</table>

@@ -25,7 +25,11 @@
  <th>Name</th>
  <th>Login</th>
  <th>Email</th>
  <th style="white-space: nowrap">Grafana Admin</th>
  <th>
    Seen
    <tip>Time since user was seen using Grafana</tip>
  </th>
  <th></th>
  <th></th>
</tr>
</thead>
@@ -35,7 +39,12 @@
<td>{{user.name}}</td>
<td>{{user.login}}</td>
<td>{{user.email}}</td>
<td>{{user.isAdmin}}</td>
<td>
  {{user.lastSeenAtAge}}
</td>
<td>
  <i class="fa fa-shield" ng-show="user.isAdmin" bs-tooltip="'Grafana Admin'"></i>
</td>
<td class="text-right">
  <a href="admin/users/edit/{{user.id}}" class="btn btn-inverse btn-small">
    <i class="fa fa-edit"></i>

@@ -10,9 +10,10 @@ export class AdHocFiltersCtrl {
  removeTagFilterSegment: any;

  /** @ngInject */
  constructor(private uiSegmentSrv, private datasourceSrv, private $q, private templateSrv, private $rootScope) {
  constructor(private uiSegmentSrv, private datasourceSrv, private $q, private variableSrv, private $scope, private $rootScope) {
    this.removeTagFilterSegment = uiSegmentSrv.newSegment({fake: true, value: '-- remove filter --'});
    this.buildSegmentModel();
    this.$rootScope.onAppEvent('template-variable-value-updated', this.buildSegmentModel.bind(this), $scope);
  }

  buildSegmentModel() {
@@ -141,8 +142,7 @@ export class AdHocFiltersCtrl {
    }

    this.variable.setFilters(filters);
    this.$rootScope.$emit('template-variable-value-updated');
    this.$rootScope.$broadcast('refresh');
    this.variableSrv.variableUpdated(this.variable, true);
  }
}

@@ -135,9 +135,10 @@ export class DashNavCtrl {

  viewJson() {
    var clone = this.dashboard.getSaveModelClone();
    var html = angular.toJson(clone, true);
    var uri = "data:application/json;charset=utf-8," + encodeURIComponent(html);
    var newWindow = window.open(uri);

    this.$rootScope.appEvent('show-json-editor', {
      object: clone,
    });
  }

  showSearch() {

@@ -44,12 +44,16 @@ export class SaveDashboardAsModalCtrl {
    this.clone.editable = true;
    this.clone.hideControls = false;

    // remove alerts
    // remove alerts if source dashboard is already persisted
    // do not want to create alert dupes
    if (dashboard.id > 0) {
      this.clone.rows.forEach(row => {
        row.panels.forEach(panel => {
          delete panel.thresholds;
          delete panel.alert;
        });
      });
    }

    delete this.clone.autoUpdate;
  }

@@ -22,10 +22,7 @@ export class SubmenuCtrl {
  }

  variableUpdated(variable) {
    this.variableSrv.variableUpdated(variable).then(() => {
      this.$rootScope.$emit('template-variable-value-updated');
      this.$rootScope.$broadcast('refresh');
    });
    this.variableSrv.variableUpdated(variable, true);
  }

  openEditView(editview) {

@@ -41,6 +41,10 @@
<tr>
  <th>Login</th>
  <th>Email</th>
  <th>
    Seen
    <tip>Time since user was seen using Grafana</tip>
  </th>
  <th>Role</th>
  <th style="width: 34px;"></th>
</tr>
@@ -48,6 +52,7 @@
<tr ng-repeat="user in ctrl.users">
  <td>{{user.login}}</td>
  <td><span class="ellipsis">{{user.email}}</span></td>
  <td>{{user.lastSeenAtAge}}</td>
  <td>
    <select type="text" ng-model="user.role" class="input-medium" ng-options="f for f in ['Viewer', 'Editor', 'Read Only Editor', 'Admin']" ng-change="ctrl.updateOrgUser(user)">
    </select>

@@ -35,14 +35,6 @@ export class VariableEditorCtrl {
  $scope.init = function() {
    $scope.mode = 'list';

    $scope.datasources = _.filter(datasourceSrv.getMetricSources(), function(ds) {
      return !ds.meta.mixed && ds.value !== null;
    });

    $scope.datasourceTypes = _($scope.datasources).uniqBy('meta.id').map(function(ds) {
      return {text: ds.meta.name, value: ds.meta.id};
    }).value();

    $scope.variables = variableSrv.variables;
    $scope.reset();

@@ -55,9 +47,8 @@

  $scope.add = function() {
    if ($scope.isValid()) {
      $scope.variables.push($scope.current);
      variableSrv.addVariable($scope.current);
      $scope.update();
      $scope.dashboard.updateSubmenuVisibility();
    }
  };

@@ -114,9 +105,8 @@
  $scope.duplicate = function(variable) {
    var clone = _.cloneDeep(variable.getSaveModel());
    $scope.current = variableSrv.createVariableFromModel(clone);
    $scope.variables.push($scope.current);
    $scope.current.name = 'copy_of_'+variable.name;
    $scope.dashboard.updateSubmenuVisibility();
    $scope.variableSrv.addVariable($scope.current);
  };

  $scope.update = function() {
@@ -132,6 +122,15 @@
  $scope.reset = function() {
    $scope.currentIsNew = true;
    $scope.current = variableSrv.createVariableFromModel({type: 'query'});

    // this is done here in case a new data source type variable was added
    $scope.datasources = _.filter(datasourceSrv.getMetricSources(), function(ds) {
      return !ds.meta.mixed && ds.value !== null;
    });

    $scope.datasourceTypes = _($scope.datasources).uniqBy('meta.id').map(function(ds) {
      return {text: ds.meta.name, value: ds.meta.id};
    }).value();
  };

  $scope.typeChanged = function() {
@@ -150,9 +149,7 @@
  };

  $scope.removeVariable = function(variable) {
    var index = _.indexOf($scope.variables, variable);
    $scope.variables.splice(index, 1);
    $scope.dashboard.updateSubmenuVisibility();
    variableSrv.removeVariable(variable);
  };
}
}

@@ -22,6 +22,7 @@ describe('VariableSrv', function() {
  ctx.variableSrv.init({
    templating: {list: []},
    events: new Emitter(),
    updateSubmenuVisibility: sinon.stub(),
  });
  ctx.$rootScope.$digest();
}));
@@ -41,7 +42,9 @@
  ctx.datasourceSrv.getMetricSources = sinon.stub().returns(scenario.metricSources);


  scenario.variable = ctx.variableSrv.addVariable(scenario.variableModel);
  scenario.variable = ctx.variableSrv.createVariableFromModel(scenario.variableModel);
  ctx.variableSrv.addVariable(scenario.variable);

  ctx.variableSrv.updateOptions(scenario.variable);
  ctx.$rootScope.$digest();
});

@@ -90,17 +90,24 @@ export class VariableSrv {
|
||||
return variable;
|
||||
}
|
||||
|
||||
addVariable(model) {
|
||||
var variable = this.createVariableFromModel(model);
|
||||
addVariable(variable) {
|
||||
this.variables.push(variable);
|
||||
return variable;
|
||||
this.templateSrv.updateTemplateData();
|
||||
this.dashboard.updateSubmenuVisibility();
|
||||
}
|
||||
|
||||
removeVariable(variable) {
|
||||
var index = _.indexOf(this.variables, variable);
|
||||
this.variables.splice(index, 1);
|
||||
this.templateSrv.updateTemplateData();
|
||||
this.dashboard.updateSubmenuVisibility();
|
||||
}
|
||||
|
||||
updateOptions(variable) {
|
||||
return variable.updateOptions();
|
||||
}
|
||||
|
||||
variableUpdated(variable) {
|
||||
variableUpdated(variable, emitChangeEvents?) {
|
||||
// if the variable has an init lock, ignore the cascading update because we are in a boot-up scenario
|
||||
if (variable.initLock) {
|
||||
return this.$q.when();
|
||||
@@ -117,7 +124,12 @@ export class VariableSrv {
|
||||
}
|
||||
});
|
||||
|
||||
return this.$q.all(promises);
|
||||
return this.$q.all(promises).then(() => {
|
||||
if (emitChangeEvents) {
|
||||
this.$rootScope.$emit('template-variable-value-updated');
|
||||
this.$rootScope.$broadcast('refresh');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
selectOptionsForCurrentValue(variable) {
|
||||
@@ -218,6 +230,28 @@ export class VariableSrv {
|
||||
// update url
|
||||
this.$location.search(params);
|
||||
}
|
||||
|
||||
setAdhocFilter(options) {
|
||||
var variable = _.find(this.variables, {type: 'adhoc', datasource: options.datasource});
|
||||
if (!variable) {
|
||||
variable = this.createVariableFromModel({name: 'Filters', type: 'adhoc', datasource: options.datasource});
|
||||
this.addVariable(variable);
|
||||
}
|
||||
|
||||
let filters = variable.filters;
|
||||
let filter = _.find(filters, {key: options.key, value: options.value});
|
||||
|
||||
if (!filter) {
|
||||
filter = {key: options.key, value: options.value};
|
||||
filters.push(filter);
|
||||
}
|
||||
|
||||
filter.operator = options.operator;
|
||||
|
||||
variable.setFilters(filters);
|
||||
this.variableUpdated(variable, true);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
coreModule.service('variableSrv', VariableSrv);
|
||||
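Taken together, the VariableSrv hunks above split construction from registration: createVariableFromModel() only builds the variable, addVariable() registers an already-constructed instance, and variableUpdated() gained an optional flag to emit change events. A minimal sketch of the new calling convention (the model literal is hypothetical):

// build, register, then update with change events enabled
var model = {type: 'query', name: 'host'};  // hypothetical variable model
var variable = variableSrv.createVariableFromModel(model);
variableSrv.addVariable(variable);  // also updates template data and submenu visibility
variableSrv.variableUpdated(variable, true);  // emits value-updated and broadcasts 'refresh' once updates resolve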
|
||||
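setAdhocFilter() creates the ad-hoc variable lazily and upserts the key/value pair; a sketch of a caller, with hypothetical option values:

variableSrv.setAdhocFilter({
  datasource: 'es-prod',  // hypothetical data source name
  key: 'host',
  value: 'server-1',
  operator: '=',
});
// the first call creates the 'Filters' adhoc variable; later calls update the
// matching filter in place and trigger a refresh via variableUpdated(.., true)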
@@ -11,6 +11,8 @@ define([
|
||||
function (angular, _, moment, kbn, ElasticQueryBuilder, IndexPattern, ElasticResponse) {
|
||||
'use strict';
|
||||
|
||||
ElasticResponse = ElasticResponse.ElasticResponse;
|
||||
|
||||
/** @ngInject */
|
||||
function ElasticDatasource(instanceSettings, $q, backendSrv, templateSrv, timeSrv) {
|
||||
this.basicAuth = instanceSettings.basicAuth;
|
||||
@@ -270,10 +272,17 @@ function (angular, _, moment, kbn, ElasticQueryBuilder, IndexPattern, ElasticRes
|
||||
var subObj = obj[key];
|
||||
|
||||
// Check mapping field for nested fields
|
||||
if (subObj.hasOwnProperty('properties')) {
|
||||
if (_.isObject(subObj.properties)) {
|
||||
fieldNameParts.push(key);
|
||||
getFieldsRecursively(subObj.properties);
|
||||
} else {
|
||||
}
|
||||
|
||||
if (_.isObject(subObj.fields)) {
|
||||
fieldNameParts.push(key);
|
||||
getFieldsRecursively(subObj.fields);
|
||||
}
|
||||
|
||||
if (_.isString(subObj.type)) {
|
||||
var fieldName = fieldNameParts.concat(key).join('.');
|
||||
|
||||
// Hide meta-fields and check field type
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
declare var test: any;
|
||||
export default test;
|
||||
@@ -1,350 +0,0 @@
|
||||
define([
|
||||
"lodash",
|
||||
"./query_def"
|
||||
],
|
||||
function (_, queryDef) {
|
||||
'use strict';
|
||||
|
||||
function ElasticResponse(targets, response) {
|
||||
this.targets = targets;
|
||||
this.response = response;
|
||||
}
|
||||
|
||||
ElasticResponse.prototype.processMetrics = function(esAgg, target, seriesList, props) {
|
||||
var metric, y, i, newSeries, bucket, value;
|
||||
|
||||
for (y = 0; y < target.metrics.length; y++) {
|
||||
metric = target.metrics[y];
|
||||
if (metric.hide) {
|
||||
continue;
|
||||
}
|
||||
|
||||
switch(metric.type) {
|
||||
case 'count': {
|
||||
newSeries = { datapoints: [], metric: 'count', props: props};
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
value = bucket.doc_count;
|
||||
newSeries.datapoints.push([value, bucket.key]);
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
break;
|
||||
}
|
||||
case 'percentiles': {
|
||||
if (esAgg.buckets.length === 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
var firstBucket = esAgg.buckets[0];
|
||||
var percentiles = firstBucket[metric.id].values;
|
||||
|
||||
for (var percentileName in percentiles) {
|
||||
newSeries = {datapoints: [], metric: 'p' + percentileName, props: props, field: metric.field};
|
||||
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
var values = bucket[metric.id].values;
|
||||
newSeries.datapoints.push([values[percentileName], bucket.key]);
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case 'extended_stats': {
|
||||
for (var statName in metric.meta) {
|
||||
if (!metric.meta[statName]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
newSeries = {datapoints: [], metric: statName, props: props, field: metric.field};
|
||||
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
var stats = bucket[metric.id];
|
||||
|
||||
// add stats that are in nested obj to top level obj
|
||||
stats.std_deviation_bounds_upper = stats.std_deviation_bounds.upper;
|
||||
stats.std_deviation_bounds_lower = stats.std_deviation_bounds.lower;
|
||||
|
||||
newSeries.datapoints.push([stats[statName], bucket.key]);
|
||||
}
|
||||
|
||||
seriesList.push(newSeries);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
newSeries = { datapoints: [], metric: metric.type, field: metric.field, props: props};
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
|
||||
value = bucket[metric.id];
|
||||
if (value !== undefined) {
|
||||
if (value.normalized_value) {
|
||||
newSeries.datapoints.push([value.normalized_value, bucket.key]);
|
||||
} else {
|
||||
newSeries.datapoints.push([value.value, bucket.key]);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.processAggregationDocs = function(esAgg, aggDef, target, docs, props) {
|
||||
var metric, y, i, bucket, metricName, doc;
|
||||
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
doc = _.defaults({}, props);
|
||||
doc[aggDef.field] = bucket.key;
|
||||
|
||||
for (y = 0; y < target.metrics.length; y++) {
|
||||
metric = target.metrics[y];
|
||||
|
||||
switch(metric.type) {
|
||||
case "count": {
|
||||
metricName = this._getMetricName(metric.type);
|
||||
doc[metricName] = bucket.doc_count;
|
||||
break;
|
||||
}
|
||||
case 'extended_stats': {
|
||||
for (var statName in metric.meta) {
|
||||
if (!metric.meta[statName]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
var stats = bucket[metric.id];
|
||||
// add stats that are in nested obj to top level obj
|
||||
stats.std_deviation_bounds_upper = stats.std_deviation_bounds.upper;
|
||||
stats.std_deviation_bounds_lower = stats.std_deviation_bounds.lower;
|
||||
|
||||
metricName = this._getMetricName(statName);
|
||||
doc[metricName] = stats[statName];
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
metricName = this._getMetricName(metric.type);
|
||||
var otherMetrics = _.filter(target.metrics, {type: metric.type});
|
||||
|
||||
// if there is more than one metric of the same type, include the field name in the property
|
||||
if (otherMetrics.length > 1) {
|
||||
metricName += ' ' + metric.field;
|
||||
}
|
||||
|
||||
doc[metricName] = bucket[metric.id].value;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
docs.push(doc);
|
||||
}
|
||||
};
|
||||
|
||||
// This is quite complex
|
||||
// need to recurse down the nested buckets to build the series
|
||||
ElasticResponse.prototype.processBuckets = function(aggs, target, seriesList, docs, props, depth) {
|
||||
var bucket, aggDef, esAgg, aggId;
|
||||
var maxDepth = target.bucketAggs.length-1;
|
||||
|
||||
for (aggId in aggs) {
|
||||
aggDef = _.find(target.bucketAggs, {id: aggId});
|
||||
esAgg = aggs[aggId];
|
||||
|
||||
if (!aggDef) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (depth === maxDepth) {
|
||||
if (aggDef.type === 'date_histogram') {
|
||||
this.processMetrics(esAgg, target, seriesList, props);
|
||||
} else {
|
||||
this.processAggregationDocs(esAgg, aggDef, target, docs, props);
|
||||
}
|
||||
} else {
|
||||
for (var nameIndex in esAgg.buckets) {
|
||||
bucket = esAgg.buckets[nameIndex];
|
||||
props = _.clone(props);
|
||||
if (bucket.key !== void 0) {
|
||||
props[aggDef.field] = bucket.key;
|
||||
} else {
|
||||
props["filter"] = nameIndex;
|
||||
}
|
||||
if (bucket.key_as_string) {
|
||||
props[aggDef.field] = bucket.key_as_string;
|
||||
}
|
||||
this.processBuckets(bucket, target, seriesList, docs, props, depth+1);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype._getMetricName = function(metric) {
|
||||
var metricDef = _.find(queryDef.metricAggTypes, {value: metric});
|
||||
if (!metricDef) {
|
||||
metricDef = _.find(queryDef.extendedStats, {value: metric});
|
||||
}
|
||||
|
||||
return metricDef ? metricDef.text : metric;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype._getSeriesName = function(series, target, metricTypeCount) {
|
||||
var metricName = this._getMetricName(series.metric);
|
||||
|
||||
if (target.alias) {
|
||||
var regex = /\{\{([\s\S]+?)\}\}/g;
|
||||
|
||||
return target.alias.replace(regex, function(match, g1, g2) {
|
||||
var group = g1 || g2;
|
||||
|
||||
if (group.indexOf('term ') === 0) { return series.props[group.substring(5)]; }
|
||||
if (series.props[group] !== void 0) { return series.props[group]; }
|
||||
if (group === 'metric') { return metricName; }
|
||||
if (group === 'field') { return series.field; }
|
||||
|
||||
return match;
|
||||
});
|
||||
}
|
||||
|
||||
if (series.field && queryDef.isPipelineAgg(series.metric)) {
|
||||
var appliedAgg = _.find(target.metrics, { id: series.field });
|
||||
if (appliedAgg) {
|
||||
metricName += ' ' + queryDef.describeMetric(appliedAgg);
|
||||
} else {
|
||||
metricName = 'Unset';
|
||||
}
|
||||
} else if (series.field) {
|
||||
metricName += ' ' + series.field;
|
||||
}
|
||||
|
||||
var propKeys = _.keys(series.props);
|
||||
if (propKeys.length === 0) {
|
||||
return metricName;
|
||||
}
|
||||
|
||||
var name = '';
|
||||
for (var propName in series.props) {
|
||||
name += series.props[propName] + ' ';
|
||||
}
|
||||
|
||||
if (metricTypeCount === 1) {
|
||||
return name.trim();
|
||||
}
|
||||
|
||||
return name.trim() + ' ' + metricName;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.nameSeries = function(seriesList, target) {
|
||||
var metricTypeCount = _.uniq(_.map(seriesList, 'metric')).length;
|
||||
var fieldNameCount = _.uniq(_.map(seriesList, 'field')).length;
|
||||
|
||||
for (var i = 0; i < seriesList.length; i++) {
|
||||
var series = seriesList[i];
|
||||
series.target = this._getSeriesName(series, target, metricTypeCount, fieldNameCount);
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.processHits = function(hits, seriesList) {
|
||||
var series = {target: 'docs', type: 'docs', datapoints: [], total: hits.total};
|
||||
var propName, hit, doc, i;
|
||||
|
||||
for (i = 0; i < hits.hits.length; i++) {
|
||||
hit = hits.hits[i];
|
||||
doc = {
|
||||
_id: hit._id,
|
||||
_type: hit._type,
|
||||
_index: hit._index
|
||||
};
|
||||
|
||||
if (hit._source) {
|
||||
for (propName in hit._source) {
|
||||
doc[propName] = hit._source[propName];
|
||||
}
|
||||
}
|
||||
|
||||
for (propName in hit.fields) {
|
||||
doc[propName] = hit.fields[propName];
|
||||
}
|
||||
series.datapoints.push(doc);
|
||||
}
|
||||
|
||||
seriesList.push(series);
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.trimDatapoints = function(aggregations, target) {
|
||||
var histogram = _.find(target.bucketAggs, { type: 'date_histogram'});
|
||||
|
||||
var shouldDropFirstAndLast = histogram && histogram.settings && histogram.settings.trimEdges;
|
||||
if (shouldDropFirstAndLast) {
|
||||
var trim = histogram.settings.trimEdges;
|
||||
for(var prop in aggregations) {
|
||||
var points = aggregations[prop];
|
||||
if (points.datapoints.length > trim * 2) {
|
||||
points.datapoints = points.datapoints.slice(trim, points.datapoints.length - trim);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.getErrorFromElasticResponse = function(response, err) {
|
||||
var result = {};
|
||||
result.data = JSON.stringify(err, null, 4);
|
||||
if (err.root_cause && err.root_cause.length > 0 && err.root_cause[0].reason) {
|
||||
result.message = err.root_cause[0].reason;
|
||||
} else {
|
||||
result.message = err.reason || 'Unknown elastic error response';
|
||||
}
|
||||
|
||||
if (response.$$config) {
|
||||
result.config = response.$$config;
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.getTimeSeries = function() {
|
||||
var seriesList = [];
|
||||
|
||||
for (var i = 0; i < this.response.responses.length; i++) {
|
||||
var response = this.response.responses[i];
|
||||
if (response.error) {
|
||||
throw this.getErrorFromElasticResponse(this.response, response.error);
|
||||
}
|
||||
|
||||
if (response.hits && response.hits.hits.length > 0) {
|
||||
this.processHits(response.hits, seriesList);
|
||||
}
|
||||
|
||||
if (response.aggregations) {
|
||||
var aggregations = response.aggregations;
|
||||
var target = this.targets[i];
|
||||
var tmpSeriesList = [];
|
||||
var docs = [];
|
||||
|
||||
this.processBuckets(aggregations, target, tmpSeriesList, docs, {}, 0);
|
||||
this.trimDatapoints(tmpSeriesList, target);
|
||||
this.nameSeries(tmpSeriesList, target);
|
||||
|
||||
for (var y = 0; y < tmpSeriesList.length; y++) {
|
||||
seriesList.push(tmpSeriesList[y]);
|
||||
}
|
||||
|
||||
if (seriesList.length === 0 && docs.length > 0) {
|
||||
seriesList.push({target: 'docs', type: 'docs', datapoints: docs});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { data: seriesList };
|
||||
};
|
||||
|
||||
return ElasticResponse;
|
||||
});
|
||||
360
public/app/plugins/datasource/elasticsearch/elastic_response.ts
Normal file
@@ -0,0 +1,360 @@
|
||||
///<reference path="../../../headers/common.d.ts" />
|
||||
|
||||
import _ from 'lodash';
|
||||
import queryDef from "./query_def";
|
||||
import TableModel from 'app/core/table_model';
|
||||
|
||||
export function ElasticResponse(targets, response) {
|
||||
this.targets = targets;
|
||||
this.response = response;
|
||||
}
|
||||
|
||||
ElasticResponse.prototype.processMetrics = function(esAgg, target, seriesList, props) {
|
||||
var metric, y, i, newSeries, bucket, value;
|
||||
|
||||
for (y = 0; y < target.metrics.length; y++) {
|
||||
metric = target.metrics[y];
|
||||
if (metric.hide) {
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (metric.type) {
|
||||
case 'count': {
|
||||
newSeries = { datapoints: [], metric: 'count', props: props};
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
value = bucket.doc_count;
|
||||
newSeries.datapoints.push([value, bucket.key]);
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
break;
|
||||
}
|
||||
case 'percentiles': {
|
||||
if (esAgg.buckets.length === 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
var firstBucket = esAgg.buckets[0];
|
||||
var percentiles = firstBucket[metric.id].values;
|
||||
|
||||
for (var percentileName in percentiles) {
|
||||
newSeries = {datapoints: [], metric: 'p' + percentileName, props: props, field: metric.field};
|
||||
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
var values = bucket[metric.id].values;
|
||||
newSeries.datapoints.push([values[percentileName], bucket.key]);
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case 'extended_stats': {
|
||||
for (var statName in metric.meta) {
|
||||
if (!metric.meta[statName]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
newSeries = {datapoints: [], metric: statName, props: props, field: metric.field};
|
||||
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
var stats = bucket[metric.id];
|
||||
|
||||
// add stats that are in nested obj to top level obj
|
||||
stats.std_deviation_bounds_upper = stats.std_deviation_bounds.upper;
|
||||
stats.std_deviation_bounds_lower = stats.std_deviation_bounds.lower;
|
||||
|
||||
newSeries.datapoints.push([stats[statName], bucket.key]);
|
||||
}
|
||||
|
||||
seriesList.push(newSeries);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
newSeries = { datapoints: [], metric: metric.type, field: metric.field, props: props};
|
||||
for (i = 0; i < esAgg.buckets.length; i++) {
|
||||
bucket = esAgg.buckets[i];
|
||||
|
||||
value = bucket[metric.id];
|
||||
if (value !== undefined) {
|
||||
if (value.normalized_value) {
|
||||
newSeries.datapoints.push([value.normalized_value, bucket.key]);
|
||||
} else {
|
||||
newSeries.datapoints.push([value.value, bucket.key]);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
seriesList.push(newSeries);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.processAggregationDocs = function(esAgg, aggDef, target, table, props) {
|
||||
// add columns
|
||||
if (table.columns.length === 0) {
|
||||
for (let propKey of _.keys(props)) {
|
||||
table.addColumn({text: propKey, filterable: true});
|
||||
}
|
||||
table.addColumn({text: aggDef.field, filterable: true});
|
||||
}
|
||||
|
||||
// helper to register a metric column and append its value to the row
|
||||
let addMetricValue = (values, metricName, value) => {
|
||||
table.addColumn({text: metricName});
|
||||
values.push(value);
|
||||
};
|
||||
|
||||
for (let bucket of esAgg.buckets) {
|
||||
let values = [];
|
||||
|
||||
for (let propValues of _.values(props)) {
|
||||
values.push(propValues);
|
||||
}
|
||||
|
||||
// add bucket key (value)
|
||||
values.push(bucket.key);
|
||||
|
||||
for (let metric of target.metrics) {
|
||||
switch (metric.type) {
|
||||
case "count": {
|
||||
addMetricValue(values, this._getMetricName(metric.type), bucket.doc_count);
|
||||
break;
|
||||
}
|
||||
case 'extended_stats': {
|
||||
for (var statName in metric.meta) {
|
||||
if (!metric.meta[statName]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
var stats = bucket[metric.id];
|
||||
// add stats that are in nested obj to top level obj
|
||||
stats.std_deviation_bounds_upper = stats.std_deviation_bounds.upper;
|
||||
stats.std_deviation_bounds_lower = stats.std_deviation_bounds.lower;
|
||||
|
||||
addMetricValue(values, this._getMetricName(statName), stats[statName]);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
let metricName = this._getMetricName(metric.type);
|
||||
let otherMetrics = _.filter(target.metrics, {type: metric.type});
|
||||
|
||||
// if there is more than one metric of the same type, include the field name in the property
|
||||
if (otherMetrics.length > 1) {
|
||||
metricName += ' ' + metric.field;
|
||||
}
|
||||
|
||||
addMetricValue(values, metricName, bucket[metric.id].value);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
table.rows.push(values);
|
||||
}
|
||||
};
|
||||
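For reference, a sketch of the table shape processAggregationDocs() is meant to produce, assuming a terms aggregation on a hypothetical "host" field with a count metric (TableModel used exactly as above):

import TableModel from 'app/core/table_model';

var table = new TableModel();
table.addColumn({text: 'host', filterable: true});  // bucket agg field
table.addColumn({text: 'Count'});                   // registered by addMetricValue
table.rows.push(['server-1', 369]);                 // one row per bucket
table.rows.push(['server-2', 200]);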
|
||||
// This is quite complex
|
||||
// need to recurse down the nested buckets to build the series
|
||||
ElasticResponse.prototype.processBuckets = function(aggs, target, seriesList, table, props, depth) {
|
||||
var bucket, aggDef, esAgg, aggId;
|
||||
var maxDepth = target.bucketAggs.length-1;
|
||||
|
||||
for (aggId in aggs) {
|
||||
aggDef = _.find(target.bucketAggs, {id: aggId});
|
||||
esAgg = aggs[aggId];
|
||||
|
||||
if (!aggDef) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (depth === maxDepth) {
|
||||
if (aggDef.type === 'date_histogram') {
|
||||
this.processMetrics(esAgg, target, seriesList, props);
|
||||
} else {
|
||||
this.processAggregationDocs(esAgg, aggDef, target, table, props);
|
||||
}
|
||||
} else {
|
||||
for (var nameIndex in esAgg.buckets) {
|
||||
bucket = esAgg.buckets[nameIndex];
|
||||
props = _.clone(props);
|
||||
if (bucket.key !== void 0) {
|
||||
props[aggDef.field] = bucket.key;
|
||||
} else {
|
||||
props["filter"] = nameIndex;
|
||||
}
|
||||
if (bucket.key_as_string) {
|
||||
props[aggDef.field] = bucket.key_as_string;
|
||||
}
|
||||
this.processBuckets(bucket, target, seriesList, table, props, depth+1);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
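A sketch of how props accumulate during the recursion, for a hypothetical aggregation tree:

// depth 0: terms bucket on 'host'    -> props becomes {host: 'server-1'}
// depth 1 (maxDepth): date_histogram -> processMetrics(esAgg, target, seriesList, {host: 'server-1'})
// _.clone(props) per bucket keeps sibling buckets from sharing mutated state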
|
||||
ElasticResponse.prototype._getMetricName = function(metric) {
|
||||
var metricDef = _.find(queryDef.metricAggTypes, {value: metric});
|
||||
if (!metricDef) {
|
||||
metricDef = _.find(queryDef.extendedStats, {value: metric});
|
||||
}
|
||||
|
||||
return metricDef ? metricDef.text : metric;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype._getSeriesName = function(series, target, metricTypeCount) {
|
||||
var metricName = this._getMetricName(series.metric);
|
||||
|
||||
if (target.alias) {
|
||||
var regex = /\{\{([\s\S]+?)\}\}/g;
|
||||
|
||||
return target.alias.replace(regex, function(match, g1, g2) {
|
||||
var group = g1 || g2;
|
||||
|
||||
if (group.indexOf('term ') === 0) { return series.props[group.substring(5)]; }
|
||||
if (series.props[group] !== void 0) { return series.props[group]; }
|
||||
if (group === 'metric') { return metricName; }
|
||||
if (group === 'field') { return series.field; }
|
||||
|
||||
return match;
|
||||
});
|
||||
}
|
||||
|
||||
if (series.field && queryDef.isPipelineAgg(series.metric)) {
|
||||
var appliedAgg = _.find(target.metrics, { id: series.field });
|
||||
if (appliedAgg) {
|
||||
metricName += ' ' + queryDef.describeMetric(appliedAgg);
|
||||
} else {
|
||||
metricName = 'Unset';
|
||||
}
|
||||
} else if (series.field) {
|
||||
metricName += ' ' + series.field;
|
||||
}
|
||||
|
||||
var propKeys = _.keys(series.props);
|
||||
if (propKeys.length === 0) {
|
||||
return metricName;
|
||||
}
|
||||
|
||||
var name = '';
|
||||
for (var propName in series.props) {
|
||||
name += series.props[propName] + ' ';
|
||||
}
|
||||
|
||||
if (metricTypeCount === 1) {
|
||||
return name.trim();
|
||||
}
|
||||
|
||||
return name.trim() + ' ' + metricName;
|
||||
};
|
||||
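An alias-templating sketch for the regex branch above, assuming queryDef maps 'avg' to the display text 'Average' (all values hypothetical):

// target.alias = '{{term host}} - {{metric}}'
// series.props = {host: 'server-1'}, series.metric = 'avg'
// 'term host' reads series.props['host']; 'metric' resolves via _getMetricName
// => 'server-1 - Average'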
|
||||
ElasticResponse.prototype.nameSeries = function(seriesList, target) {
|
||||
var metricTypeCount = _.uniq(_.map(seriesList, 'metric')).length;
|
||||
var fieldNameCount = _.uniq(_.map(seriesList, 'field')).length;
|
||||
|
||||
for (var i = 0; i < seriesList.length; i++) {
|
||||
var series = seriesList[i];
|
||||
series.target = this._getSeriesName(series, target, metricTypeCount, fieldNameCount);
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.processHits = function(hits, seriesList) {
|
||||
var series = {target: 'docs', type: 'docs', datapoints: [], total: hits.total, filterable: true};
|
||||
var propName, hit, doc, i;
|
||||
|
||||
for (i = 0; i < hits.hits.length; i++) {
|
||||
hit = hits.hits[i];
|
||||
doc = {
|
||||
_id: hit._id,
|
||||
_type: hit._type,
|
||||
_index: hit._index
|
||||
};
|
||||
|
||||
if (hit._source) {
|
||||
for (propName in hit._source) {
|
||||
doc[propName] = hit._source[propName];
|
||||
}
|
||||
}
|
||||
|
||||
for (propName in hit.fields) {
|
||||
doc[propName] = hit.fields[propName];
|
||||
}
|
||||
series.datapoints.push(doc);
|
||||
}
|
||||
|
||||
seriesList.push(series);
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.trimDatapoints = function(aggregations, target) {
|
||||
var histogram = _.find(target.bucketAggs, { type: 'date_histogram'});
|
||||
|
||||
var shouldDropFirstAndLast = histogram && histogram.settings && histogram.settings.trimEdges;
|
||||
if (shouldDropFirstAndLast) {
|
||||
var trim = histogram.settings.trimEdges;
|
||||
for (var prop in aggregations) {
|
||||
var points = aggregations[prop];
|
||||
if (points.datapoints.length > trim * 2) {
|
||||
points.datapoints = points.datapoints.slice(trim, points.datapoints.length - trim);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.getErrorFromElasticResponse = function(response, err) {
|
||||
var result: any = {};
|
||||
result.data = JSON.stringify(err, null, 4);
|
||||
if (err.root_cause && err.root_cause.length > 0 && err.root_cause[0].reason) {
|
||||
result.message = err.root_cause[0].reason;
|
||||
} else {
|
||||
result.message = err.reason || 'Unknown elastic error response';
|
||||
}
|
||||
|
||||
if (response.$$config) {
|
||||
result.config = response.$$config;
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
ElasticResponse.prototype.getTimeSeries = function() {
|
||||
var seriesList = [];
|
||||
|
||||
for (var i = 0; i < this.response.responses.length; i++) {
|
||||
var response = this.response.responses[i];
|
||||
if (response.error) {
|
||||
throw this.getErrorFromElasticResponse(this.response, response.error);
|
||||
}
|
||||
|
||||
if (response.hits && response.hits.hits.length > 0) {
|
||||
this.processHits(response.hits, seriesList);
|
||||
}
|
||||
|
||||
if (response.aggregations) {
|
||||
var aggregations = response.aggregations;
|
||||
var target = this.targets[i];
|
||||
var tmpSeriesList = [];
|
||||
var table = new TableModel();
|
||||
|
||||
this.processBuckets(aggregations, target, tmpSeriesList, table, {}, 0);
|
||||
this.trimDatapoints(tmpSeriesList, target);
|
||||
this.nameSeries(tmpSeriesList, target);
|
||||
|
||||
for (var y = 0; y < tmpSeriesList.length; y++) {
|
||||
seriesList.push(tmpSeriesList[y]);
|
||||
}
|
||||
|
||||
if (table.rows.length > 0) {
|
||||
seriesList.push(table);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { data: seriesList };
|
||||
};
|
||||
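A minimal consumption sketch (targets and response are hypothetical inputs):

var result = new ElasticResponse(targets, response).getTimeSeries();
// result.data mixes time-series objects ({target, datapoints}) with, whenever a
// non-date_histogram bucket agg filled rows, a TableModel entry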
|
||||
@@ -129,7 +129,10 @@ describe('ElasticDatasource', function() {
|
||||
'@timestamp': {type: 'date'},
|
||||
beat: {
|
||||
properties: {
|
||||
name: {type: 'string'},
|
||||
name: {
|
||||
fields: {raw: {type: 'keyword'}},
|
||||
type: 'string'
|
||||
},
|
||||
hostname: {type: 'string'},
|
||||
}
|
||||
},
|
||||
@@ -169,6 +172,7 @@ describe('ElasticDatasource', function() {
|
||||
var fields = _.map(fieldObjects, 'text');
|
||||
expect(fields).to.eql([
|
||||
'@timestamp',
|
||||
'beat.name.raw',
|
||||
'beat.name',
|
||||
'beat.hostname',
|
||||
'system.cpu.system',
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
|
||||
import {describe, beforeEach, it, expect} from 'test/lib/common';
|
||||
import ElasticResponse from '../elastic_response';
|
||||
import {ElasticResponse} from '../elastic_response';
|
||||
|
||||
describe('ElasticResponse', function() {
|
||||
var targets;
|
||||
@@ -387,10 +387,9 @@ describe('ElasticResponse', function() {
|
||||
result = new ElasticResponse(targets, response).getTimeSeries();
|
||||
});
|
||||
|
||||
it('should return docs with byte and count', function() {
|
||||
expect(result.data[0].datapoints.length).to.be(3);
|
||||
expect(result.data[0].datapoints[0].Count).to.be(1);
|
||||
expect(result.data[0].datapoints[0].bytes).to.be(1000);
|
||||
it('should return table with byte and count', function() {
|
||||
expect(result.data[0].rows.length).to.be(3);
|
||||
expect(result.data[0].columns).to.eql([{text: 'bytes', filterable: true}, {text: 'Count'}]);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -530,14 +529,14 @@ describe('ElasticResponse', function() {
|
||||
|
||||
it('should return table', function() {
|
||||
expect(result.data.length).to.be(1);
|
||||
expect(result.data[0].type).to.be('docs');
|
||||
expect(result.data[0].datapoints.length).to.be(2);
|
||||
expect(result.data[0].datapoints[0].host).to.be("server-1");
|
||||
expect(result.data[0].datapoints[0].Average).to.be(1000);
|
||||
expect(result.data[0].datapoints[0].Count).to.be(369);
|
||||
expect(result.data[0].type).to.be('table');
|
||||
expect(result.data[0].rows.length).to.be(2);
|
||||
expect(result.data[0].rows[0][0]).to.be("server-1");
|
||||
expect(result.data[0].rows[0][1]).to.be(1000);
|
||||
expect(result.data[0].rows[0][2]).to.be(369);
|
||||
|
||||
expect(result.data[0].datapoints[1].host).to.be("server-2");
|
||||
expect(result.data[0].datapoints[1].Average).to.be(2000);
|
||||
expect(result.data[0].rows[1][0]).to.be("server-2");
|
||||
expect(result.data[0].rows[1][1]).to.be(2000);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -573,10 +572,9 @@ describe('ElasticResponse', function() {
|
||||
});
|
||||
|
||||
it('should include field in metric name', function() {
|
||||
expect(result.data[0].type).to.be('docs');
|
||||
expect(result.data[0].datapoints[0].Average).to.be(undefined);
|
||||
expect(result.data[0].datapoints[0]['Average test']).to.be(1000);
|
||||
expect(result.data[0].datapoints[0]['Average test2']).to.be(3000);
|
||||
expect(result.data[0].type).to.be('table');
|
||||
expect(result.data[0].rows[0][1]).to.be(1000);
|
||||
expect(result.data[0].rows[0][2]).to.be(3000);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -20,9 +20,10 @@ function (angular, _, $, gfunc) {
|
||||
|
||||
return {
|
||||
link: function($scope, elem) {
|
||||
var categories = gfunc.getCategories();
|
||||
var allFunctions = getAllFunctionNames(categories);
|
||||
var ctrl = $scope.ctrl;
|
||||
var graphiteVersion = ctrl.datasource.graphiteVersion;
|
||||
var categories = gfunc.getCategories(graphiteVersion);
|
||||
var allFunctions = getAllFunctionNames(categories);
|
||||
|
||||
$scope.functionMenu = createFunctionDropDownMenu(categories);
|
||||
|
||||
@@ -94,14 +95,16 @@ function (angular, _, $, gfunc) {
|
||||
|
||||
function createFunctionDropDownMenu(categories) {
|
||||
return _.map(categories, function(list, category) {
|
||||
return {
|
||||
text: category,
|
||||
submenu: _.map(list, function(value) {
|
||||
var submenu = _.map(list, function(value) {
|
||||
return {
|
||||
text: value.name,
|
||||
click: "ctrl.addFunction('" + value.name + "')",
|
||||
};
|
||||
})
|
||||
});
|
||||
|
||||
return {
|
||||
text: category,
|
||||
submenu: submenu
|
||||
};
|
||||
});
|
||||
}
|
||||
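The refactor above builds the submenu first and then returns it together with the category label. The resulting entry shape, with hypothetical function names:

// {
//   text: 'Combine',
//   submenu: [
//     {text: 'sumSeries',     click: "ctrl.addFunction('sumSeries')"},
//     {text: 'averageSeries', click: "ctrl.addFunction('averageSeries')"},
//   ]
// }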
|
||||
20
public/app/plugins/datasource/graphite/config_ctrl.ts
Normal file
@@ -0,0 +1,20 @@
|
||||
///<reference path="../../../headers/common.d.ts" />
|
||||
|
||||
import angular from 'angular';
|
||||
import _ from 'lodash';
|
||||
|
||||
export class GraphiteConfigCtrl {
|
||||
static templateUrl = 'public/app/plugins/datasource/graphite/partials/config.html';
|
||||
current: any;
|
||||
|
||||
/** @ngInject */
|
||||
constructor($scope) {
|
||||
this.current.jsonData.graphiteVersion = this.current.jsonData.graphiteVersion || '0.9';
|
||||
}
|
||||
|
||||
graphiteVersions = [
|
||||
{name: '0.9.x', value: '0.9'},
|
||||
{name: '1.0.x', value: '1.0'},
|
||||
];
|
||||
}
|
||||
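A sketch of how the default interacts with pre-existing data sources (the settings literal is hypothetical):

var instanceSettings: any = {jsonData: {}};  // source saved before this change
var version = instanceSettings.jsonData.graphiteVersion || '0.9';
// => '0.9': old sources keep the 0.9.x function list until the Version
// dropdown on the config page is set to 1.0.x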
|
||||
@@ -11,6 +11,7 @@ export function GraphiteDatasource(instanceSettings, $q, backendSrv, templateSrv
|
||||
this.basicAuth = instanceSettings.basicAuth;
|
||||
this.url = instanceSettings.url;
|
||||
this.name = instanceSettings.name;
|
||||
this.graphiteVersion = instanceSettings.jsonData.graphiteVersion || '0.9';
|
||||
this.cacheTimeout = instanceSettings.cacheTimeout;
|
||||
this.withCredentials = instanceSettings.withCredentials;
|
||||
this.render_method = instanceSettings.render_method || 'POST';
|
||||
@@ -175,8 +176,8 @@ export function GraphiteDatasource(instanceSettings, $q, backendSrv, templateSrv
|
||||
};
|
||||
|
||||
if (options && options.range) {
|
||||
httpOptions.params.from = this.translateTime(options.range.raw.from, false);
|
||||
httpOptions.params.until = this.translateTime(options.range.raw.to, true);
|
||||
httpOptions.params.from = this.translateTime(options.range.from, false);
|
||||
httpOptions.params.until = this.translateTime(options.range.to, true);
|
||||
}
|
||||
|
||||
return this.doGraphiteRequest(httpOptions).then(results => {
|
||||
|
||||
@@ -115,27 +115,6 @@ function (_, $) {
|
||||
category: categories.Combine,
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'mapSeries',
|
||||
shortName: 'map',
|
||||
params: [{ name: "node", type: 'int' }],
|
||||
defaultParams: [3],
|
||||
category: categories.Combine,
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'reduceSeries',
|
||||
shortName: 'reduce',
|
||||
params: [
|
||||
{ name: "function", type: 'string', options: ['asPercent', 'diffSeries', 'divideSeries'] },
|
||||
{ name: "reduceNode", type: 'int', options: [0,1,2,3,4,5,6,7,8,9,10,11,12,13] },
|
||||
{ name: "reduceMatchers", type: 'string' },
|
||||
{ name: "reduceMatchers", type: 'string' },
|
||||
],
|
||||
defaultParams: ['asPercent', 2, 'used_bytes', 'total_bytes'],
|
||||
category: categories.Combine,
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'sumSeries',
|
||||
shortName: 'sum',
|
||||
@@ -152,11 +131,6 @@ function (_, $) {
|
||||
defaultParams: [''],
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'isNonNull',
|
||||
category: categories.Combine,
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'rangeOfSeries',
|
||||
category: categories.Combine
|
||||
@@ -262,23 +236,6 @@ function (_, $) {
|
||||
defaultParams: [3, "sum"]
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: "groupByNodes",
|
||||
category: categories.Special,
|
||||
params: [
|
||||
{
|
||||
name: "function",
|
||||
type: "string",
|
||||
options: ['sum', 'avg', 'maxSeries']
|
||||
},
|
||||
{ name: "node", type: "int", options: [0,1,2,3,4,5,6,7,8,9,10,12] },
|
||||
{ name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true },
|
||||
{ name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true },
|
||||
{ name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true },
|
||||
],
|
||||
defaultParams: ["sum", 3]
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'aliasByNode',
|
||||
category: categories.Special,
|
||||
@@ -381,11 +338,6 @@ function (_, $) {
|
||||
defaultParams: [10]
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'offsetToZero',
|
||||
category: categories.Transform,
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'transformNull',
|
||||
category: categories.Transform,
|
||||
@@ -542,13 +494,6 @@ function (_, $) {
|
||||
defaultParams: ['exclude']
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: "grep",
|
||||
category: categories.Filter,
|
||||
params: [{ name: "grep", type: 'string' }],
|
||||
defaultParams: ['grep']
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'highestCurrent',
|
||||
category: categories.Filter,
|
||||
@@ -577,16 +522,6 @@ function (_, $) {
|
||||
defaultParams: [10]
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'weightedAverage',
|
||||
category: categories.Filter,
|
||||
params: [
|
||||
{ name: 'other', type: 'value_or_series', optional: true },
|
||||
{ name: "node", type: "int", options: [0,1,2,3,4,5,6,7,8,9,10,12] },
|
||||
],
|
||||
defaultParams: ['#A', 4]
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'movingMedian',
|
||||
category: categories.Filter,
|
||||
@@ -643,11 +578,6 @@ function (_, $) {
|
||||
defaultParams: [5]
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'removeEmptySeries',
|
||||
category: categories.Filter
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'useSeriesAbove',
|
||||
category: categories.Filter,
|
||||
@@ -659,6 +589,239 @@ function (_, $) {
|
||||
defaultParams: [0, 'search', 'replace']
|
||||
});
|
||||
|
||||
////////////////////
|
||||
// Graphite 1.0.x //
|
||||
////////////////////
|
||||
|
||||
addFuncDef({
|
||||
name: 'aggregateLine',
|
||||
category: categories.Combine,
|
||||
params: [{ name: "func", type: "select", options: ['sum', 'avg', 'min', 'max', 'last']}],
|
||||
defaultParams: ['avg'],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'averageOutsidePercentile',
|
||||
category: categories.Filter,
|
||||
params: [{ name: "n", type: "int", }],
|
||||
defaultParams: [95],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'delay',
|
||||
category: categories.Transform,
|
||||
params: [{ name: 'steps', type: 'int', }],
|
||||
defaultParams: [1],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'exponentialMovingAverage',
|
||||
category: categories.Calculate,
|
||||
params: [{ name: 'windowSize', type: 'int_or_interval', options: ['5', '7', '10', '5min', '10min', '30min', '1hour'] }],
|
||||
defaultParams: [10],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'fallbackSeries',
|
||||
category: categories.Special,
|
||||
params: [{ name: 'fallback', type: 'string' }],
|
||||
defaultParams: ['constantLine(0)'],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: "grep",
|
||||
category: categories.Filter,
|
||||
params: [{ name: "grep", type: 'string' }],
|
||||
defaultParams: ['grep'],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: "groupByNodes",
|
||||
category: categories.Special,
|
||||
params: [
|
||||
{
|
||||
name: "function",
|
||||
type: "string",
|
||||
options: ['sum', 'avg', 'maxSeries']
|
||||
},
|
||||
{ name: "node", type: "int", options: [0,1,2,3,4,5,6,7,8,9,10,12] },
|
||||
{ name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true },
|
||||
{ name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true },
|
||||
{ name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true },
|
||||
],
|
||||
defaultParams: ["sum", 3],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'integralByInterval',
|
||||
category: categories.Transform,
|
||||
params: [{ name: "intervalUnit", type: "select", options: ['1h', '6h', '12h', '1d', '2d', '7d', '14d', '30d'] }],
|
||||
defaultParams: ['1d'],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'interpolate',
|
||||
category: categories.Transform,
|
||||
params: [{ name: 'limit', type: 'int', optional: true}],
|
||||
defaultParams: [],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'invert',
|
||||
category: categories.Transform,
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'isNonNull',
|
||||
category: categories.Combine,
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'linearRegression',
|
||||
category: categories.Calculate,
|
||||
params: [
|
||||
{ name: "startSourceAt", type: "select", options: ['-1h', '-6h', '-12h', '-1d', '-2d', '-7d', '-14d', '-30d'], optional: true },
|
||||
{ name: "endSourceAt", type: "select", options: ['-1h', '-6h', '-12h', '-1d', '-2d', '-7d', '-14d', '-30d'], optional: true }
|
||||
],
|
||||
defaultParams: [],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'mapSeries',
|
||||
shortName: 'map',
|
||||
params: [{ name: "node", type: 'int' }],
|
||||
defaultParams: [3],
|
||||
category: categories.Combine,
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'movingMin',
|
||||
category: categories.Calculate,
|
||||
params: [{ name: 'windowSize', type: 'int_or_interval', options: ['5', '7', '10', '5min', '10min', '30min', '1hour'] }],
|
||||
defaultParams: [10],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'movingMax',
|
||||
category: categories.Calculate,
|
||||
params: [{ name: 'windowSize', type: 'int_or_interval', options: ['5', '7', '10', '5min', '10min', '30min', '1hour'] }],
|
||||
defaultParams: [10],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'movingSum',
|
||||
category: categories.Calculate,
|
||||
params: [{ name: 'windowSize', type: 'int_or_interval', options: ['5', '7', '10', '5min', '10min', '30min', '1hour'] }],
|
||||
defaultParams: [10],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: "multiplySeriesWithWildcards",
|
||||
category: categories.Calculate,
|
||||
params: [
|
||||
{ name: "position", type: "int", options: [0,1,2,3,4,5,6,7,8,9,10,12] },
|
||||
{ name: "position", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true },
|
||||
{ name: "position", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true },
|
||||
{ name: "position", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true },
|
||||
],
|
||||
defaultParams: [2],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'offsetToZero',
|
||||
category: categories.Transform,
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'pow',
|
||||
category: categories.Transform,
|
||||
params: [{ name: 'factor', type: 'int' }],
|
||||
defaultParams: [10],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'powSeries',
|
||||
category: categories.Transform,
|
||||
params: optionalSeriesRefArgs,
|
||||
defaultParams: [''],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'reduceSeries',
|
||||
shortName: 'reduce',
|
||||
params: [
|
||||
{ name: "function", type: 'string', options: ['asPercent', 'diffSeries', 'divideSeries'] },
|
||||
{ name: "reduceNode", type: 'int', options: [0,1,2,3,4,5,6,7,8,9,10,11,12,13] },
|
||||
{ name: "reduceMatchers", type: 'string' },
|
||||
{ name: "reduceMatchers", type: 'string' },
|
||||
],
|
||||
defaultParams: ['asPercent', 2, 'used_bytes', 'total_bytes'],
|
||||
category: categories.Combine,
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'removeBetweenPercentile',
|
||||
category: categories.Filter,
|
||||
params: [{ name: "n", type: "int", }],
|
||||
defaultParams: [95],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'removeEmptySeries',
|
||||
category: categories.Filter,
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'squareRoot',
|
||||
category: categories.Transform,
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'timeSlice',
|
||||
category: categories.Transform,
|
||||
params: [
|
||||
{ name: "startSliceAt", type: "select", options: ['-1h', '-6h', '-12h', '-1d', '-2d', '-7d', '-14d', '-30d']},
|
||||
{ name: "endSliceAt", type: "select", options: ['-1h', '-6h', '-12h', '-1d', '-2d', '-7d', '-14d', '-30d'], optional: true }
|
||||
],
|
||||
defaultParams: ['-1h'],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
addFuncDef({
|
||||
name: 'weightedAverage',
|
||||
category: categories.Filter,
|
||||
params: [
|
||||
{ name: 'other', type: 'value_or_series', optional: true },
|
||||
{ name: "node", type: "int", options: [0,1,2,3,4,5,6,7,8,9,10,12] },
|
||||
],
|
||||
defaultParams: ['#A', 4],
|
||||
version: '1.0'
|
||||
});
|
||||
|
||||
_.each(categories, function(funcList, catName) {
|
||||
categories[catName] = _.sortBy(funcList, 'name');
|
||||
});
|
||||
@@ -737,6 +900,16 @@ function (_, $) {
|
||||
this.text = text;
|
||||
};
|
||||
|
||||
function isVersionRelatedFunction(func, graphiteVersion) {
|
||||
return isVersionGreaterOrEqual(graphiteVersion, func.version) || !func.version;
|
||||
}
|
||||
|
||||
function isVersionGreaterOrEqual(a, b) {
|
||||
var a_num = Number(a);
|
||||
var b_num = Number(b);
|
||||
return a_num >= b_num;
|
||||
}
|
||||
|
||||
return {
|
||||
createFuncInstance: function(funcDef, options) {
|
||||
if (_.isString(funcDef)) {
|
||||
@@ -752,8 +925,18 @@ function (_, $) {
|
||||
return index[name];
|
||||
},
|
||||
|
||||
getCategories: function() {
|
||||
return categories;
|
||||
getCategories: function(graphiteVersion) {
|
||||
var filteredCategories = {};
|
||||
_.each(categories, function(functions, category) {
|
||||
var filteredFuncs = _.filter(functions, function(func) {
|
||||
return isVersionRelatedFunction(func, graphiteVersion);
|
||||
});
|
||||
if (filteredFuncs.length) {
|
||||
filteredCategories[category] = filteredFuncs;
|
||||
}
|
||||
});
|
||||
|
||||
return filteredCategories;
|
||||
}
|
||||
};
|
||||
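A usage sketch for the new signature, with the version strings defined in config_ctrl:

var categories09 = gfunc.getCategories('0.9');  // 1.0-only funcs filtered out
var categories10 = gfunc.getCategories('1.0');  // includes movingMin, grep, etc.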
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
import {GraphiteDatasource} from './datasource';
|
||||
import {GraphiteQueryCtrl} from './query_ctrl';
|
||||
|
||||
class GraphiteConfigCtrl {
|
||||
static templateUrl = 'partials/config.html';
|
||||
}
|
||||
import {GraphiteConfigCtrl} from './config_ctrl';
|
||||
|
||||
class GraphiteQueryOptionsCtrl {
|
||||
static templateUrl = 'partials/query.options.html';
|
||||
|
||||
@@ -3,3 +3,16 @@
|
||||
suggest-url="http://localhost:8080">
|
||||
</datasource-http-settings>
|
||||
|
||||
<h3 class="page-heading">Graphite details</h3>
|
||||
|
||||
<div class="gf-form-group">
|
||||
<div class="gf-form">
|
||||
<span class="gf-form-label width-8">
|
||||
Version
|
||||
<info-popover mode="right-normal" position="top center">
|
||||
This option controls what functions are available in the Graphite query editor.
|
||||
</info-popover>
|
||||
</span>
|
||||
<select class="gf-form-input gf-size-auto" ng-model="ctrl.current.jsonData.graphiteVersion" ng-options="f.value as f.name for f in ctrl.graphiteVersions"></select>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -5,7 +5,7 @@ import {GraphiteDatasource} from "../datasource";
|
||||
|
||||
describe('graphiteDatasource', function() {
|
||||
var ctx = new helpers.ServiceTestContext();
|
||||
var instanceSettings: any = {url: [''], name: 'graphiteProd'};
|
||||
var instanceSettings: any = {url: [''], name: 'graphiteProd', jsonData: {}};
|
||||
|
||||
beforeEach(angularMocks.module('grafana.core'));
|
||||
beforeEach(angularMocks.module('grafana.services'));
|
||||
|
||||
@@ -28,7 +28,14 @@ An annotation is an event that is overlayed on top of graphs. The query can have
|
||||
|
||||
Macros:
|
||||
- $__time(column) -> UNIX_TIMESTAMP(column) as time_sec
|
||||
- $__timeFilter(column) -> UNIX_TIMESTAMP(time_date_time) > from AND UNIX_TIMESTAMP(time_date_time) < 1492750877
|
||||
- $__timeFilter(column) -> UNIX_TIMESTAMP(time_date_time) > 1492750877 AND UNIX_TIMESTAMP(time_date_time) < 1492750877
|
||||
- $__unixEpochFilter(column) -> time_unix_epoch > 1492750877 AND time_unix_epoch < 1492750877
|
||||
|
||||
Or build your own conditionals using these macros which just return the values:
|
||||
- $__timeFrom() -> FROM_UNIXTIME(1492750877)
|
||||
- $__timeTo() -> FROM_UNIXTIME(1492750877)
|
||||
- $__unixEpochFrom() -> 1492750877
|
||||
- $__unixEpochTo() -> 1492750877
|
||||
</pre>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -54,7 +54,14 @@ Table:
|
||||
|
||||
Macros:
|
||||
- $__time(column) -> UNIX_TIMESTAMP(column) as time_sec
|
||||
- $__timeFilter(column) -> UNIX_TIMESTAMP(time_date_time) ≥ from AND UNIX_TIMESTAMP(time_date_time) ≤ 1492750877
|
||||
- $__timeFilter(column) -> UNIX_TIMESTAMP(time_date_time) ≥ 1492750877 AND UNIX_TIMESTAMP(time_date_time) ≤ 1492750877
|
||||
- $__unixEpochFilter(column) -> time_unix_epoch > 1492750877 AND time_unix_epoch < 1492750877
|
||||
|
||||
Or build your own conditionals using these macros which just return the values:
|
||||
- $__timeFrom() -> FROM_UNIXTIME(1492750877)
|
||||
- $__timeTo() -> FROM_UNIXTIME(1492750877)
|
||||
- $__unixEpochFrom() -> 1492750877
|
||||
- $__unixEpochTo() -> 1492750877
|
||||
</pre>
|
||||
</div>
|
||||
|
||||
|
||||
300
public/app/plugins/panel/heatmap/color_legend.ts
Normal file
@@ -0,0 +1,300 @@
|
||||
///<reference path="../../../headers/common.d.ts" />
|
||||
import angular from 'angular';
|
||||
import _ from 'lodash';
|
||||
import $ from 'jquery';
|
||||
import d3 from 'd3';
|
||||
import {contextSrv} from 'app/core/core';
|
||||
import {tickStep} from 'app/core/utils/ticks';
|
||||
|
||||
let module = angular.module('grafana.directives');
|
||||
|
||||
/**
|
||||
* Color legend for heatmap editor.
|
||||
*/
|
||||
module.directive('colorLegend', function() {
|
||||
return {
|
||||
restrict: 'E',
|
||||
template: '<div class="heatmap-color-legend"><svg width="16.8rem" height="24px"></svg></div>',
|
||||
link: function(scope, elem, attrs) {
|
||||
let ctrl = scope.ctrl;
|
||||
let panel = scope.ctrl.panel;
|
||||
|
||||
render();
|
||||
|
||||
ctrl.events.on('render', function() {
|
||||
render();
|
||||
});
|
||||
|
||||
function render() {
|
||||
let legendElem = $(elem).find('svg');
|
||||
let legendWidth = Math.floor(legendElem.outerWidth());
|
||||
|
||||
if (panel.color.mode === 'spectrum') {
|
||||
let colorScheme = _.find(ctrl.colorSchemes, {value: panel.color.colorScheme});
|
||||
let colorScale = getColorScale(colorScheme, legendWidth);
|
||||
drawSimpleColorLegend(elem, colorScale);
|
||||
} else if (panel.color.mode === 'opacity') {
|
||||
let colorOptions = panel.color;
|
||||
drawSimpleOpacityLegend(elem, colorOptions);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
/**
|
||||
* Heatmap legend with scale values.
|
||||
*/
|
||||
module.directive('heatmapLegend', function() {
|
||||
return {
|
||||
restrict: 'E',
|
||||
template: '<div class="heatmap-color-legend"><svg width="100px" height="14px"></svg></div>',
|
||||
link: function(scope, elem, attrs) {
|
||||
let ctrl = scope.ctrl;
|
||||
let panel = scope.ctrl.panel;
|
||||
|
||||
render();
|
||||
ctrl.events.on('render', function() {
|
||||
render();
|
||||
});
|
||||
|
||||
function render() {
|
||||
clearLegend(elem);
|
||||
if (!_.isEmpty(ctrl.data) && !_.isEmpty(ctrl.data.cards)) {
|
||||
let rangeFrom = 0;
|
||||
let rangeTo = ctrl.data.cardStats.max;
|
||||
let maxValue = panel.color.max || rangeTo;
|
||||
let minValue = panel.color.min || 0;
|
||||
|
||||
if (panel.color.mode === 'spectrum') {
|
||||
let colorScheme = _.find(ctrl.colorSchemes, {value: panel.color.colorScheme});
|
||||
drawColorLegend(elem, colorScheme, rangeFrom, rangeTo, maxValue, minValue);
|
||||
} else if (panel.color.mode === 'opacity') {
|
||||
let colorOptions = panel.color;
|
||||
drawOpacityLegend(elem, colorOptions, rangeFrom, rangeTo, maxValue, minValue);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
function drawColorLegend(elem, colorScheme, rangeFrom, rangeTo, maxValue, minValue) {
|
||||
let legendElem = $(elem).find('svg');
|
||||
let legend = d3.select(legendElem.get(0));
|
||||
clearLegend(elem);
|
||||
|
||||
let legendWidth = Math.floor(legendElem.outerWidth()) - 30;
|
||||
let legendHeight = legendElem.attr("height");
|
||||
|
||||
let rangeStep = 1;
|
||||
if (rangeTo - rangeFrom > legendWidth) {
|
||||
rangeStep = Math.floor((rangeTo - rangeFrom) / legendWidth);
|
||||
}
|
||||
let widthFactor = legendWidth / (rangeTo - rangeFrom);
|
||||
let valuesRange = d3.range(rangeFrom, rangeTo, rangeStep);
|
||||
|
||||
let colorScale = getColorScale(colorScheme, maxValue, minValue);
|
||||
legend.selectAll(".heatmap-color-legend-rect")
|
||||
.data(valuesRange)
|
||||
.enter().append("rect")
|
||||
.attr("x", d => d * widthFactor)
|
||||
.attr("y", 0)
|
||||
.attr("width", rangeStep * widthFactor + 1) // Overlap rectangles to prevent gaps
|
||||
.attr("height", legendHeight)
|
||||
.attr("stroke-width", 0)
|
||||
.attr("fill", d => colorScale(d));
|
||||
|
||||
drawLegendValues(elem, colorScale, rangeFrom, rangeTo, maxValue, minValue, legendWidth);
|
||||
}
|
||||
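A worked example of the rect layout above, with hypothetical sizes:

// legendWidth = 300, rangeTo - rangeFrom = 600
// rangeStep   = floor(600 / 300) = 2
// widthFactor = 300 / 600 = 0.5
// rect pitch  = rangeStep * widthFactor = 1px, rect width = 1 + 1 = 2px
// the +1 overlap is what hides seams between adjacent rects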
|
||||
function drawOpacityLegend(elem, options, rangeFrom, rangeTo, maxValue, minValue) {
|
||||
let legendElem = $(elem).find('svg');
|
||||
let legend = d3.select(legendElem.get(0));
|
||||
clearLegend(elem);
|
||||
|
||||
let legendWidth = Math.floor(legendElem.outerWidth()) - 30;
|
||||
let legendHeight = legendElem.attr("height");
|
||||
|
||||
let rangeStep = 10;
|
||||
let widthFactor = legendWidth / (rangeTo - rangeFrom);
|
||||
let valuesRange = d3.range(rangeFrom, rangeTo, rangeStep);
|
||||
|
||||
let opacityScale = getOpacityScale(options, maxValue, minValue);
|
||||
legend.selectAll(".heatmap-opacity-legend-rect")
|
||||
.data(valuesRange)
|
||||
.enter().append("rect")
|
||||
.attr("x", d => d * widthFactor)
|
||||
.attr("y", 0)
|
||||
.attr("width", rangeStep * widthFactor)
|
||||
.attr("height", legendHeight)
|
||||
.attr("stroke-width", 0)
|
||||
.attr("fill", options.cardColor)
|
||||
.style("opacity", d => opacityScale(d));
|
||||
|
||||
drawLegendValues(elem, opacityScale, rangeFrom, rangeTo, maxValue, minValue, legendWidth);
|
||||
}
|
||||
|
||||
function drawLegendValues(elem, colorScale, rangeFrom, rangeTo, maxValue, minValue, legendWidth) {
|
||||
let legendElem = $(elem).find('svg');
|
||||
let legend = d3.select(legendElem.get(0));
|
||||
|
||||
if (legendWidth <= 0 || legendElem.get(0).childNodes.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
let legendValueDomain = _.sortBy(colorScale.domain());
|
||||
let legendValueScale = d3.scaleLinear()
|
||||
.domain([0, rangeTo])
|
||||
.range([0, legendWidth]);
|
||||
|
||||
let ticks = buildLegendTicks(0, rangeTo, maxValue, minValue);
|
||||
let xAxis = d3.axisBottom(legendValueScale)
|
||||
.tickValues(ticks)
|
||||
.tickSize(2);
|
||||
|
||||
let colorRect = legendElem.find(":first-child");
|
||||
let posY = colorRect.height() + 2;
|
||||
let posX = getSvgElemX(colorRect);
|
||||
d3.select(legendElem.get(0)).append("g")
|
||||
.attr("class", "axis")
|
||||
.attr("transform", "translate(" + posX + "," + posY + ")")
|
||||
.call(xAxis);
|
||||
|
||||
legend.select(".axis").select(".domain").remove();
|
||||
}
|
||||
|
||||
function drawSimpleColorLegend(elem, colorScale) {
|
||||
let legendElem = $(elem).find('svg');
|
||||
clearLegend(elem);
|
||||
|
||||
let legendWidth = Math.floor(legendElem.outerWidth());
|
||||
let legendHeight = legendElem.attr("height");
|
||||
|
||||
if (legendWidth) {
|
||||
let valuesNumber = Math.floor(legendWidth / 2);
|
||||
let rangeStep = Math.floor(legendWidth / valuesNumber);
|
||||
let valuesRange = d3.range(0, legendWidth, rangeStep);
|
||||
|
||||
let legend = d3.select(legendElem.get(0));
|
||||
var legendRects = legend.selectAll(".heatmap-color-legend-rect").data(valuesRange);
|
||||
|
||||
legendRects.enter().append("rect")
|
||||
.attr("x", d => d)
|
||||
.attr("y", 0)
|
||||
.attr("width", rangeStep + 1) // Overlap rectangles to prevent gaps
|
||||
.attr("height", legendHeight)
|
||||
.attr("stroke-width", 0)
|
||||
.attr("fill", d => colorScale(d));
|
||||
}
|
||||
}
|
||||
|
||||
function drawSimpleOpacityLegend(elem, options) {
|
||||
let legendElem = $(elem).find('svg');
|
||||
clearLegend(elem);
|
||||
|
||||
let legend = d3.select(legendElem.get(0));
|
||||
let legendWidth = Math.floor(legendElem.outerWidth());
|
||||
let legendHeight = legendElem.attr("height");
|
||||
|
||||
if (legendWidth) {
|
||||
let legendOpacityScale;
|
||||
if (options.colorScale === 'linear') {
|
||||
legendOpacityScale = d3.scaleLinear()
|
||||
.domain([0, legendWidth])
|
||||
.range([0, 1]);
|
||||
} else if (options.colorScale === 'sqrt') {
|
||||
legendOpacityScale = d3.scalePow().exponent(options.exponent)
|
||||
.domain([0, legendWidth])
|
||||
.range([0, 1]);
|
||||
}
|
||||
|
||||
let rangeStep = 10;
|
||||
let valuesRange = d3.range(0, legendWidth, rangeStep);
|
||||
var legendRects = legend.selectAll(".heatmap-opacity-legend-rect").data(valuesRange);
|
||||
|
||||
legendRects.enter().append("rect")
|
||||
.attr("x", d => d)
|
||||
.attr("y", 0)
|
||||
.attr("width", rangeStep)
|
||||
.attr("height", legendHeight)
|
||||
.attr("stroke-width", 0)
|
||||
.attr("fill", options.cardColor)
|
||||
.style("opacity", d => legendOpacityScale(d));
|
||||
}
|
||||
}
|
||||
|
||||
function clearLegend(elem) {
|
||||
let legendElem = $(elem).find('svg');
|
||||
legendElem.empty();
|
||||
}
|
||||
|
||||
function getColorScale(colorScheme, maxValue, minValue = 0) {
|
||||
let colorInterpolator = d3[colorScheme.value];
|
||||
let colorScaleInverted = colorScheme.invert === 'always' ||
|
||||
(colorScheme.invert === 'dark' && !contextSrv.user.lightTheme);
|
||||
|
||||
let start = colorScaleInverted ? maxValue : minValue;
|
||||
let end = colorScaleInverted ? minValue : maxValue;
|
||||
|
||||
return d3.scaleSequential(colorInterpolator).domain([start, end]);
|
||||
}
|
||||
|
||||
function getOpacityScale(options, maxValue, minValue = 0) {
  let legendOpacityScale;
  if (options.colorScale === 'linear') {
    legendOpacityScale = d3.scaleLinear()
      .domain([minValue, maxValue])
      .range([0, 1]);
  } else if (options.colorScale === 'sqrt') {
    legendOpacityScale = d3.scalePow().exponent(options.exponent)
      .domain([minValue, maxValue])
      .range([0, 1]);
  }
  return legendOpacityScale;
}

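Linear vs. sqrt in practice, assuming a hypothetical max of 100: the power scale boosts low counts so sparse buckets stay visible.

getOpacityScale({colorScale: 'linear'}, 100)(25);              // 0.25
getOpacityScale({colorScale: 'sqrt', exponent: 0.5}, 100)(25); // 0.5, i.e. sqrt(25 / 100)
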
function getSvgElemX(elem) {
  let svgElem = elem.get(0);
  if (svgElem && svgElem.x && svgElem.x.baseVal) {
    return svgElem.x.baseVal.value;
  } else {
    return 0;
  }
}

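Context for the guard in getSvgElemX: SVG shape elements expose positional attributes as SVGAnimatedLength objects rather than plain numbers, hence the baseVal check before dereferencing. For example:

// For <rect x="24" ...> inside an SVG document:
let rect = document.querySelector('rect');
rect.x.baseVal.value; // 24; a non-SVG or detached element falls back to 0 above
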
function buildLegendTicks(rangeFrom, rangeTo, maxValue, minValue) {
  let range = rangeTo - rangeFrom;
  let tickStepSize = tickStep(rangeFrom, rangeTo, 3);
  let ticksNum = Math.round(range / tickStepSize);
  let ticks = [];

  for (let i = 0; i < ticksNum; i++) {
    let current = tickStepSize * i;
    // Add user-defined min and max if they have been set
    if (isValueCloseTo(minValue, current, tickStepSize)) {
      ticks.push(minValue);
      continue;
    } else if (minValue < current) {
      ticks.push(minValue);
    }
    if (isValueCloseTo(maxValue, current, tickStepSize)) {
      ticks.push(maxValue);
      continue;
    } else if (maxValue < current) {
      ticks.push(maxValue);
    }
    ticks.push(current);
  }
  if (!isValueCloseTo(maxValue, rangeTo, tickStepSize)) {
    ticks.push(maxValue);
  }
  ticks.push(rangeTo);
  ticks = _.sortBy(_.uniq(ticks));
  return ticks;
}

function isValueCloseTo(val, valueTo, step) {
  let diff = Math.abs(val - valueTo);
  return diff < step * 0.3;
}

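A worked pass through buildLegendTicks, assuming tickStep(0, 100, 3) returns 50 (as d3.tickStep would for this range):

buildLegendTicks(0, 100, 90, 10);
// ticksNum = round(100 / 50) = 2
// i = 0: current = 0;  |10 - 0| = 10 < 50 * 0.3, so the tick snaps to minValue: push 10
// i = 1: current = 50; minValue 10 < 50, push 10 again (deduped below), then push 50
// after the loop: |90 - 100| = 10 < 15, so maxValue counts as covered by rangeTo
// push rangeTo, then uniq + sort: [10, 50, 100]
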
@@ -1,4 +1,10 @@
///<reference path="../../../headers/common.d.ts" />
import _ from 'lodash';
import $ from 'jquery';
import d3 from 'd3';
import {contextSrv} from 'app/core/core';

const COLOR_LEGEND_SELECTOR = '.heatmap-color-legend';

export class HeatmapDisplayEditorCtrl {
  panel: any;

@@ -7,7 +7,7 @@ import TimeSeries from 'app/core/time_series';
import {axesEditor} from './axes_editor';
import {heatmapDisplayEditor} from './display_editor';
import rendering from './rendering';
import { convertToHeatMap, elasticHistogramToHeatmap, calculateBucketSize, getMinLog} from './heatmap_data_converter';
import {convertToHeatMap, convertToCards, elasticHistogramToHeatmap, calculateBucketSize, getMinLog} from './heatmap_data_converter';

let X_BUCKET_NUMBER_DEFAULT = 30;
let Y_BUCKET_NUMBER_DEFAULT = 10;

@@ -26,6 +26,9 @@ let panelDefaults = {
    exponent: 0.5,
    colorScheme: 'interpolateOranges',
  },
  legend: {
    show: false
  },
  dataFormat: 'timeseries',
  xAxis: {
    show: true,

@@ -188,11 +191,15 @@ export class HeatmapCtrl extends MetricsPanelCtrl {
      yBucketSize = 1;
    }

    let {cards, cardStats} = convertToCards(bucketsData);

    this.data = {
      buckets: bucketsData,
      heatmapStats: heatmapStats,
      xBucketSize: xBucketSize,
      yBucketSize: yBucketSize
      yBucketSize: yBucketSize,
      cards: cards,
      cardStats: cardStats
    };
  }

@@ -51,6 +51,7 @@ function elasticHistogramToHeatmap(seriesList) {
 * @return {Array} Array of "card" objects
 */
function convertToCards(buckets) {
  let min = 0, max = 0;
  let cards = [];
  _.forEach(buckets, xBucket => {
    _.forEach(xBucket.buckets, yBucket => {
@@ -62,10 +63,19 @@ function convertToCards(buckets) {
        count: yBucket.count,
      };
      cards.push(card);

      if (cards.length === 1) {
        min = yBucket.count;
        max = yBucket.count;
      }

      min = yBucket.count < min ? yBucket.count : min;
      max = yBucket.count > max ? yBucket.count : max;
    });
  });

  return cards;
  let cardStats = {min, max};
  return {cards, cardStats};
}

/**

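The return shape changes here from a bare array to an object carrying count stats, so callers no longer re-scan the cards; a minimal consumer sketch (variable names are illustrative):

let {cards, cardStats} = convertToCards(bucketsData);
// cards: flat array of {x, y, count, ...}, one entry per non-empty y-bucket
// cardStats: {min, max} over card.count
let maxValue = panel.color.max || cardStats.max; // the pattern rendering.ts adopts later in this diff
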
@@ -7,5 +7,8 @@

    <div class="heatmap-panel" ng-dblclick="ctrl.zoomOut()"></div>
  </div>
  <div class="heatmap-legend-wrapper" ng-if="ctrl.panel.legend.show">
    <heatmap-legend></heatmap-legend>
  </div>
</div>
<div class="clearfix"></div>

@@ -1,5 +1,5 @@
///<reference path="../../../headers/common.d.ts" />

import './color_legend';
import {HeatmapCtrl} from './heatmap_ctrl';

export {

@@ -25,9 +25,6 @@
      <label class="gf-form-label width-9">Exponent</label>
      <input type="number" class="gf-form-input width-8" placeholder="auto" data-placement="right" bs-tooltip="''" ng-model="ctrl.panel.color.exponent" ng-change="ctrl.refresh()" ng-model-onblur>
    </div>
    <div class="gf-form">
      <svg id="heatmap-opacity-legend" width="19em" height="2em"></svg>
    </div>
  </div>

  <div ng-show="ctrl.panel.color.mode === 'spectrum'">

@@ -37,10 +34,31 @@
        <select class="input-small gf-form-input" ng-model="ctrl.panel.color.colorScheme" ng-options="s.value as s.name for s in ctrl.colorSchemes" ng-change="ctrl.render()"></select>
      </div>
    </div>
  </div>

  <div class="gf-form">
    <svg id="heatmap-color-legend" width="19em" height="2em"></svg>
    <color-legend></color-legend>
  </div>
</div>

<div class="section gf-form-group">
  <h5 class="section-heading">Color scale</h5>
  <div class="gf-form">
    <label class="gf-form-label width-8">Min</label>
    <input type="number" ng-model="ctrl.panel.color.min" class="gf-form-input width-5" placeholder="auto" data-placement="right" bs-tooltip="''" ng-change="ctrl.refresh()" ng-model-onblur>
  </div>
  <div class="gf-form">
    <label class="gf-form-label width-8">Max</label>
    <input type="number" ng-model="ctrl.panel.color.max" class="gf-form-input width-5" placeholder="auto" data-placement="right" bs-tooltip="''" ng-change="ctrl.refresh()" ng-model-onblur>
  </div>
</div>

<div class="section gf-form-group">
  <h5 class="section-heading">Legend</h5>
  <gf-form-switch class="gf-form" label-class="width-8"
    label="Show legend"
    checked="ctrl.panel.legend.show" on-change="ctrl.render()">
  </gf-form-switch>
</div>

<div class="section gf-form-group">

@@ -8,7 +8,7 @@ import {appEvents, contextSrv} from 'app/core/core';
import {tickStep, getScaledDecimals, getFlotTickSize} from 'app/core/utils/ticks';
import d3 from 'd3';
import {HeatmapTooltip} from './heatmap_tooltip';
import {convertToCards, mergeZeroBuckets} from './heatmap_data_converter';
import {mergeZeroBuckets} from './heatmap_data_converter';

let MIN_CARD_SIZE = 1,
    CARD_PADDING = 1,

@@ -384,10 +384,12 @@ export default function link(scope, elem, attrs, ctrl) {
    data.buckets = mergeZeroBuckets(data.buckets, _.min(tick_values));
  }

  let cardsData = convertToCards(data.buckets);
  let maxValue = d3.max(cardsData, card => card.count);
  let cardsData = data.cards;
  let maxValueAuto = data.cardStats.max;
  let maxValue = panel.color.max || maxValueAuto;
  let minValue = panel.color.min || 0;

  colorScale = getColorScale(maxValue);
  colorScale = getColorScale(maxValue, minValue);
  setOpacityScale(maxValue);
  setCardSize();

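One subtlety in the fallback above, shown with illustrative values: the || operator falls through on any falsy value, so an explicitly configured max of 0 also selects the auto value.

let maxValueAuto = 55;
let maxValue = 0 || maxValueAuto; // 55; a configured max of 0 behaves like "auto"
let minValue = undefined || 0;    // 0; an unset min defaults to zero
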
@@ -434,14 +436,14 @@ export default function link(scope, elem, attrs, ctrl) {
      .style("stroke-width", 0);
  }

  function getColorScale(maxValue) {
  function getColorScale(maxValue, minValue = 0) {
    let colorScheme = _.find(ctrl.colorSchemes, {value: panel.color.colorScheme});
    let colorInterpolator = d3[colorScheme.value];
    let colorScaleInverted = colorScheme.invert === 'always' ||
      (colorScheme.invert === 'dark' && !contextSrv.user.lightTheme);

    let start = colorScaleInverted ? maxValue : 0;
    let end = colorScaleInverted ? 0 : maxValue;
    let start = colorScaleInverted ? maxValue : minValue;
    let end = colorScaleInverted ? minValue : maxValue;

    return d3.scaleSequential(colorInterpolator).domain([start, end]);
  }

@@ -704,78 +706,11 @@ export default function link(scope, elem, attrs, ctrl) {
    }
  }

  function drawColorLegend() {
    d3.select("#heatmap-color-legend").selectAll("rect").remove();

    let legend = d3.select("#heatmap-color-legend");
    let legendWidth = Math.floor($(d3.select("#heatmap-color-legend").node()).outerWidth());
    let legendHeight = d3.select("#heatmap-color-legend").attr("height");

    let legendColorScale = getColorScale(legendWidth);

    let rangeStep = 2;
    let valuesRange = d3.range(0, legendWidth, rangeStep);
    var legendRects = legend.selectAll(".heatmap-color-legend-rect").data(valuesRange);

    legendRects.enter().append("rect")
      .attr("x", d => d)
      .attr("y", 0)
      .attr("width", rangeStep + 1) // Overlap rectangles to prevent gaps
      .attr("height", legendHeight)
      .attr("stroke-width", 0)
      .attr("fill", d => {
        return legendColorScale(d);
      });
  }

  function drawOpacityLegend() {
    d3.select("#heatmap-opacity-legend").selectAll("rect").remove();

    let legend = d3.select("#heatmap-opacity-legend");
    let legendWidth = Math.floor($(d3.select("#heatmap-opacity-legend").node()).outerWidth());
    let legendHeight = d3.select("#heatmap-opacity-legend").attr("height");

    let legendOpacityScale;
    if (panel.color.colorScale === 'linear') {
      legendOpacityScale = d3.scaleLinear()
        .domain([0, legendWidth])
        .range([0, 1]);
    } else if (panel.color.colorScale === 'sqrt') {
      legendOpacityScale = d3.scalePow().exponent(panel.color.exponent)
        .domain([0, legendWidth])
        .range([0, 1]);
    }

    let rangeStep = 1;
    let valuesRange = d3.range(0, legendWidth, rangeStep);
    var legendRects = legend.selectAll(".heatmap-opacity-legend-rect").data(valuesRange);

    legendRects.enter().append("rect")
      .attr("x", d => d)
      .attr("y", 0)
      .attr("width", rangeStep)
      .attr("height", legendHeight)
      .attr("stroke-width", 0)
      .attr("fill", panel.color.cardColor)
      .style("opacity", d => {
        return legendOpacityScale(d);
      });
  }

  function render() {
    data = ctrl.data;
    panel = ctrl.panel;
    timeRange = ctrl.range;

    // Draw only if color editor is opened
    if (!d3.select("#heatmap-color-legend").empty()) {
      drawColorLegend();
    }

    if (!d3.select("#heatmap-opacity-legend").empty()) {
      drawOpacityLegend();
    }

    if (!setElementHeight() || !data) {
      return;
    }

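With the inline drawColorLegend/drawOpacityLegend bodies removed, the editor legends are drawn by the color_legend component introduced earlier in this diff; a plausible call from that component (the exact wiring lives in color_legend.ts and is only partly shown here):

let colorScale = getColorScale(colorScheme, legendWidth);
drawSimpleColorLegend(elem, colorScale);    // rects colored by pixel position
drawSimpleOpacityLegend(elem, panel.color); // rects faded by the configured opacity scale
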
@@ -3,7 +3,8 @@
import _ from 'lodash';
import { describe, beforeEach, it, sinon, expect, angularMocks } from '../../../../../test/lib/common';
import TimeSeries from 'app/core/time_series2';
import { convertToHeatMap, elasticHistogramToHeatmap, calculateBucketSize, isHeatmapDataEqual } from '../heatmap_data_converter';
import {convertToHeatMap, convertToCards, elasticHistogramToHeatmap,
  calculateBucketSize, isHeatmapDataEqual} from '../heatmap_data_converter';

describe('isHeatmapDataEqual', () => {
  let ctx: any = {};

@@ -244,6 +245,47 @@ describe('ES Histogram converter', () => {
    });
  });

describe('convertToCards', () => {
  let buckets = {};

  beforeEach(() => {
    buckets = {
      '1422774000000': {
        x: 1422774000000,
        buckets: {
          '1': { y: 1, values: [1], count: 1, bounds: {} },
          '2': { y: 2, values: [2], count: 1, bounds: {} }
        }
      },
      '1422774060000': {
        x: 1422774060000,
        buckets: {
          '2': { y: 2, values: [2, 3], count: 2, bounds: {} }
        }
      },
    };
  });

  it('should build proper cards data', () => {
    let expectedCards = [
      {x: 1422774000000, y: 1, count: 1, values: [1], yBounds: {}},
      {x: 1422774000000, y: 2, count: 1, values: [2], yBounds: {}},
      {x: 1422774060000, y: 2, count: 2, values: [2, 3], yBounds: {}}
    ];
    let {cards, cardStats} = convertToCards(buckets);
    expect(cards).to.eql(expectedCards);
  });

  it('should build proper cards stats', () => {
    let expectedStats = {
      min: 1,
      max: 2
    };
    let {cards, cardStats} = convertToCards(buckets);
    expect(cardStats).to.eql(expectedStats);
  });
});

/**
 * Compare two numbers with given precision. Suitable for comparing float numbers after conversions with precision loss.
 * @param a

@@ -11,8 +11,7 @@ import TimeSeries from 'app/core/time_series2';
import moment from 'moment';
import { Emitter } from 'app/core/core';
import rendering from '../rendering';
import { convertToHeatMap } from '../heatmap_data_converter';
// import d3 from 'd3';
import {convertToHeatMap, convertToCards} from '../heatmap_data_converter';

describe('grafanaHeatmap', function () {

@@ -115,8 +114,9 @@ describe('grafanaHeatmap', function () {
    let bucketsData = convertToHeatMap(ctx.series, ctx.data.yBucketSize, ctx.data.xBucketSize, logBase);
    ctx.data.buckets = bucketsData;

    // console.log("bucketsData", bucketsData);
    // console.log("series", ctrl.panel.yAxis.logBase, ctx.series.length);
    let {cards, cardStats} = convertToCards(bucketsData);
    ctx.data.cards = cards;
    ctx.data.cardStats = cardStats;

    let elemHtml = `
      <div class="heatmap-wrapper">

@@ -17,9 +17,17 @@
        <span>{{column.text}}</span>
      </label>
    </div>
    <div class="gf-form">
    <div class="gf-form" ng-show="editor.canSetColumns">
      <metric-segment segment="editor.addColumnSegment" get-options="editor.getColumnOptions()" on-change="editor.addColumn()"></metric-segment>
    </div>
    <div class="gf-form" ng-hide="editor.canSetColumns">
      <label class="gf-form-label">
        Auto
        <info-popover mode="right-normal" ng-if="editor.columnsHelpMessage">
          {{editor.columnsHelpMessage}}
        </info-popover>
      </label>
    </div>
  </div>
</div>

Some files were not shown because too many files have changed in this diff.