Merge pull request #13300 from bergquist/devenv_docker
Moves docker/ to devenv/docker
devenv/create_docker_compose.sh (new executable file, 56 lines)
@@ -0,0 +1,56 @@
#!/bin/bash

blocks_dir=docker/blocks
docker_dir=docker
template_dir=templates

grafana_config_file=conf.tmp
grafana_config=config

compose_header_file=docker/compose_header.yml
fig_file=docker-compose.yaml
fig_config=docker-compose.yaml

if [ "$#" == 0 ]; then
    blocks=`ls $blocks_dir`
    if [ -z "$blocks" ]; then
        echo "No Blocks available in $blocks_dir"
    else
        echo "Available Blocks:"
        for block in $blocks; do
            echo "    $block"
        done
    fi
    exit 0
fi

for file in $grafana_config_file $fig_file; do
    if [ -e $file ]; then
        echo "Deleting $file"
        rm $file
    fi
done

echo "Adding Compose header to $fig_file"
cat $compose_header_file >> $fig_file

for dir in $@; do
    current_dir=$blocks_dir/$dir
    if [ ! -d "$current_dir" ]; then
        echo "$current_dir is not a directory"
        exit 1
    fi

    if [ -e $current_dir/$grafana_config ]; then
        echo "Adding $current_dir/$grafana_config to $grafana_config_file"
        cat $current_dir/$grafana_config >> $grafana_config_file
        echo "" >> $grafana_config_file
    fi

    if [ -e $current_dir/$fig_config ]; then
        echo "Adding $current_dir/$fig_config to $fig_file"
        cat $current_dir/$fig_config >> $fig_file
        echo "" >> $fig_file
    fi
done
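For reference, a typical invocation of this composer script (a sketch, assuming it is run from the directory that contains docker/blocks and that the named blocks exist there):

    # With no arguments the script lists the available blocks:
    ./create_docker_compose.sh

    # With block names it assembles docker-compose.yaml (and conf.tmp,
    # for blocks that ship a grafana config fragment):
    ./create_docker_compose.sh graphite elastic
    docker-compose up -d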
devenv/docker/blocks/apache_proxy/Dockerfile (new file, 4 lines)
@@ -0,0 +1,4 @@
FROM jmferrer/apache2-reverse-proxy:latest

COPY ports.conf /etc/apache2/sites-enabled
COPY proxy.conf /etc/apache2/sites-enabled
devenv/docker/blocks/apache_proxy/ports.conf (new file, 1 line)
@@ -0,0 +1 @@
Listen 10081
devenv/docker/blocks/apache_proxy/proxy.conf (new file, 4 lines)
@@ -0,0 +1,4 @@
<VirtualHost *:10081>
  ProxyPass /grafana/ http://localhost:3000/
  ProxyPassReverse /grafana/ http://localhost:3000/
</VirtualHost>
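A quick smoke test for this block (a sketch, assuming Grafana is reachable on localhost:3000 from the proxy and the container publishes port 10081):

    curl -s http://localhost:10081/grafana/login | head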
devenv/docker/blocks/collectd/Dockerfile (new file, 16 lines)
@@ -0,0 +1,16 @@
FROM ubuntu:xenial

ENV DEBIAN_FRONTEND noninteractive

RUN apt-get -y update
RUN apt-get -y install collectd curl python-pip

# add a fake mtab for host disk stats
ADD etc_mtab /etc/mtab

ADD collectd.conf.tpl /etc/collectd/collectd.conf.tpl

RUN pip install envtpl
ADD start_container /usr/bin/start_container
RUN chmod +x /usr/bin/start_container
CMD start_container
devenv/docker/blocks/collectd/README.md (new file, 37 lines)
@@ -0,0 +1,37 @@
collectd-write-graphite
=======================

Basic collectd-based server monitoring. Sends stats to Graphite.

Collectd metrics:

* CPU used/free/idle/etc
* Free disk (via mounting the host's '/' into the container, e.g. -v /:/hostfs:ro)
* Disk performance
* Load average
* Memory used/free/etc
* Uptime
* Network interface
* Swap

Environment variables
---------------------

* `HOST_NAME`
  - Will be sent to Graphite
  - Required
* `GRAPHITE_HOST`
  - Graphite IP or hostname
  - Required
* `GRAPHITE_PORT`
  - Graphite port
  - Optional, defaults to 2003
* `GRAPHITE_PREFIX`
  - Graphite prefix
  - Optional, defaults to collectd.
* `REPORT_BY_CPU`
  - Report per-CPU metrics if true, global sum of CPU metrics if false (details: [collectd.conf man page](https://collectd.org/documentation/manpages/collectd.conf.5.shtml#plugin_cpu))
  - Optional, defaults to false.
* `COLLECT_INTERVAL`
  - Collection interval and thus resolution of metrics
  - Optional, defaults to 10
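For illustration, here is how the documented variables might be wired up when running the block's image directly (a sketch; the image name is a placeholder, not taken from this diff):

    # <collectd-image> is a placeholder for whatever tag this block is built as
    docker run -d \
      -v /:/hostfs:ro \
      -e HOST_NAME=$(hostname) \
      -e GRAPHITE_HOST=graphite \
      -e GRAPHITE_PORT=2003 \
      -e COLLECT_INTERVAL=10 \
      <collectd-image>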
devenv/docker/blocks/collectd/collectd.conf.tpl (new file, 106 lines)
@@ -0,0 +1,106 @@
Hostname "{{ HOST_NAME }}"

FQDNLookup false
Interval {{ COLLECT_INTERVAL | default("10") }}
Timeout 2
ReadThreads 5

LoadPlugin cpu
LoadPlugin df
LoadPlugin load
LoadPlugin memory
LoadPlugin disk
LoadPlugin interface
LoadPlugin uptime
LoadPlugin swap
LoadPlugin write_graphite
LoadPlugin processes
LoadPlugin aggregation
LoadPlugin match_regex
# LoadPlugin memcached

<Plugin df>
  # expose host's mounts into container using -v /:/host:ro (location inside container does not matter much)
  # ignore rootfs; else, the root file-system would appear twice, causing
  # one of the updates to fail and spam the log
  FSType rootfs
  # ignore the usual virtual / temporary file-systems
  FSType sysfs
  FSType proc
  FSType devtmpfs
  FSType devpts
  FSType tmpfs
  FSType fusectl
  FSType cgroup
  FSType overlay
  FSType debugfs
  FSType pstore
  FSType securityfs
  FSType hugetlbfs
  FSType squashfs
  FSType mqueue
  MountPoint "/etc/resolv.conf"
  MountPoint "/etc/hostname"
  MountPoint "/etc/hosts"
  IgnoreSelected true
  ReportByDevice false
  ReportReserved true
  ReportInodes true
  ValuesAbsolute true
  ValuesPercentage true
  ReportInodes true
</Plugin>

<Plugin "disk">
  Disk "/^[hs]d[a-z]/"
  IgnoreSelected false
</Plugin>

<Plugin "aggregation">
  <Aggregation>
    Plugin "cpu"
    Type "cpu"
    GroupBy "Host"
    GroupBy "TypeInstance"
    CalculateAverage true
  </Aggregation>
</Plugin>

<Plugin interface>
  Interface "lo"
  Interface "/^veth.*/"
  Interface "/^docker.*/"
  IgnoreSelected true
</Plugin>

# <Plugin "memcached">
#   Host "memcached"
#   Port "11211"
# </Plugin>

<Chain "PostCache">
  <Rule>
    <Match regex>
      Plugin "^cpu$"
      PluginInstance "^[0-9]+$"
    </Match>
    <Target write>
      Plugin "aggregation"
    </Target>
    Target stop
  </Rule>
  Target "write"
</Chain>

<Plugin "write_graphite">
  <Carbon>
    Host "{{ GRAPHITE_HOST }}"
    Port "{{ GRAPHITE_PORT | default("2003") }}"
    Prefix "{{ GRAPHITE_PREFIX | default("collectd.") }}"
    EscapeCharacter "_"
    SeparateInstances true
    StoreRates true
    AlwaysAppendDS false
  </Carbon>
</Plugin>
devenv/docker/blocks/collectd/etc_mtab (new file, 1 line)
@@ -0,0 +1 @@
hostfs /.dockerinit ext4 ro,relatime,user_xattr,barrier=1,data=ordered 0 0
devenv/docker/blocks/collectd/start_container (new file, 5 lines)
@@ -0,0 +1,5 @@
#!/bin/bash

envtpl /etc/collectd/collectd.conf.tpl

collectd -f
devenv/docker/blocks/elastic/elasticsearch.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
script.inline: on
script.indexed: on
devenv/docker/blocks/elastic1/elasticsearch.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
script.inline: on
script.indexed: on
devenv/docker/blocks/elastic5/elasticsearch.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
script.inline: on
script.indexed: on
devenv/docker/blocks/elastic6/elasticsearch.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
script.inline: on
script.indexed: on
devenv/docker/blocks/graphite/Dockerfile (new file, 52 lines)
@@ -0,0 +1,52 @@
from ubuntu:14.04

run apt-get -y update

run apt-get -y install libcairo2-dev libffi-dev pkg-config python-dev python-pip fontconfig apache2 libapache2-mod-wsgi git-core collectd memcached gcc g++ make supervisor nginx-light gunicorn

run cd /usr/local/src && git clone https://github.com/graphite-project/graphite-web.git
run cd /usr/local/src && git clone https://github.com/graphite-project/carbon.git
run cd /usr/local/src && git clone https://github.com/graphite-project/whisper.git

run cd /usr/local/src/whisper && git checkout master && python setup.py install
run cd /usr/local/src/carbon && git checkout 0.9.x && pip install -r requirements.txt; python setup.py install
run cd /usr/local/src/graphite-web && git checkout 0.9.x && pip install -r requirements.txt; python check-dependencies.py; python setup.py install

# Add graphite config
add ./files/initial_data.json /opt/graphite/webapp/graphite/initial_data.json
add ./files/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
add ./files/carbon.conf /opt/graphite/conf/carbon.conf
add ./files/storage-schemas.conf /opt/graphite/conf/storage-schemas.conf
add ./files/storage-aggregation.conf /opt/graphite/conf/storage-aggregation.conf
add ./files/events_views.py /opt/graphite/webapp/graphite/events/views.py

run mkdir -p /opt/graphite/storage/whisper
run touch /opt/graphite/storage/graphite.db /opt/graphite/storage/index
run chown -R www-data /opt/graphite/storage
run chmod 0775 /opt/graphite/storage /opt/graphite/storage/whisper
run chmod 0664 /opt/graphite/storage/graphite.db
run cd /opt/graphite/webapp/graphite && python manage.py syncdb --noinput

add ./files/my_htpasswd /etc/nginx/.htpasswd

# Add system service config
add ./files/nginx.conf /etc/nginx/nginx.conf
add ./files/supervisord.conf /etc/supervisor/conf.d/supervisord.conf

# Nginx
#
# graphite
expose 80

# Carbon line receiver port
expose 2003

# Carbon cache query port
expose 7002

VOLUME ["/opt/graphite/storage/whisper"]
VOLUME ["/var/lib/log/supervisor"]

cmd ["/usr/bin/supervisord"]

# vim:ts=8:noet:
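To try this image on its own (a sketch; the tag name is arbitrary, and the published ports follow the expose lines above):

    docker build -t devenv-graphite .
    docker run -d -p 8080:80 -p 2003:2003 devenv-graphite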
devenv/docker/blocks/graphite/files/carbon.conf (new file, 76 lines)
@@ -0,0 +1,76 @@
[cache]
LOCAL_DATA_DIR = /opt/graphite/storage/whisper/

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorts and serving cache queries gets more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

LOG_UPDATES = False

# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False

# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False

# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite

# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #

# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration; you have to specify distinct
# interfaces and ports for the listeners.

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
devenv/docker/blocks/graphite/files/events_views.py (new file, 102 lines)
@@ -0,0 +1,102 @@
import datetime
import time

from django.utils.timezone import get_current_timezone
from django.core.urlresolvers import get_script_prefix
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from pytz import timezone

from graphite.util import json
from graphite.events import models
from graphite.render.attime import parseATTime


def to_timestamp(dt):
    return time.mktime(dt.timetuple())


class EventEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return to_timestamp(obj)
        return json.JSONEncoder.default(self, obj)


def view_events(request):
    if request.method == "GET":
        context = {'events': fetch(request),
                   'slash': get_script_prefix()}
        return render_to_response("events.html", context)
    else:
        return post_event(request)


def detail(request, event_id):
    e = get_object_or_404(models.Event, pk=event_id)
    context = {'event': e,
               'slash': get_script_prefix()}
    return render_to_response("event.html", context)


def post_event(request):
    if request.method == 'POST':
        event = json.loads(request.body)
        assert isinstance(event, dict)

        values = {}
        values["what"] = event["what"]
        values["tags"] = event.get("tags", None)
        values["when"] = datetime.datetime.fromtimestamp(
            event.get("when", time.time()))
        if "data" in event:
            values["data"] = event["data"]

        e = models.Event(**values)
        e.save()

        return HttpResponse(status=200)
    else:
        return HttpResponse(status=405)


def get_data(request):
    if 'jsonp' in request.REQUEST:
        response = HttpResponse(
            "%s(%s)" % (request.REQUEST.get('jsonp'),
                        json.dumps(fetch(request), cls=EventEncoder)),
            mimetype='text/javascript')
    else:
        response = HttpResponse(
            json.dumps(fetch(request), cls=EventEncoder),
            mimetype="application/json")
    return response


def fetch(request):
    # XXX we need to move to USE_TZ=True to get rid of naive-time conversions
    def make_naive(dt):
        if 'tz' in request.GET:
            tz = timezone(request.GET['tz'])
        else:
            tz = get_current_timezone()
        local_dt = dt.astimezone(tz)
        if hasattr(local_dt, 'normalize'):
            local_dt = local_dt.normalize()
        return local_dt.replace(tzinfo=None)

    if request.GET.get("from", None) is not None:
        time_from = make_naive(parseATTime(request.GET["from"]))
    else:
        time_from = datetime.datetime.fromtimestamp(0)

    if request.GET.get("until", None) is not None:
        time_until = make_naive(parseATTime(request.GET["until"]))
    else:
        time_until = datetime.datetime.now()

    tags = request.GET.get("tags", None)
    if tags is not None:
        tags = request.GET.get("tags").split(" ")

    return [x.as_dict() for x in
            models.Event.find_events(time_from, time_until, tags=tags)]
devenv/docker/blocks/graphite/files/initial_data.json (new file, 20 lines)
@@ -0,0 +1,20 @@
[
  {
    "pk": 1,
    "model": "auth.user",
    "fields": {
      "username": "admin",
      "first_name": "",
      "last_name": "",
      "is_active": true,
      "is_superuser": true,
      "is_staff": true,
      "last_login": "2011-09-20 17:02:14",
      "groups": [],
      "user_permissions": [],
      "password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236",
      "email": "root@example.com",
      "date_joined": "2011-09-20 17:02:14"
    }
  }
]
devenv/docker/blocks/graphite/files/local_settings.py (new file, 42 lines)
@@ -0,0 +1,42 @@
# Edit this file to override the default graphite settings, do not edit settings.py

# Turn on debugging and restart apache if you ever see an "Internal Server Error" page
#DEBUG = True

# Set your local timezone (django will try to figure this out automatically)
TIME_ZONE = 'UTC'

# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely
#MEMCACHE_HOSTS = ['127.0.0.1:11211']

# Sometimes you need to do a lot of rendering work but cannot share your storage mount
#REMOTE_RENDERING = True
#RENDERING_HOSTS = ['fastserver01','fastserver02']
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True

# If you've got more than one backend server they should all be listed here
#CLUSTER_SERVERS = []

# Override this if you need to provide documentation specific to your graphite deployment
#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite"

# Enable email-related features
#SMTP_SERVER = "mail.mycompany.com"

# LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)"  #For Active Directory use "(sAMAccountName=%s)"

# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!)
#DATABASE_ENGINE = 'mysql'  # or 'postgres'
#DATABASE_NAME = 'graphite'
#DATABASE_USER = 'graphite'
#DATABASE_PASSWORD = 'graphite-is-awesome'
#DATABASE_HOST = 'mysql.mycompany.com'
#DATABASE_PORT = '3306'
devenv/docker/blocks/graphite/files/my_htpasswd (new file, 1 line)
@@ -0,0 +1 @@
grafana:$apr1$4R/20xhC$8t37jPP5dbcLr48btdkU//
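The entry above is an Apache MD5 (apr1) password hash; an equivalent line can be generated with the htpasswd utility (a sketch; <password> is a placeholder for a password of your choosing):

    htpasswd -nbm grafana <password>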
devenv/docker/blocks/graphite/files/nginx.conf (new file, 70 lines)
@@ -0,0 +1,70 @@
daemon off;
user www-data;
worker_processes 1;
pid /var/run/nginx.pid;

events {
  worker_connections 1024;
}

http {
  sendfile on;
  tcp_nopush on;
  tcp_nodelay on;
  keepalive_timeout 65;
  types_hash_max_size 2048;
  server_tokens off;

  server_names_hash_bucket_size 32;

  include /etc/nginx/mime.types;
  default_type application/octet-stream;

  access_log /var/log/nginx/access.log;
  error_log /var/log/nginx/error.log;

  gzip on;
  gzip_disable "msie6";

  server {
    listen 80 default_server;
    server_name _;

    open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m;

    location / {
      proxy_pass http://127.0.0.1:8000;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
      proxy_set_header X-Forwarded-Server $host;
      proxy_set_header X-Forwarded-Host $host;
      proxy_set_header Host $host;

      client_max_body_size 10m;
      client_body_buffer_size 128k;

      proxy_connect_timeout 90;
      proxy_send_timeout 90;
      proxy_read_timeout 90;

      proxy_buffer_size 4k;
      proxy_buffers 4 32k;
      proxy_busy_buffers_size 64k;
      proxy_temp_file_write_size 64k;
    }

    add_header Access-Control-Allow-Origin "*";
    add_header Access-Control-Allow-Methods "GET, OPTIONS";
    add_header Access-Control-Allow-Headers "origin, authorization, accept";

    location /content {
      alias /opt/graphite/webapp/content;
    }

    location /media {
      alias /usr/share/pyshared/django/contrib/admin/media;
    }
  }
}
devenv/docker/blocks/graphite/files/statsd_config.js (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  graphitePort: 2003,
  graphiteHost: "127.0.0.1",
  port: 8125,
  mgmt_port: 8126,
  backends: ['./backends/graphite'],
  debug: true
}
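With statsd listening on UDP 8125 as configured here, a counter can be exercised with a plain UDP write (a sketch using netcat; the metric name is arbitrary):

    echo "deploys.test.myservice:1|c" | nc -u -w1 127.0.0.1 8125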
devenv/docker/blocks/graphite/files/storage-aggregation.conf (new file, 19 lines)
@@ -0,0 +1,19 @@
[min]
pattern = \.min$
xFilesFactor = 0.1
aggregationMethod = min

[max]
pattern = \.max$
xFilesFactor = 0.1
aggregationMethod = max

[sum]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum

[default_average]
pattern = .*
xFilesFactor = 0.5
aggregationMethod = average
devenv/docker/blocks/graphite/files/storage-schemas.conf (new file, 16 lines)
@@ -0,0 +1,16 @@
[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y

[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d

[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y

[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y
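Carbon evaluates these sections top to bottom and applies the first pattern that matches a new metric name. For example, a metric named statsd.deploys.test falls under [statsd]: it is stored at 1-minute resolution for 7 days, then downsampled to 10-minute resolution for a year. Anything matching no earlier section falls through to [default].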
devenv/docker/blocks/graphite/files/supervisord.conf (new file, 26 lines)
@@ -0,0 +1,26 @@
[supervisord]
nodaemon = true
environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf'

[program:nginx]
command = /usr/sbin/nginx
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true

[program:carbon-cache]
;user = www-data
command = /opt/graphite/bin/carbon-cache.py --debug start
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true

[program:graphite-webapp]
;user = www-data
directory = /opt/graphite/webapp
environment = PYTHONPATH='/opt/graphite/webapp'
command = /usr/bin/gunicorn_django -b127.0.0.1:8000 -w2 graphite/settings.py
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true
devenv/docker/blocks/graphite1/Dockerfile (new file, 124 lines)
@@ -0,0 +1,124 @@
FROM phusion/baseimage:0.9.22
MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>

RUN apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y install vim \
  nginx \
  python-dev \
  python-flup \
  python-pip \
  python-ldap \
  expect \
  git \
  memcached \
  sqlite3 \
  libffi-dev \
  libcairo2 \
  libcairo2-dev \
  python-cairo \
  python-rrdtool \
  pkg-config \
  nodejs \
  && rm -rf /var/lib/apt/lists/*

# choose a timezone at build-time
# use `--build-arg CONTAINER_TIMEZONE=Europe/Brussels` in `docker build`
ARG CONTAINER_TIMEZONE
ENV DEBIAN_FRONTEND noninteractive

RUN if [ ! -z "${CONTAINER_TIMEZONE}" ]; \
  then ln -sf /usr/share/zoneinfo/$CONTAINER_TIMEZONE /etc/localtime && \
  dpkg-reconfigure -f noninteractive tzdata; \
  fi

# fix python dependencies (LTS Django and newer memcached/txAMQP)
RUN pip install --upgrade pip && \
  pip install django==1.8.18 \
  python-memcached==1.53 \
  txAMQP==0.6.2

ARG version=1.0.2
ARG whisper_version=${version}
ARG carbon_version=${version}
ARG graphite_version=${version}

RUN echo "Building Version: $version"

ARG whisper_repo=https://github.com/graphite-project/whisper.git
ARG carbon_repo=https://github.com/graphite-project/carbon.git
ARG graphite_repo=https://github.com/graphite-project/graphite-web.git

ARG statsd_version=v0.8.0

ARG statsd_repo=https://github.com/etsy/statsd.git

# install whisper
RUN git clone -b ${whisper_version} --depth 1 ${whisper_repo} /usr/local/src/whisper
WORKDIR /usr/local/src/whisper
RUN python ./setup.py install

# install carbon
RUN git clone -b ${carbon_version} --depth 1 ${carbon_repo} /usr/local/src/carbon
WORKDIR /usr/local/src/carbon
RUN pip install -r requirements.txt \
  && python ./setup.py install

# install graphite
RUN git clone -b ${graphite_version} --depth 1 ${graphite_repo} /usr/local/src/graphite-web
WORKDIR /usr/local/src/graphite-web
RUN pip install -r requirements.txt \
  && python ./setup.py install

# install statsd
RUN git clone -b ${statsd_version} ${statsd_repo} /opt/statsd

# config graphite
ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
# ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
WORKDIR /opt/graphite/webapp
RUN mkdir -p /var/log/graphite/ \
  && PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings

# config statsd
ADD conf/opt/statsd/config.js /opt/statsd/

# config nginx
RUN rm /etc/nginx/sites-enabled/default
ADD conf/etc/nginx/nginx.conf /etc/nginx/nginx.conf
ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/graphite-statsd.conf

# init django admin
ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
RUN chmod +x /usr/local/bin/manage.sh && /usr/local/bin/django_admin_init.exp

# logging support
RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
ADD conf/etc/logrotate.d/graphite-statsd /etc/logrotate.d/graphite-statsd

# daemons
ADD conf/etc/service/carbon/run /etc/service/carbon/run
ADD conf/etc/service/carbon-aggregator/run /etc/service/carbon-aggregator/run
ADD conf/etc/service/graphite/run /etc/service/graphite/run
ADD conf/etc/service/statsd/run /etc/service/statsd/run
ADD conf/etc/service/nginx/run /etc/service/nginx/run

# default conf setup
ADD conf /etc/graphite-statsd/conf
ADD conf/etc/my_init.d/01_conf_init.sh /etc/my_init.d/01_conf_init.sh

# cleanup
RUN apt-get clean \
  && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# defaults
EXPOSE 80 2003-2004 2023-2024 8125 8125/udp 8126
VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
WORKDIR /
ENV HOME /root
ENV STATSD_INTERFACE udp

CMD ["/sbin/my_init"]
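The timezone build argument mentioned in the comments above would be passed like this (a sketch; the tag name is arbitrary):

    docker build --build-arg CONTAINER_TIMEZONE=Europe/Brussels -t devenv-graphite1 .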
devenv/docker/blocks/graphite1/big-dashboard.json (new file, 1161 lines)
File diff suppressed because it is too large
devenv/docker/blocks/graphite1/conf/etc/logrotate.d/graphite-statsd (new file, 11 lines)
@@ -0,0 +1,11 @@
/var/log/*.log /var/log/*/*.log {
  weekly
  size 50M
  missingok
  rotate 10
  compress
  delaycompress
  notifempty
  copytruncate
  su root syslog
}
devenv/docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
#!/bin/bash

conf_dir=/etc/graphite-statsd/conf

# auto setup graphite with default configs if /opt/graphite is missing
# needed for the use case when a docker host volume is mounted at any of the following:
#  - /opt/graphite
#  - /opt/graphite/conf
#  - /opt/graphite/webapp/graphite
graphite_dir_contents=$(find /opt/graphite -mindepth 1 -print -quit)
graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit)
graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit)
graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit)
if [[ -z $graphite_dir_contents ]]; then
  # git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
  cd /usr/local/src/graphite-web && python ./setup.py install
fi
if [[ -z $graphite_storage_dir_contents ]]; then
  /usr/local/bin/django_admin_init.exp
fi
if [[ -z $graphite_conf_dir_contents ]]; then
  cp -R $conf_dir/opt/graphite/conf/*.conf /opt/graphite/conf/
fi
if [[ -z $graphite_webapp_dir_contents ]]; then
  cp $conf_dir/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
fi

# auto setup statsd with default config if /opt/statsd is missing
# needed for the use case when a docker host volume is mounted at any of the following:
#  - /opt/statsd
statsd_dir_contents=$(find /opt/statsd -mindepth 1 -print -quit)
if [[ -z $statsd_dir_contents ]]; then
  git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
  cp $conf_dir/opt/statsd/config.js /opt/statsd/config.js
fi
devenv/docker/blocks/graphite1/conf/etc/nginx/nginx.conf (new file, 96 lines)
@@ -0,0 +1,96 @@
user www-data;
worker_processes 4;
pid /run/nginx.pid;
daemon off;

events {
  worker_connections 768;
  # multi_accept on;
}

http {

  ##
  # Basic Settings
  ##

  sendfile on;
  tcp_nopush on;
  tcp_nodelay on;
  keepalive_timeout 65;
  types_hash_max_size 2048;
  # server_tokens off;

  # server_names_hash_bucket_size 64;
  # server_name_in_redirect off;

  include /etc/nginx/mime.types;
  default_type application/octet-stream;

  ##
  # Logging Settings
  ##

  access_log /var/log/nginx/access.log;
  error_log /var/log/nginx/error.log;

  ##
  # Gzip Settings
  ##

  gzip on;
  gzip_disable "msie6";

  # gzip_vary on;
  # gzip_proxied any;
  # gzip_comp_level 6;
  # gzip_buffers 16 8k;
  # gzip_http_version 1.1;
  # gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;

  ##
  # nginx-naxsi config
  ##
  # Uncomment it if you installed nginx-naxsi
  ##

  #include /etc/nginx/naxsi_core.rules;

  ##
  # nginx-passenger config
  ##
  # Uncomment it if you installed nginx-passenger
  ##

  #passenger_root /usr;
  #passenger_ruby /usr/bin/ruby;

  ##
  # Virtual Host Configs
  ##

  include /etc/nginx/conf.d/*.conf;
  include /etc/nginx/sites-enabled/*;
}


#mail {
#  # See sample authentication script at:
#  # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
#  # auth_http localhost/auth.php;
#  # pop3_capabilities "TOP" "USER";
#  # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
#  server {
#    listen localhost:110;
#    protocol pop3;
#    proxy on;
#  }
#
#  server {
#    listen localhost:143;
#    protocol imap;
#    proxy on;
#  }
#}
devenv/docker/blocks/graphite1/conf/etc/nginx/sites-enabled/graphite-statsd.conf (new file, 31 lines)
@@ -0,0 +1,31 @@
server {
  listen 80;
  root /opt/graphite/static;
  index index.html;

  location /media {
    # django admin static files
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/media/;
  }

  location /admin/auth/admin {
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
  }

  location /admin/auth/user/admin {
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
  }

  location / {
    proxy_pass http://localhost:8080;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

    add_header 'Access-Control-Allow-Origin' '*';
    add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
    add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type';
    add_header 'Access-Control-Allow-Credentials' 'true';
  }

}
devenv/docker/blocks/graphite1/conf/etc/service/carbon-aggregator/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-aggregator-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-aggregator.py start --debug 2>&1 >> /var/log/carbon-aggregator.log
devenv/docker/blocks/graphite1/conf/etc/service/carbon/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-cache-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-cache.py start --debug 2>&1 >> /var/log/carbon.log
devenv/docker/blocks/graphite1/conf/etc/service/graphite/run (new executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/bash

export PYTHONPATH=/opt/graphite/webapp && exec /usr/local/bin/gunicorn wsgi --workers=4 --bind=127.0.0.1:8080 --log-file=/var/log/gunicorn.log --preload --pythonpath=/opt/graphite/webapp/graphite
devenv/docker/blocks/graphite1/conf/etc/service/nginx/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

mkdir -p /var/log/nginx
exec /usr/sbin/nginx -c /etc/nginx/nginx.conf
devenv/docker/blocks/graphite1/conf/etc/service/statsd/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

exec /usr/bin/nodejs /opt/statsd/stats.js /opt/statsd/config.js >> /var/log/statsd.log 2>&1
@@ -0,0 +1,35 @@
# The form of each line in this file should be as follows:
#
#   output_template (frequency) = method input_pattern
#
# This will capture any received metrics that match 'input_pattern'
# for calculating an aggregate metric. The calculation will occur
# every 'frequency' seconds and the 'method' can specify 'sum' or
# 'avg'. The name of the aggregate metric will be derived from
# 'output_template' filling in any captured fields from 'input_pattern'.
#
# For example, if your metric naming scheme is:
#
#   <env>.applications.<app>.<server>.<metric>
#
# You could configure some aggregations like so:
#
#   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
#   <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
#
# As an example, if the following metrics are received:
#
#   prod.applications.apache.www01.requests
#   prod.applications.apache.www01.requests
#
# They would all go into the same aggregation buffer and after 60 seconds the
# aggregate metric 'prod.applications.apache.all.requests' would be calculated
# by summing their values.
#
# Template components such as <env> will match everything up to the next dot.
# To match multiple metric components including the dots, use <<metric>> in the
# input template:
#
#   <env>.applications.<app>.all.<app_metric> (60) = sum <env>.applications.<app>.*.<<app_metric>>
#
# Note that any time this file is modified, it will be re-read automatically.
@@ -0,0 +1,5 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, any metrics received which
# match one of these expressions will be dropped
# This file is reloaded automatically when changes are made
^some\.noisy\.metric\.prefix\..*
@@ -0,0 +1,75 @@
# This is a configuration file with AMQP enabled

[cache]
LOCAL_DATA_DIR =

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorts and serving cache queries gets more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

# Enable AMQP if you want to receive metrics using your amqp broker
ENABLE_AMQP = True

# Verbose means a line will be logged for every metric received
# useful for testing
AMQP_VERBOSE = True

# your credentials for the amqp server
# AMQP_USER = guest
# AMQP_PASSWORD = guest

# the network settings for the amqp server
# AMQP_HOST = localhost
# AMQP_PORT = 5672

# if you want to include the metric name as part of the message body
# instead of as the routing key, set this to True
# AMQP_METRIC_NAME_IN_BODY = False

# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration; you have to specify distinct
# interfaces and ports for the listeners.

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
@@ -0,0 +1,594 @@
|
||||
[cache]
|
||||
# Configure carbon directories.
|
||||
#
|
||||
# OS environment variables can be used to tell carbon where graphite is
|
||||
# installed, where to read configuration from and where to write data.
|
||||
#
|
||||
# GRAPHITE_ROOT - Root directory of the graphite installation.
|
||||
# Defaults to ../
|
||||
# GRAPHITE_CONF_DIR - Configuration directory (where this file lives).
|
||||
# Defaults to $GRAPHITE_ROOT/conf/
|
||||
# GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
|
||||
# Defaults to $GRAPHITE_ROOT/storage/
|
||||
#
|
||||
# To change other directory paths, add settings to this file. The following
|
||||
# configuration variables are available with these default values:
|
||||
#
|
||||
# STORAGE_DIR = $GRAPHITE_STORAGE_DIR
|
||||
# LOCAL_DATA_DIR = %(STORAGE_DIR)s/whisper/
|
||||
# WHITELISTS_DIR = %(STORAGE_DIR)s/lists/
|
||||
# CONF_DIR = %(STORAGE_DIR)s/conf/
|
||||
# LOG_DIR = %(STORAGE_DIR)s/log/
|
||||
# PID_DIR = %(STORAGE_DIR)s/
|
||||
#
|
||||
# For FHS style directory structures, use:
|
||||
#
|
||||
# STORAGE_DIR = /var/lib/carbon/
|
||||
# CONF_DIR = /etc/carbon/
|
||||
# LOG_DIR = /var/log/carbon/
|
||||
# PID_DIR = /var/run/
|
||||
#
|
||||
#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
|
||||
|
||||
# Specify the database library used to store metric data on disk. Each database
|
||||
# may have configurable options to change the behaviour of how it writes to
|
||||
# persistent storage.
|
||||
#
|
||||
# whisper - Fixed-size database, similar in design and purpose to RRD. This is
|
||||
# the default storage backend for carbon and the most rigorously tested.
|
||||
#
|
||||
# ceres - Experimental alternative database that supports storing data in sparse
|
||||
# files of arbitrary fixed-size resolutions.
|
||||
DATABASE = whisper
|
||||
|
||||
# Enable daily log rotation. If disabled, a new file will be opened whenever the log file path no
|
||||
# longer exists (i.e. it is removed or renamed)
|
||||
ENABLE_LOGROTATION = True
|
||||
|
||||
# Specify the user to drop privileges to
|
||||
# If this is blank carbon-cache runs as the user that invokes it
|
||||
# This user must have write access to the local data directory
|
||||
USER =
|
||||
|
||||
# Limit the size of the cache to avoid swapping or becoming CPU bound.
|
||||
# Sorts and serving cache queries gets more expensive as the cache grows.
|
||||
# Use the value "inf" (infinity) for an unlimited cache size.
|
||||
# value should be an integer number of metric datapoints.
|
||||
MAX_CACHE_SIZE = inf
|
||||
|
||||
# Limits the number of whisper update_many() calls per second, which effectively
|
||||
# means the number of write requests sent to the disk. This is intended to
|
||||
# prevent over-utilizing the disk and thus starving the rest of the system.
|
||||
# When the rate of required updates exceeds this, then carbon's caching will
|
||||
# take effect and increase the overall throughput accordingly.
|
||||
MAX_UPDATES_PER_SECOND = 500
|
||||
|
||||
# If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a
|
||||
# stop/shutdown is initiated. This helps when MAX_UPDATES_PER_SECOND is
|
||||
# relatively low and carbon has cached a lot of updates; it enables the carbon
|
||||
# daemon to shutdown more quickly.
|
||||
# MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000
|
||||
|
||||
# Softly limits the number of whisper files that get created each minute.
|
||||
# Setting this value low (e.g. 50) is a good way to ensure that your carbon
|
||||
# system will not be adversely impacted when a bunch of new metrics are
|
||||
# sent to it. The trade off is that any metrics received in excess of this
|
||||
# value will be silently dropped, and the whisper file will not be created
|
||||
# until such point as a subsequent metric is received and fits within the
|
||||
# defined rate limit. Setting this value high (like "inf" for infinity) will
|
||||
# cause carbon to create the files quickly but at the risk of increased I/O.
|
||||
MAX_CREATES_PER_MINUTE = 50
|
||||
|
||||
# Set the minimum timestamp resolution supported by this instance. This allows
|
||||
# internal optimisations by overwriting points with equal truncated timestamps
|
||||
# in order to limit the number of updates to the database. It defaults to one
|
||||
# second.
|
||||
MIN_TIMESTAMP_RESOLUTION = 1
|
||||
|
||||
# Set the minimum lag in seconds for a point to be written to the database
|
||||
# in order to optimize batching. This means that each point will wait at least
|
||||
# the duration of this lag before being written. Setting this to 0 disable the feature.
|
||||
# This currently only works when using the timesorted write strategy.
|
||||
# MIN_TIMESTAMP_LAG = 0
|
||||
|
||||
# Set the interface and port for the line (plain text) listener. Setting the
|
||||
# interface to 0.0.0.0 listens on all interfaces. Port can be set to 0 to
|
||||
# disable this listener if it is not required.
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2003
|
||||
|
||||
# Set this to True to enable the UDP listener. By default this is off
|
||||
# because it is very common to run multiple carbon daemons and managing
|
||||
# another (rarely used) port for every carbon instance is not fun.
|
||||
ENABLE_UDP_LISTENER = False
|
||||
UDP_RECEIVER_INTERFACE = 0.0.0.0
|
||||
UDP_RECEIVER_PORT = 2003
|
||||
|
||||
# Set the interface and port for the pickle listener. Setting the interface to
|
||||
# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this
|
||||
# listener if it is not required.
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2004
|
||||
|
||||
# Set the interface and port for the protobuf listener. Setting the interface to
|
||||
# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this
|
||||
# listener if it is not required.
|
||||
# PROTOBUF_RECEIVER_INTERFACE = 0.0.0.0
|
||||
# PROTOBUF_RECEIVER_PORT = 2005
|
||||
|
||||
# Limit the number of open connections the receiver can handle as any time.
|
||||
# Default is no limit. Setting up a limit for sites handling high volume
|
||||
# traffic may be recommended to avoid running out of TCP memory or having
|
||||
# thousands of TCP connections reduce the throughput of the service.
|
||||
#MAX_RECEIVER_CONNECTIONS = inf
|
||||
|
||||
# Per security concerns outlined in Bug #817247 the pickle receiver
|
||||
# will use a more secure and slightly less efficient unpickler.
|
||||
# Set this to True to revert to the old-fashioned insecure unpickler.
|
||||
USE_INSECURE_UNPICKLER = False
|
||||
|
||||
CACHE_QUERY_INTERFACE = 0.0.0.0
|
||||
CACHE_QUERY_PORT = 7002
|
||||
|
||||
# Set this to False to drop datapoints received after the cache
|
||||
# reaches MAX_CACHE_SIZE. If this is True (the default) then sockets
|
||||
# over which metrics are received will temporarily stop accepting
|
||||
# data until the cache size falls below 95% MAX_CACHE_SIZE.
|
||||
USE_FLOW_CONTROL = True
|
||||
|
||||
# If enabled this setting is used to timeout metric client connection if no
|
||||
# metrics have been sent in specified time in seconds
|
||||
#METRIC_CLIENT_IDLE_TIMEOUT = None
|
||||
|
||||
# By default, carbon-cache will log every whisper update and cache hit.
|
||||
# This can be excessive and degrade performance if logging on the same
|
||||
# volume as the whisper data is stored.
|
||||
LOG_UPDATES = False
|
||||
LOG_CREATES = False
|
||||
LOG_CACHE_HITS = False
|
||||
LOG_CACHE_QUEUE_SORTS = False
|
||||
|
||||
# The thread that writes metrics to disk can use one of the following strategies
|
||||
# determining the order in which metrics are removed from cache and flushed to
|
||||
# disk. The default option preserves the same behavior as has been historically
|
||||
# available in version 0.9.10.
|
||||
#
|
||||
# sorted - All metrics in the cache will be counted and an ordered list of
|
||||
# them will be sorted according to the number of datapoints in the cache at the
|
||||
# moment of the list's creation. Metrics will then be flushed from the cache to
|
||||
# disk in that order.
|
||||
#
|
||||
# timesorted - All metrics in the list will be looked at and sorted according
|
||||
# to the timestamp of there datapoints. The metric that were the least recently
|
||||
# written will be written first. This is an hybrid strategy between max and
|
||||
# sorted which is particularly adapted to sets of metrics with non-uniform
|
||||
# resolutions.
|
||||
#
|
||||
# max - The writer thread will always pop and flush the metric from cache
|
||||
# that has the most datapoints. This will give a strong flush preference to
|
||||
# frequently updated metrics and will also reduce random file-io. Infrequently
|
||||
# updated metrics may only ever be persisted to disk at daemon shutdown if
|
||||
# there are a large number of metrics which receive very frequent updates OR if
|
||||
# disk i/o is very slow.
|
||||
#
|
||||
# naive - Metrics will be flushed from the cache to disk in an unordered
|
||||
# fashion. This strategy may be desirable in situations where the storage for
|
||||
# whisper files is solid state, CPU resources are very limited or deference to
|
||||
# the OS's i/o scheduler is expected to compensate for the random write
|
||||
# pattern.
|
||||
#
|
||||
CACHE_WRITE_STRATEGY = sorted
|
||||
|
||||
# On some systems it is desirable for whisper to write synchronously.
|
||||
# Set this option to True if you'd like to try this. Basically it will
|
||||
# shift the onus of buffering writes from the kernel into carbon's cache.
|
||||
WHISPER_AUTOFLUSH = False
|
||||
|
||||
# By default new Whisper files are created pre-allocated with the data region
|
||||
# filled with zeros to prevent fragmentation and speed up contiguous reads and
|
||||
# writes (which are common). Enabling this option will cause Whisper to create
|
||||
# the file sparsely instead. Enabling this option may allow a large increase of
|
||||
# MAX_CREATES_PER_MINUTE but may have longer term performance implications
|
||||
# depending on the underlying storage configuration.
|
||||
# WHISPER_SPARSE_CREATE = False
|
||||
|
||||
# Only beneficial on linux filesystems that support the fallocate system call.
# It maintains the benefits of contiguous reads/writes, but with a potentially
# much faster creation speed, by allowing the kernel to handle the block
# allocation and zero-ing. Enabling this option may allow a large increase of
# MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported
# this option will gracefully fallback to standard POSIX file access methods.
WHISPER_FALLOCATE_CREATE = True

# Enabling this option will cause Whisper to lock each Whisper file it writes
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files.
# WHISPER_LOCK_WRITES = False

# On systems with a large number of metrics, writeback of pending Whisper
# write(2) pages can cause disk thrashing due to memory shortage, resulting in
# abnormal disk reads. Enabling this option makes it possible to reduce wasted
# page cache memory via posix_fadvise(2) with the POSIX_FADVISE_RANDOM option.
# WHISPER_FADVISE_RANDOM = False

# By default all nodes stored in Ceres are cached in memory to improve the
# throughput of reads and writes to underlying slices. Turning this off will
# greatly reduce memory consumption for databases with millions of metrics, at
# the cost of a steep increase in disk i/o, approximately an extra two os.stat
# calls for every read and write. Reasons to do this are if the underlying
# storage can handle stat() with practically zero cost (SSD, NVMe, zRAM).
# Valid values are:
# all - all nodes are cached
# none - node caching is disabled
# CERES_NODE_CACHING_BEHAVIOR = all

# Ceres nodes can have many slices and caching the right ones can improve
# performance dramatically. Note that there are many trade-offs to tinkering
# with this, and unless you are a ceres developer you *really* should not
# mess with this. Valid values are:
# latest - only the most recent slice is cached
# all - all slices are cached
# none - slice caching is disabled
# CERES_SLICE_CACHING_BEHAVIOR = latest

# If a Ceres node accumulates too many slices, performance can suffer.
# This can be caused by intermittently reported data. To mitigate
# slice fragmentation there is a tolerance for how much space can be
# wasted within a slice file to avoid creating a new one. That tolerance
# level is determined by MAX_SLICE_GAP, which is the number of consecutive
# null datapoints allowed in a slice file.
# If you set this very low, you will waste less of the *tiny* bit of disk space
# that this feature wastes, and you will be prone to performance problems
# caused by slice fragmentation, which can be pretty severe.
# If you set this really high, you will waste a bit more disk space (each
# null datapoint wastes 8 bytes, but keep in mind your filesystem's block
# size). If you suffer slice fragmentation issues, you should increase this or
# run the ceres-maintenance defrag plugin more often. However you should not
# set it to be huge because then if a large but allowed gap occurs it has to
# get filled in, which means instead of a simple 8-byte write to a new file we
# could end up doing an (8 * MAX_SLICE_GAP)-byte write to the latest slice.
# CERES_MAX_SLICE_GAP = 80
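# For a rough sense of the trade-off above (an illustration, not a setting):
# each null datapoint costs 8 bytes, so filling a maximal allowed gap writes
# 8 * MAX_SLICE_GAP bytes to the latest slice. A minimal Python sketch:
#
#   def gap_fill_bytes(max_slice_gap, point_size=8):
#       # worst-case bytes written when a maximal allowed gap must be filled
#       return point_size * max_slice_gap
#
#   gap_fill_bytes(80)  # -> 640, versus a simple 8-byte write to a new file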

# Enabling this option will cause Ceres to lock each Ceres file it writes
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files.
# CERES_LOCK_WRITES = False

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
# missing or empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60

# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False

# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False

# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite
# AMQP_METRIC_NAME_IN_BODY = False

# The manhole interface allows you to SSH into the carbon daemon
# and get a python interpreter. BE CAREFUL WITH THIS! If you do
# something like time.sleep() in the interpreter, the whole process
# will sleep! This is *extremely* helpful in debugging, assuming
# you are familiar with the code. If you are not, please don't
# mess with this, you are asking for trouble :)
#
# ENABLE_MANHOLE = False
# MANHOLE_INTERFACE = 127.0.0.1
# MANHOLE_PORT = 7222
# MANHOLE_USER = admin
# MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE=

# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #

# URL of graphite-web instance, this is used to add incoming series to the tag database
GRAPHITE_URL = http://127.0.0.1:80

# Tag update interval, this specifies how frequently updates to existing series will trigger
# an update to the tag index, the default setting is once every 100 updates
# TAG_UPDATE_INTERVAL = 100

# To configure special settings for the carbon-cache instance 'b', uncomment this:
#[cache:b]
#LINE_RECEIVER_PORT = 2103
#PICKLE_RECEIVER_PORT = 2104
#CACHE_QUERY_PORT = 7102
# and any other settings you want to customize, defaults are inherited
# from the [cache] section.
# You can then specify the --instance=b option to manage this instance
#
# In order to turn off logging of successful connections for the line
# receiver, set this to False
# LOG_LISTENER_CONN_SUCCESS = True

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2013
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2014

# Carbon-relay has several options for metric routing controlled by RELAY_METHOD
#
# Use relay-rules.conf to route metrics to destinations based on pattern rules
#RELAY_METHOD = rules
#
# Use consistent-hashing for even distribution of metrics between destinations
#RELAY_METHOD = consistent-hashing
#
# Use consistent-hashing but take into account an aggregation-rules.conf shared
# by downstream carbon-aggregator daemons. This will ensure that all metrics
# that map to a given aggregation rule are sent to the same carbon-aggregator
# instance.
# Enable this for carbon-relays that send to a group of carbon-aggregators
#RELAY_METHOD = aggregated-consistent-hashing
#
# You can also use fast-hashing and fast-aggregated-hashing, which are O(1)
# and will always route a given metric to the same destination, but make no
# attempt to minimize rebalancing when the list of destinations changes.
RELAY_METHOD = rules

# If you use consistent-hashing you can add redundancy by replicating every
# datapoint to more than one machine.
REPLICATION_FACTOR = 1

# For REPLICATION_FACTOR >= 2, set DIVERSE_REPLICAS to True to guarantee replicas
# across distributed hosts. With this setting disabled, it's possible that replicas
# may be sent to different caches on the same host. This has been the default
# behavior since the introduction of the 'consistent-hashing' relay method.
# Note that enabling this on an existing pre-0.9.14 cluster will require rebalancing
# your metrics across the cluster nodes using a tool like Carbonate.
#DIVERSE_REPLICAS = True

# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The general form is IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
#
# If using RELAY_METHOD = rules, all destinations used in relay-rules.conf
# must be defined in this list
DESTINATIONS = 127.0.0.1:2004
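# As an illustration (not part of this file), the IP:PORT:INSTANCE form
# described above can be parsed with a few lines of Python -- a minimal
# sketch, assuming the port is always present:
#
#   def parse_destination(dest):
#       # 'IP:PORT[:INSTANCE]'; the instance defaults to None when omitted
#       parts = dest.strip().split(':')
#       host, port = parts[0], int(parts[1])
#       instance = parts[2] if len(parts) > 2 else None
#       return host, port, instance
#
#   parse_destination('127.0.0.1:2104:b')  # -> ('127.0.0.1', 2104, 'b')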

# This defines the protocol used to contact the destination. It can be
# set to one of "line", "pickle", "udp" and "protobuf". This list can be
# extended with CarbonClientFactory plugins and defaults to "pickle".
# DESTINATION_PROTOCOL = pickle

# When using consistent hashing it sometimes makes sense to make
# the ring dynamic when you don't want to lose points when a
# single destination is down. Replication is an answer to that
# but it can be quite expensive.
# DYNAMIC_ROUTER = False

# Controls the number of connection attempts before marking a
# destination as down. We usually do one connection attempt per
# second.
# DYNAMIC_ROUTER_MAX_RETRIES = 5

# This is the maximum number of datapoints that can be queued up
# for a single destination. Once this limit is hit, we will
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
# we will drop any subsequently received datapoints.
MAX_QUEUE_SIZE = 10000

# This defines the maximum "message size" between carbon daemons. If
# your queue is large, setting this to a lower number will cause the
# relay to forward smaller discrete chunks of stats, which may prevent
# overloading on the receiving side after a disconnect.
MAX_DATAPOINTS_PER_MESSAGE = 500

# Limit the number of open connections the receiver can handle at any time.
# Default is no limit. Setting up a limit for sites handling high volume
# traffic may be recommended to avoid running out of TCP memory or having
# thousands of TCP connections reduce the throughput of the service.
#MAX_RECEIVER_CONNECTIONS = inf

# Specify the user to drop privileges to
# If this is blank carbon-relay runs as the user that invokes it
# USER =

# This is the percentage that the queue must be empty before it will accept
# more messages. For a larger site, if the queue is very large it makes sense
# to tune this to allow for incoming stats. So if you have an average
# flow of 100k stats/minute, and a MAX_QUEUE_SIZE of 3,000,000, it makes sense
# to allow stats to start flowing when you've cleared the queue to 95% since
# you should have space to accommodate the next minute's worth of stats
# even before the relay incrementally clears more of the queue
QUEUE_LOW_WATERMARK_PCT = 0.8
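# To make the arithmetic above concrete (an illustration, not a setting):
# with MAX_QUEUE_SIZE = 3,000,000 and a watermark of 0.95, the queue re-opens
# once it drains below 2,850,000 entries, leaving 150,000 slots of headroom --
# more than one minute of a 100k stats/minute flow. A minimal Python sketch:
#
#   def headroom(max_queue_size, low_watermark_pct):
#       # free slots available once the queue re-opens for writes
#       return int(max_queue_size * (1 - low_watermark_pct))
#
#   headroom(3_000_000, 0.95)  # -> 150000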

# To allow for batch efficiency from the pickle protocol and to benefit from
# other batching advantages, all writes are deferred by putting them into a queue,
# and then the queue is flushed and sent a small fraction of a second later.
TIME_TO_DEFER_SENDING = 0.0001

# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below QUEUE_LOW_WATERMARK_PCT * MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True

# If enabled this setting is used to timeout metric client connection if no
# metrics have been sent in specified time in seconds
#METRIC_CLIENT_IDLE_TIMEOUT = None

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
# missing or empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
#
# In order to turn off logging of successful connections for the line
# receiver, set this to False
# LOG_LISTENER_CONN_SUCCESS = True

# If you're connecting from the relay to a destination that's over the
# internet or similarly iffy connection, a backlog can develop because
# of internet weather conditions, e.g. acks getting lost or similar issues.
# To deal with that, you can enable USE_RATIO_RESET which will let you
# re-set the connection to an individual destination. Defaults to being off.
USE_RATIO_RESET=False

# When there is a small number of stats flowing, it's not desirable to
# perform any actions based on percentages - it's just too "twitchy".
MIN_RESET_STAT_FLOW=1000

# When the ratio of stats being sent in a reporting interval is far
# enough from 1.0, we will disconnect the socket and reconnect to
# clear out queued stats. The default ratio of 0.9 indicates that 10%
# of stats aren't being delivered within one CARBON_METRIC_INTERVAL
# (default of 60 seconds), which can lead to a queue backup. Under
# some circumstances re-setting the connection can fix this, so
# set this according to your tolerance, and look in the logs for
# "resetConnectionForQualityReasons" to observe whether this is kicking
# in when your sent queue is building up.
MIN_RESET_RATIO=0.9

# The minimum time between resets. When a connection is re-set, we
# need to wait before another reset is performed.
# (2*CARBON_METRIC_INTERVAL) + 1 second is the minimum time needed
# before stats for the new connection will be available. Setting this
# below (2*CARBON_METRIC_INTERVAL) + 1 second will result in a lot of
# reset connections for no good reason.
MIN_RESET_INTERVAL=121

[aggregator]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2023

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2024

# If set true, metrics received will be forwarded to DESTINATIONS in addition to
# the output of the aggregation rules. If set false the carbon-aggregator will
# only ever send the output of aggregation.
FORWARD_ALL = True

# Filenames of the configuration files to use for this instance of aggregator.
# Filenames are relative to CONF_DIR.
#
# AGGREGATION_RULES = aggregation-rules.conf
# REWRITE_RULES = rewrite-rules.conf

# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
DESTINATIONS = 127.0.0.1:2004

# If you want to add redundancy to your data by replicating every
# datapoint to more than one machine, increase this.
REPLICATION_FACTOR = 1

# This is the maximum number of datapoints that can be queued up
# for a single destination. Once this limit is hit, we will
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
# we will drop any subsequently received datapoints.
MAX_QUEUE_SIZE = 10000

# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True

# If enabled this setting is used to timeout metric client connection if no
# metrics have been sent in specified time in seconds
#METRIC_CLIENT_IDLE_TIMEOUT = None

# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500

# This defines how many datapoints the aggregator remembers for
# each metric. Aggregation only happens for datapoints that fall in
# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
MAX_AGGREGATION_INTERVALS = 5
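# For illustration (not a setting in this file): with a rule whose
# intervalSize is 60 seconds and the default of 5 intervals, only
# datapoints from the last 300 seconds are aggregated:
#
#   def aggregation_window_seconds(interval_size, max_aggregation_intervals=5):
#       # datapoints older than this many seconds are ignored by the aggregator
#       return interval_size * max_aggregation_intervals
#
#   aggregation_window_seconds(60)  # -> 300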

# Limit the number of open connections the receiver can handle at any time.
# Default is no limit. Setting up a limit for sites handling high volume
# traffic may be recommended to avoid running out of TCP memory or having
# thousands of TCP connections reduce the throughput of the service.
#MAX_RECEIVER_CONNECTIONS = inf

# By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back
# aggregated data points once every rule.frequency seconds, on a per-rule basis.
# Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points
# every N seconds, independent of rule frequency. This is useful, for example,
# to be able to query partially aggregated metrics from carbon-cache without
# having to first wait rule.frequency seconds.
# WRITE_BACK_FREQUENCY = 0

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
# missing or empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60

# In order to turn off logging of successful connections for the line
# receiver, set this to False
# LOG_LISTENER_CONN_SUCCESS = True

# In order to turn off logging of metrics with no corresponding
# aggregation rules receiver, set this to False
# LOG_AGGREGATOR_MISSES = False

# Specify the user to drop privileges to
# If this is blank carbon-aggregator runs as the user that invokes it
# USER =

# Part of the code, and particularly aggregator rules, needs
# to cache metric names. To avoid leaking too much memory you
# can tweak the size of this cache. The default allows for 1M
# different metrics per rule (~200MiB).
# CACHE_METRIC_NAMES_MAX=1000000

# You can optionally set a TTL on this cache.
# CACHE_METRIC_NAMES_TTL=600
@@ -0,0 +1,57 @@
# This configuration file controls the behavior of the Dashboard UI, available
# at http://my-graphite-server/dashboard/.
#
# This file must contain a [ui] section that defines values for all of the
# following settings.
[ui]
default_graph_width = 400
default_graph_height = 250
automatic_variants = true
refresh_interval = 60
autocomplete_delay = 375
merge_hover_delay = 750

# You can set this to 'default', 'white', or a custom theme name.
# To create a custom theme, copy the dashboard-default.css file
# to dashboard-myThemeName.css in the content/css directory and
# modify it to your liking.
theme = default

[keyboard-shortcuts]
toggle_toolbar = ctrl-z
toggle_metrics_panel = ctrl-space
erase_all_graphs = alt-x
save_dashboard = alt-s
completer_add_metrics = alt-enter
completer_del_metrics = alt-backspace
give_completer_focus = shift-space

# These settings apply to the UI as a whole, all other sections in this file
# pertain only to specific metric types.
#
# The dashboard presents only metrics that fall into specified naming schemes
# defined in this file. This creates a simpler, more targeted view of the
# data. The general form for defining a naming scheme is as follows:
#
#[Metric Type]
#scheme = basis.path.<field1>.<field2>.<fieldN>
#field1.label = Foo
#field2.label = Bar
#
#
# Where each <field> will be displayed as a dropdown box
# in the UI and the remaining portion of the namespace
# shown in the Metric Selector panel. The .label options set the labels
# displayed for each dropdown.
#
# For example:
#
#[Sales]
#scheme = sales.<channel>.<type>.<brand>
#channel.label = Channel
#type.label = Product Type
#brand.label = Brand
#
# This defines a 'Sales' metric type that uses 3 dropdowns in the Context Selector
# (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc)
# will be available in the Metric Selector (upper-right panel).
@@ -0,0 +1,38 @@
[default]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False

[noc]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,yellow,purple,brown,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False

[plain]
background = white
foreground = black
minorLine = grey
majorLine = rose

[summary]
background = black
lineColors = #6666ff, #66ff66, #ff6666

[alphas]
background = white
foreground = black
majorLine = grey
minorLine = rose
lineColors = 00ff00aa,ff000077,00337799
@@ -0,0 +1,21 @@
# Relay destination rules for carbon-relay. Entries are scanned in order,
# and the first pattern a metric matches will cause processing to cease after sending
# unless `continue` is set to true
#
# [name]
# pattern = <regex>
# destinations = <list of destination addresses>
# continue = <boolean>  # default: False
#
# name: Arbitrary unique name to identify the rule
# pattern: Regex pattern to match against the metric name
# destinations: Comma-separated list of destinations.
#   ex: 127.0.0.1, 10.1.2.3:2004, 10.1.2.4:2004:a, myserver.mydomain.com
# continue: Continue processing rules if this rule matches (default: False)

# You must have exactly one section with 'default = true'
# Note that all destinations listed must also exist in carbon.conf
# in the DESTINATIONS setting in the [relay] section
[default]
default = true
destinations = 127.0.0.1:2004:a, 127.0.0.1:2104:b
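# A minimal Python sketch of the first-match semantics described above
# (illustrative only -- not carbon's actual implementation):
#
#   import re
#
#   def route(metric, rules):
#       # rules: list of dicts with 'pattern', 'destinations', 'continue'
#       matched = []
#       for rule in rules:
#           if re.search(rule['pattern'], metric):
#               matched.extend(rule['destinations'])
#               if not rule.get('continue', False):
#                   break  # first match wins unless continue = true
#       return matched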
@@ -0,0 +1,18 @@
# This file defines regular expression patterns that can be used to
# rewrite metric names in a search & replace fashion. It consists of two
# sections, [pre] and [post]. The rules in the pre section are applied to
# metric names as soon as they are received. The post rules are applied
# after aggregation has taken place.
#
# The general form of each rule is as follows:
#
# regex-pattern = replacement-text
#
# For example:
#
# [post]
# _sum$ =
# _avg$ =
#
# These rules would strip off a suffix of _sum or _avg from any metric names
# after aggregation.
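# The same transformation, sketched in Python for illustration:
#
#   import re
#
#   re.sub(r'_sum$', '', 'servers.web01.requests_sum')
#   # -> 'servers.web01.requests'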
@@ -0,0 +1,42 @@
# Aggregation methods for whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds
#
# [name]
# pattern = <regex>
# xFilesFactor = <float between 0 and 1>
# aggregationMethod = <average|sum|last|max|min>
#
# name: Arbitrary unique name for the rule
# pattern: Regex pattern to match against the metric name
# xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
# aggregationMethod: function to apply to data points for aggregation
#
[min]
pattern = \.lower$
xFilesFactor = 0.1
aggregationMethod = min

[max]
pattern = \.upper(_\d+)?$
xFilesFactor = 0.1
aggregationMethod = max

[sum]
pattern = \.sum$
xFilesFactor = 0
aggregationMethod = sum

[count]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum

[count_legacy]
pattern = ^stats_counts.*
xFilesFactor = 0
aggregationMethod = sum

[default_average]
pattern = .*
xFilesFactor = 0.3
aggregationMethod = average
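# First-match resolution for the rules above, sketched in Python for
# illustration (not whisper's actual implementation):
#
#   import re
#
#   RULES = [(r'\.lower$', 'min'), (r'\.upper(_\d+)?$', 'max'),
#            (r'\.sum$', 'sum'), (r'\.count$', 'sum'),
#            (r'^stats_counts.*', 'sum'), (r'.*', 'average')]
#
#   def method_for(metric):
#       # entries are scanned in order; the first matching pattern wins
#       return next(m for p, m in RULES if re.search(p, metric))
#
#   method_for('stats.timers.render.upper_90')  # -> 'max'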
@@ -0,0 +1,36 @@
# Schema definitions for Whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds.
#
# Definition Syntax:
#
# [name]
# pattern = regex
# retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ...
#
# Remember: To support accurate aggregation from higher to lower resolution
# archives, the precision of a longer retention archive must be
# cleanly divisible by the precision of the next lower retention archive.
#
# Valid: 60s:7d,300s:30d (300/60 = 5)
# Invalid: 180s:7d,300s:30d (300/180 = 3.333)
#
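# The divisibility rule above, as a small Python check (illustrative only):
#
#   def retentions_compatible(fine_secs, coarse_secs):
#       # the coarser precision must divide cleanly by the finer one
#       return coarse_secs % fine_secs == 0
#
#   retentions_compatible(60, 300)   # -> True  (300/60 = 5)
#   retentions_compatible(180, 300)  # -> False (300/180 = 3.333)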

# Carbon's internal metrics. This entry should match what is specified in
# CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings

[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y

[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d

[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y

[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y
@@ -0,0 +1,6 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, only metrics received which
# match one of these expressions will be persisted. If this file is empty or
# missing, all metrics will pass through.
# This file is reloaded automatically when changes are made
.*
@@ -0,0 +1,94 @@
"""Copyright 2008 Orbitz WorldWide

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""

# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
from os.path import dirname, join, abspath


# Django settings below, do not touch!
APPEND_SLASH = False
TEMPLATE_DEBUG = False

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(dirname(abspath(__file__)), 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'

# Absolute path to the directory that holds media.
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''

MIDDLEWARE_CLASSES = (
    'graphite.middleware.LogExceptionsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.gzip.GZipMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'graphite.urls'

INSTALLED_APPS = (
    'graphite.metrics',
    'graphite.render',
    'graphite.browser',
    'graphite.composer',
    'graphite.account',
    'graphite.dashboard',
    'graphite.whitelist',
    'graphite.events',
    'graphite.url_shortener',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.staticfiles',
    'tagging',
)

AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']

GRAPHITE_WEB_APP_SETTINGS_LOADED = True

STATIC_URL = '/static/'

STATIC_ROOT = '/opt/graphite/static/'
@@ -0,0 +1,215 @@
## Graphite local_settings.py
# Edit this file to customize the default Graphite webapp settings
#
# Additional customizations to Django settings can be added to this file as well

#####################################
# General Configuration #
#####################################
# Set this to a long, random unique string to use as a secret key for this
# install. This key is used for salting of hashes used in auth tokens,
# CSRF middleware, cookie storage, etc. This should be set identically among
# instances if used behind a load balancer.
#SECRET_KEY = 'UNSAFE_DEFAULT'

# In Django 1.5+ set this to the list of hosts your graphite instance is
# accessible as. See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
#ALLOWED_HOSTS = [ '*' ]

# Set your local timezone (Django's default is America/Chicago)
# If your graphs appear to be offset by a couple hours then this probably
# needs to be explicitly set to your local timezone.
#TIME_ZONE = 'America/Los_Angeles'

# Override this to provide documentation specific to your Graphite deployment
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"

# Logging
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True
#LOG_METRIC_ACCESS = True

# Enable full debug page display on exceptions (Internal Server Error pages)
#DEBUG = True

# If using RRD files and rrdcached, set to the address or socket of the daemon
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'

# This lists the memcached servers that will be used by this webapp.
# If you have a cluster of webapps you should ensure all of them
# have the *exact* same value for this setting. That will maximize cache
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
# memcached entirely.
#
# You should not use the loopback address (127.0.0.1) here if using clustering
# as every webapp in the cluster should use the exact same values to prevent
# unneeded cache misses. Set to [] to disable caching of images and fetched data
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute


#####################################
# Filesystem Paths #
#####################################
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
# to somewhere else
#GRAPHITE_ROOT = '/opt/graphite'

# Most installs done outside of a separate tree such as /opt/graphite will only
# need to change these three settings. Note that the default settings for each
# of these is relative to GRAPHITE_ROOT
#CONF_DIR = '/opt/graphite/conf'
#STORAGE_DIR = '/opt/graphite/storage'
#CONTENT_DIR = '/opt/graphite/webapp/content'

# To further or fully customize the paths, modify the following. Note that the
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
#
## Webapp config files
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'

## Data directories
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
#WHISPER_DIR = '/opt/graphite/storage/whisper'
#RRD_DIR = '/opt/graphite/storage/rrd'
#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
#LOG_DIR = '/opt/graphite/storage/log/webapp'
#INDEX_FILE = '/opt/graphite/storage/index'  # Search index file


#####################################
# Email Configuration #
#####################################
# This is used for emailing rendered graphs
# Default backend is SMTP
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
#EMAIL_USE_TLS = False
# To drop emails on the floor, enable the Dummy backend:
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'


#####################################
# Authentication Configuration #
#####################################
## LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
# OR
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)"  # For Active Directory use "(sAMAccountName=%s)"
#
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
# See http://www.python-ldap.org/ for further details on these options.

## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
#USE_REMOTE_USER_AUTHENTICATION = True

# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'


##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to set up an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
#
# The following built-in database engines are available:
#  django.db.backends.postgresql          # Removed in Django 1.4
#  django.db.backends.postgresql_psycopg2
#  django.db.backends.mysql
#  django.db.backends.sqlite3
#  django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
#DATABASES = {
#    'default': {
#        'NAME': '/opt/graphite/storage/graphite.db',
#        'ENGINE': 'django.db.backends.sqlite3',
#        'USER': '',
#        'PASSWORD': '',
#        'HOST': '',
#        'PORT': ''
#    }
#}
#


#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]

## These are timeout values (in seconds) for requests to remote webapps
#REMOTE_STORE_FETCH_TIMEOUT = 6   # Timeout to fetch series data
#REMOTE_STORE_FIND_TIMEOUT = 2.5  # Timeout for metric find requests
#REMOTE_STORE_RETRY_DELAY = 60    # Time before retrying a failed remote webapp
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results

## Remote rendering settings
# Set to True to enable rendering of graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0

# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use 127.0.0.1 here in most cases
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
#CARBONLINK_TIMEOUT = 1.0

#####################################
# Additional Django Settings #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *

import os

LOG_DIR = '/var/log/graphite'
SECRET_KEY = '$(date +%s | sha256sum | base64 | head -c 64)'

if os.getenv("MEMCACHE_HOST") is not None:
    MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOST").split(",")

if os.getenv("DEFAULT_CACHE_DURATION") is not None:
    DEFAULT_CACHE_DURATION = int(os.getenv("DEFAULT_CACHE_DURATION"))
6
devenv/docker/blocks/graphite1/conf/opt/statsd/config.js
Normal file
@@ -0,0 +1,6 @@
{
  "graphiteHost": "127.0.0.1",
  "graphitePort": 2003,
  "port": 8125,
  "flushInterval": 10000
}
26
devenv/docker/blocks/graphite1/conf/usr/local/bin/django_admin_init.exp
Executable file
@@ -0,0 +1,26 @@
#!/usr/bin/env expect

set timeout -1
spawn /usr/local/bin/manage.sh

expect "Would you like to create one now" {
    send "yes\r"
}

expect "Username" {
    send "root\r"
}

expect "Email address:" {
    send "root.graphite@mailinator.com\r"
}

expect "Password:" {
    send "root\r"
}

expect "Password *:" {
    send "root\r"
}

expect "Superuser created successfully"
3
devenv/docker/blocks/graphite1/conf/usr/local/bin/manage.sh
Executable file
@@ -0,0 +1,3 @@
#!/bin/bash
PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings
# PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
1179
devenv/docker/blocks/graphite11/big-dashboard.json
Normal file
File diff suppressed because it is too large
92
devenv/docker/blocks/influxdb/influxdb.conf
Normal file
@@ -0,0 +1,92 @@
reporting-disabled = false

[meta]
  # Where the metadata/raft database is stored
  dir = "/var/lib/influxdb/meta"

  retention-autocreate = true

  # If log messages are printed for the meta service
  logging-enabled = true
  pprof-enabled = false

  # The default duration for leases.
  lease-duration = "1m0s"

[data]
  # Controls if this node holds time series data shards in the cluster
  enabled = true

  dir = "/var/lib/influxdb/data"

  # These are the WAL settings for the storage engine >= 0.9.3
  wal-dir = "/var/lib/influxdb/wal"
  wal-logging-enabled = true


[coordinator]
  write-timeout = "10s"
  max-concurrent-queries = 0
  query-timeout = "0"
  log-queries-after = "0"
  max-select-point = 0
  max-select-series = 0
  max-select-buckets = 0

[retention]
  enabled = true
  check-interval = "30m"

[shard-precreation]
  enabled = true
  check-interval = "10m"
  advance-period = "30m"

[monitor]
  store-enabled = true          # Whether to record statistics internally.
  store-database = "_internal"  # The destination database for recorded statistics
  store-interval = "10s"        # The interval at which to record statistics

[admin]
  enabled = true
  bind-address = ":8083"
  https-enabled = false
  https-certificate = "/etc/ssl/influxdb.pem"

[http]
  enabled = true
  bind-address = ":8086"
  auth-enabled = true
  log-enabled = true
  write-tracing = false
  pprof-enabled = false
  https-enabled = false
  https-certificate = "/etc/ssl/influxdb.pem"
  ### Use a separate private key location.
  # https-private-key = ""
  max-row-limit = 10000
  realm = "InfluxDB"

  unix-socket-enabled = false  # enable http service over unix domain socket
  # bind-socket = "/var/run/influxdb.sock"

[subscriber]
  enabled = true

[[graphite]]
  enabled = false

[[collectd]]
  enabled = false

[[opentsdb]]
  enabled = false

[[udp]]
  enabled = false

[continuous_queries]
  log-enabled = true
  enabled = true
  # run-interval = "1s" # interval for how often continuous queries will be checked if they need to run
5
devenv/docker/blocks/mssql/build/Dockerfile
Normal file
@@ -0,0 +1,5 @@
FROM microsoft/mssql-server-linux:2017-CU4
WORKDIR /usr/setup
COPY . /usr/setup
RUN chmod +x /usr/setup/setup.sh
CMD /bin/bash ./entrypoint.sh
2
devenv/docker/blocks/mssql/build/entrypoint.sh
Normal file
@@ -0,0 +1,2 @@
# start SQL Server and run setup script
/usr/setup/setup.sh & /opt/mssql/bin/sqlservr
12
devenv/docker/blocks/mssql/build/setup.sh
Executable file
@@ -0,0 +1,12 @@
#!/bin/bash

# wait for the SQL Server to come up
sleep 15s

cat /usr/setup/setup.sql.template | awk '{
    gsub(/%%DB%%/,"'$MSSQL_DATABASE'");
    gsub(/%%USER%%/,"'$MSSQL_USER'");
    gsub(/%%PWD%%/,"'$MSSQL_PASSWORD'")
}1' > /usr/setup/setup.sql

/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P $MSSQL_SA_PASSWORD -d master -i /usr/setup/setup.sql
26
devenv/docker/blocks/mssql/build/setup.sql.template
Normal file
@@ -0,0 +1,26 @@
CREATE LOGIN %%USER%% WITH PASSWORD = '%%PWD%%'
GO

CREATE DATABASE %%DB%%
ON
( NAME = %%DB%%,
  FILENAME = '/var/opt/mssql/data/%%DB%%.mdf',
  SIZE = 500MB,
  MAXSIZE = 1000MB,
  FILEGROWTH = 100MB )
LOG ON
( NAME = %%DB%%_log,
  FILENAME = '/var/opt/mssql/data/%%DB%%_log.ldf',
  SIZE = 500MB,
  MAXSIZE = 1000MB,
  FILEGROWTH = 100MB );
GO

USE %%DB%%;
GO

CREATE USER %%USER%% FOR LOGIN %%USER%%;
GO

EXEC sp_addrolemember 'db_owner', '%%USER%%';
GO
6
devenv/docker/blocks/mysql/config
Normal file
@@ -0,0 +1,6 @@
[database]
DB_TYPE = mysql
HOST = ${DB_1_PORT_3306_TCP_ADDR}:${DB_1_PORT_3306_TCP_PORT}
NAME = ${DB_1_ENV_MYSQL_DATABASE}
USER = ${DB_1_ENV_MYSQL_USER}
PASSWD = ${DB_1_ENV_MYSQL_PASSWORD}
20
devenv/docker/blocks/mysql_opendata/Dockerfile
Normal file
@@ -0,0 +1,20 @@
## MySQL with Open Data Set from NYC Open Data (https://data.cityofnewyork.us)

FROM mysql:latest

ENV MYSQL_DATABASE="testdata" \
    MYSQL_ROOT_PASSWORD="rootpass" \
    MYSQL_USER="grafana" \
    MYSQL_PASSWORD="password"

# Install requirements (wget and unzip)
RUN apt-get update && apt-get install -y wget unzip

# Fetch NYC Data Set
RUN wget https://data.cityofnewyork.us/download/57g5-etyj/application%2Fzip -O /tmp/data.zip && \
    unzip -j /tmp/data.zip 311_Service_Requests_from_2015.csv -d /var/lib/mysql-files && \
    rm /tmp/data.zip

ADD import_csv.sql /docker-entrypoint-initdb.d/

EXPOSE 3306
80
devenv/docker/blocks/mysql_opendata/import_csv.sql
Normal file
@@ -0,0 +1,80 @@
use testdata;
DROP TABLE IF EXISTS `nyc_open_data`;
CREATE TABLE IF NOT EXISTS `nyc_open_data` (
  UniqueKey bigint(255),
  `CreatedDate` varchar(255),
  `ClosedDate` varchar(255),
  Agency varchar(255),
  AgencyName varchar(255),
  ComplaintType varchar(255),
  Descriptor varchar(255),
  LocationType varchar(255),
  IncidentZip varchar(255),
  IncidentAddress varchar(255),
  StreetName varchar(255),
  CrossStreet1 varchar(255),
  CrossStreet2 varchar(255),
  IntersectionStreet1 varchar(255),
  IntersectionStreet2 varchar(255),
  AddressType varchar(255),
  City varchar(255),
  Landmark varchar(255),
  FacilityType varchar(255),
  Status varchar(255),
  `DueDate` varchar(255),
  ResolutionDescription varchar(2048),
  `ResolutionActionUpdatedDate` varchar(255),
  CommunityBoard varchar(255),
  Borough varchar(255),
  XCoordinateStatePlane varchar(255),
  YCoordinateStatePlane varchar(255),
  ParkFacilityName varchar(255),
  ParkBorough varchar(255),
  SchoolName varchar(255),
  SchoolNumber varchar(255),
  SchoolRegion varchar(255),
  SchoolCode varchar(255),
  SchoolPhoneNumber varchar(255),
  SchoolAddress varchar(255),
  SchoolCity varchar(255),
  SchoolState varchar(255),
  SchoolZip varchar(255),
  SchoolNotFound varchar(255),
  SchoolOrCitywideComplaint varchar(255),
  VehicleType varchar(255),
  TaxiCompanyBorough varchar(255),
  TaxiPickUpLocation varchar(255),
  BridgeHighwayName varchar(255),
  BridgeHighwayDirection varchar(255),
  RoadRamp varchar(255),
  BridgeHighwaySegment varchar(255),
  GarageLotName varchar(255),
  FerryDirection varchar(255),
  FerryTerminalName varchar(255),
  Latitude varchar(255),
  Longitude varchar(255),
  Location varchar(255)
);
LOAD DATA INFILE '/var/lib/mysql-files/311_Service_Requests_from_2015.csv' INTO TABLE nyc_open_data FIELDS OPTIONALLY ENCLOSED BY '"' TERMINATED BY ',' IGNORE 1 LINES;
UPDATE nyc_open_data SET CreatedDate = STR_TO_DATE(CreatedDate, '%m/%d/%Y %r') WHERE CreatedDate <> '';
UPDATE nyc_open_data SET ClosedDate = STR_TO_DATE(ClosedDate, '%m/%d/%Y %r') WHERE ClosedDate <> '';
UPDATE nyc_open_data SET DueDate = STR_TO_DATE(DueDate, '%m/%d/%Y %r') WHERE DueDate <> '';
UPDATE nyc_open_data SET ResolutionActionUpdatedDate = STR_TO_DATE(ResolutionActionUpdatedDate, '%m/%d/%Y %r') WHERE ResolutionActionUpdatedDate <> '';

UPDATE nyc_open_data SET CreatedDate=null WHERE CreatedDate = '';
UPDATE nyc_open_data SET ClosedDate=null WHERE ClosedDate = '';
UPDATE nyc_open_data SET DueDate=null WHERE DueDate = '';
UPDATE nyc_open_data SET ResolutionActionUpdatedDate=null WHERE ResolutionActionUpdatedDate = '';

ALTER TABLE nyc_open_data modify CreatedDate datetime NULL;
ALTER TABLE nyc_open_data modify ClosedDate datetime NULL;
ALTER TABLE nyc_open_data modify DueDate datetime NULL;
ALTER TABLE nyc_open_data modify ResolutionActionUpdatedDate datetime NULL;

ALTER TABLE `nyc_open_data` ADD INDEX `IX_ComplaintType` (`ComplaintType`);
ALTER TABLE `nyc_open_data` ADD INDEX `IX_CreatedDate` (`CreatedDate`);
ALTER TABLE `nyc_open_data` ADD INDEX `IX_LocationType` (`LocationType`);
ALTER TABLE `nyc_open_data` ADD INDEX `IX_AgencyName` (`AgencyName`);
ALTER TABLE `nyc_open_data` ADD INDEX `IX_City` (`City`);

SYSTEM rm /var/lib/mysql-files/311_Service_Requests_from_2015.csv
3
devenv/docker/blocks/mysql_tests/Dockerfile
Normal file
@@ -0,0 +1,3 @@
FROM mysql:5.6
ADD setup.sql /docker-entrypoint-initdb.d
CMD ["mysqld"]
2
devenv/docker/blocks/mysql_tests/setup.sql
Normal file
@@ -0,0 +1,2 @@
CREATE DATABASE grafana_ds_tests;
GRANT ALL PRIVILEGES ON grafana_ds_tests.* TO 'grafana';
4
devenv/docker/blocks/nginx_proxy/Dockerfile
Normal file
@@ -0,0 +1,4 @@
FROM nginx:alpine

COPY nginx.conf /etc/nginx/nginx.conf
COPY htpasswd /etc/nginx/htpasswd
3
devenv/docker/blocks/nginx_proxy/htpasswd
Executable file
@@ -0,0 +1,3 @@
user1:$apr1$1odeeQb.$kwV8D/VAAGUDU7pnHuKoV0
user2:$apr1$A2kf25r.$6S0kp3C7vIuixS5CL0XA9.
admin:$apr1$IWn4DoRR$E2ol7fS/dkI18eU4bXnBO1
38
devenv/docker/blocks/nginx_proxy/nginx.conf
Normal file
@@ -0,0 +1,38 @@
events { worker_connections 1024; }

http {
    sendfile on;

    proxy_redirect off;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Host $server_name;

    server {
        listen 10080;

        location /grafana/ {
            ################################################################
            # Enable these settings to test with basic auth and an auth proxy header.
            # The htpasswd file contains an admin user with password admin,
            # plus user1 and user2, both with password grafana.
            ################################################################

            # auth_basic "Restricted Content";
            # auth_basic_user_file /etc/nginx/htpasswd;

            ################################################################
            # To use the auth proxy header, set the following in custom.ini:
            # [auth.proxy]
            # enabled = true
            # header_name = X-WEBAUTH-USER
            # header_property = username
            ################################################################

            # proxy_set_header X-WEBAUTH-USER $remote_user;

            proxy_pass http://localhost:3000/;
        }
    }
}
30
devenv/docker/blocks/openldap/Dockerfile
Normal file
@@ -0,0 +1,30 @@
# Fork of https://github.com/dinkel/docker-openldap

FROM debian:jessie

LABEL maintainer="Christian Luginbühl <dinke@pimprecords.com>"

ENV OPENLDAP_VERSION 2.4.40

RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
        slapd=${OPENLDAP_VERSION}* \
        ldap-utils && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

RUN mv /etc/ldap /etc/ldap.dist

EXPOSE 389

VOLUME ["/etc/ldap", "/var/lib/ldap"]

COPY modules/ /etc/ldap.dist/modules
COPY prepopulate/ /etc/ldap.dist/prepopulate

COPY entrypoint.sh /entrypoint.sh
COPY prepopulate.sh /prepopulate.sh

ENTRYPOINT ["/entrypoint.sh"]

CMD ["slapd", "-d", "32768", "-u", "openldap", "-g", "openldap"]
98
devenv/docker/blocks/openldap/entrypoint.sh
Executable file
@@ -0,0 +1,98 @@
|
||||
#!/bin/bash
|
||||
|
||||
# When not limiting the open file descritors limit, the memory consumption of
|
||||
# slapd is absurdly high. See https://github.com/docker/docker/issues/8231
|
||||
ulimit -n 8192
|
||||
|
||||
|
||||
set -e
|
||||
|
||||
chown -R openldap:openldap /var/lib/ldap/
|
||||
|
||||
if [[ ! -d /etc/ldap/slapd.d ]]; then
|
||||
|
||||
if [[ -z "$SLAPD_PASSWORD" ]]; then
|
||||
echo -n >&2 "Error: Container not configured and SLAPD_PASSWORD not set. "
|
||||
echo >&2 "Did you forget to add -e SLAPD_PASSWORD=... ?"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "$SLAPD_DOMAIN" ]]; then
|
||||
echo -n >&2 "Error: Container not configured and SLAPD_DOMAIN not set. "
|
||||
echo >&2 "Did you forget to add -e SLAPD_DOMAIN=... ?"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SLAPD_ORGANIZATION="${SLAPD_ORGANIZATION:-${SLAPD_DOMAIN}}"
|
||||
|
||||
cp -a /etc/ldap.dist/* /etc/ldap
|
||||
|
||||
cat <<-EOF | debconf-set-selections
|
||||
slapd slapd/no_configuration boolean false
|
||||
slapd slapd/password1 password $SLAPD_PASSWORD
|
||||
slapd slapd/password2 password $SLAPD_PASSWORD
|
||||
slapd shared/organization string $SLAPD_ORGANIZATION
|
||||
slapd slapd/domain string $SLAPD_DOMAIN
|
||||
slapd slapd/backend select HDB
|
||||
slapd slapd/allow_ldap_v2 boolean false
|
||||
slapd slapd/purge_database boolean false
|
||||
slapd slapd/move_old_database boolean true
|
||||
EOF
|
||||
|
||||
dpkg-reconfigure -f noninteractive slapd >/dev/null 2>&1
|
||||
|
||||
dc_string=""
|
||||
|
||||
IFS="."; declare -a dc_parts=($SLAPD_DOMAIN)
|
||||
|
||||
for dc_part in "${dc_parts[@]}"; do
|
||||
dc_string="$dc_string,dc=$dc_part"
|
||||
done
|
||||
|
||||
base_string="BASE ${dc_string:1}"
|
||||
|
||||
sed -i "s/^#BASE.*/${base_string}/g" /etc/ldap/ldap.conf
|
||||
|
||||
if [[ -n "$SLAPD_CONFIG_PASSWORD" ]]; then
|
||||
password_hash=`slappasswd -s "${SLAPD_CONFIG_PASSWORD}"`
|
||||
|
||||
sed_safe_password_hash=${password_hash//\//\\\/}
|
||||
|
||||
slapcat -n0 -F /etc/ldap/slapd.d -l /tmp/config.ldif
|
||||
sed -i "s/\(olcRootDN: cn=admin,cn=config\)/\1\nolcRootPW: ${sed_safe_password_hash}/g" /tmp/config.ldif
|
||||
rm -rf /etc/ldap/slapd.d/*
|
||||
slapadd -n0 -F /etc/ldap/slapd.d -l /tmp/config.ldif >/dev/null 2>&1
|
||||
fi
|
||||
|
||||
if [[ -n "$SLAPD_ADDITIONAL_SCHEMAS" ]]; then
|
||||
IFS=","; declare -a schemas=($SLAPD_ADDITIONAL_SCHEMAS); unset IFS
|
||||
|
||||
for schema in "${schemas[@]}"; do
|
||||
slapadd -n0 -F /etc/ldap/slapd.d -l "/etc/ldap/schema/${schema}.ldif" >/dev/null 2>&1
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ -n "$SLAPD_ADDITIONAL_MODULES" ]]; then
|
||||
IFS=","; declare -a modules=($SLAPD_ADDITIONAL_MODULES); unset IFS
|
||||
|
||||
for module in "${modules[@]}"; do
|
||||
echo "Adding module ${module}"
|
||||
slapadd -n0 -F /etc/ldap/slapd.d -l "/etc/ldap/modules/${module}.ldif" >/dev/null 2>&1
|
||||
done
|
||||
fi
|
||||
|
||||
# This needs to run in background
|
||||
# Will prepopulate entries after ldap daemon has started
|
||||
./prepopulate.sh &
|
||||
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d/ /var/lib/ldap/ /var/run/slapd/
|
||||
else
|
||||
slapd_configs_in_env=`env | grep 'SLAPD_'`
|
||||
|
||||
if [ -n "${slapd_configs_in_env:+x}" ]; then
|
||||
echo "Info: Container already configured, therefore ignoring SLAPD_xxx environment variables"
|
||||
fi
|
||||
fi
|
||||
|
||||
exec "$@"
|
||||
|
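For reference, a minimal sketch of driving this entrypoint by hand; in this devenv the block is normally started via docker-compose, so the image tag and published port below are placeholders. `SLAPD_PASSWORD` and `SLAPD_DOMAIN` are required on first start, as the checks above enforce:

```bash
# Hypothetical manual run; "devenv/openldap" is a placeholder image tag.
docker run -d --name openldap \
  -e SLAPD_PASSWORD=grafana \
  -e SLAPD_DOMAIN=grafana.org \
  -e SLAPD_ADDITIONAL_MODULES=memberof \
  -p 389:389 \
  devenv/openldap
```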
86
devenv/docker/blocks/openldap/ldap_dev.toml
Normal file
@@ -0,0 +1,86 @@
# To troubleshoot and get more log info enable ldap debug logging in grafana.ini
# [log]
# filters = ldap:debug

[[servers]]
# Ldap server host (specify multiple hosts space separated)
host = "127.0.0.1"
# Default port is 389 or 636 if use_ssl = true
port = 389
# Set to true if ldap server supports TLS
use_ssl = false
# Set to true to connect to the ldap server with STARTTLS (create the connection insecurely, then upgrade it to a secure connection with TLS)
start_tls = false
# set to true if you want to skip ssl cert validation
ssl_skip_verify = false
# set to the path to your root CA certificate or leave unset to use system defaults
# root_ca_cert = "/path/to/certificate.crt"

# Search user bind dn
bind_dn = "cn=admin,dc=grafana,dc=org"
# Search user bind password
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
bind_password = 'grafana'

# User search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" or "(uid=%s)"
search_filter = "(cn=%s)"

# An array of base dns to search through
search_base_dns = ["dc=grafana,dc=org"]

# In POSIX LDAP schemas, without a memberOf attribute a secondary query must be made for groups.
# This is done by enabling group_search_filter below. You must also set member_of = "cn"
# in [servers.attributes] below.

# Users with nested/recursive group membership and an LDAP server that supports LDAP_MATCHING_RULE_IN_CHAIN
# can set group_search_filter, group_search_filter_user_attribute, group_search_base_dns and member_of
# below in such a way that the user's recursive group membership is considered.
#
# Nested Groups + Active Directory (AD) Example:
#
# AD groups store the Distinguished Names (DNs) of members, so your filter must
# recursively search your groups for the authenticating user's DN. For example:
#
# group_search_filter = "(member:1.2.840.113556.1.4.1941:=%s)"
# group_search_filter_user_attribute = "distinguishedName"
# group_search_base_dns = ["ou=groups,dc=grafana,dc=org"]
#
# [servers.attributes]
# ...
# member_of = "distinguishedName"

## Group search filter, to retrieve the groups of which the user is a member (only set if the memberOf attribute is not available)
# group_search_filter = "(&(objectClass=posixGroup)(memberUid=%s))"
## Group search filter user attribute defines what user attribute gets substituted for %s in group_search_filter.
## Defaults to the value of username in [servers.attributes]
## Valid options are any of your values in [servers.attributes]
## If you are using nested groups you probably want to set this and member_of in
## [servers.attributes] to "distinguishedName"
# group_search_filter_user_attribute = "distinguishedName"
## An array of the base DNs to search through for groups. Typically uses ou=groups
# group_search_base_dns = ["ou=groups,dc=grafana,dc=org"]

# Specify names of the ldap attributes your ldap uses
[servers.attributes]
name = "givenName"
surname = "sn"
username = "cn"
member_of = "memberOf"
email = "email"

# Map ldap groups to grafana org roles
[[servers.group_mappings]]
group_dn = "cn=admins,ou=groups,dc=grafana,dc=org"
org_role = "Admin"
grafana_admin = true
# The Grafana organization database id, optional, if left out the default org (id 1) will be used
# org_id = 1

[[servers.group_mappings]]
group_dn = "cn=editors,ou=groups,dc=grafana,dc=org"
org_role = "Editor"

[[servers.group_mappings]]
# If you want to match all (or no ldap groups) then you can use wildcard
group_dn = "*"
org_role = "Viewer"
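A quick way to sanity-check these settings from the host, assuming the openldap block is up on 127.0.0.1:389, is to reproduce the bind and user search with ldapsearch:

```bash
# Bind as the search user and run the user search filter for one account.
ldapsearch -x -H ldap://127.0.0.1:389 \
  -D "cn=admin,dc=grafana,dc=org" -w grafana \
  -b "dc=grafana,dc=org" "(cn=ldap-admin)"
```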
33
devenv/docker/blocks/openldap/modules/memberof.ldif
Normal file
@@ -0,0 +1,33 @@
dn: cn=module,cn=config
cn: module
objectClass: olcModuleList
objectClass: top
olcModulePath: /usr/lib/ldap
olcModuleLoad: memberof.la

dn: olcOverlay={0}memberof,olcDatabase={1}hdb,cn=config
objectClass: olcConfig
objectClass: olcMemberOf
objectClass: olcOverlayConfig
objectClass: top
olcOverlay: memberof
olcMemberOfDangling: ignore
olcMemberOfRefInt: TRUE
olcMemberOfGroupOC: groupOfNames
olcMemberOfMemberAD: member
olcMemberOfMemberOfAD: memberOf

dn: cn=module,cn=config
cn: module
objectClass: olcModuleList
objectClass: top
olcModulePath: /usr/lib/ldap
olcModuleLoad: refint.la

dn: olcOverlay={1}refint,olcDatabase={1}hdb,cn=config
objectClass: olcConfig
objectClass: olcOverlayConfig
objectClass: olcRefintConfig
objectClass: top
olcOverlay: {1}refint
olcRefintAttribute: memberof member manager owner
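To verify the overlay works, add a member to a `groupOfNames` over the wire (ldapadd/ldapmodify rather than slapadd) and check that `memberOf` shows up on the user entry; a hedged sketch against the dev server:

```bash
# memberOf is an operational attribute here, so request it explicitly.
ldapsearch -x -H ldap://127.0.0.1:389 \
  -D "cn=admin,dc=grafana,dc=org" -w grafana \
  -b "cn=ldap-torkel,ou=users,dc=grafana,dc=org" memberOf
```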
45
devenv/docker/blocks/openldap/notes.md
Normal file
@@ -0,0 +1,45 @@
# Notes on OpenLdap Docker Block

Any ldif files added to the prepopulate subdirectory will be automatically imported into the OpenLdap database.

The ldif files add test users (for example `ldap-admin`, `ldap-editor` and `ldap-viewer`) and groups (`admins`, `editors`, `backend` and `frontend`). The `admins` and `editors` groups correspond with the group mappings in `ldap_dev.toml`; `ldap-admin` is a member of `admins` and `ldap-editor` is a member of `editors`.

Note that users that are added here need to specify a `memberOf` attribute manually as well as the `member` attribute for the group. The `memberOf` module usually does this automatically (if you add a group in Apache Directory Studio for example) but this does not work in the entrypoint script as it uses the `slapadd` command to add entries before the server has started and before the `memberOf` module is loaded.

After adding ldif files to `prepopulate`:

1. Remove your current openldap container: `docker rm docker_openldap_1`
2. Build: `docker-compose build`
3. Start: `docker-compose up`

## Enabling LDAP in Grafana

Copy the `ldap_dev.toml` file in this folder into your `conf` folder (it is gitignored already). Then enable LDAP in your `.ini` file so Grafana uses this block:

```ini
[auth.ldap]
enabled = true
config_file = conf/ldap_dev.toml
; allow_sign_up = true
```

## Test groups & users

admins
- ldap-admin
- ldap-torkel
- ldap-daniel

backend
- ldap-carl
- ldap-torkel
- ldap-leo

frontend
- ldap-torkel
- ldap-tobias
- ldap-daniel

editors
- ldap-editor

no groups
- ldap-viewer
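To confirm a test user can actually authenticate (all prepopulated users share the password `grafana`), a bind check like the following should succeed once the block is up:

```bash
# Returns the bound DN on success; a failed bind exits non-zero.
ldapwhoami -x -H ldap://127.0.0.1:389 \
  -D "cn=ldap-torkel,ou=users,dc=grafana,dc=org" -w grafana
```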
14
devenv/docker/blocks/openldap/prepopulate.sh
Executable file
@@ -0,0 +1,14 @@
#!/bin/bash

echo "Pre-populating ldap entries, first waiting for ldap to start"

sleep 3

adminUserDn="cn=admin,dc=grafana,dc=org"
adminPassword="grafana"

for file in /etc/ldap/prepopulate/*.ldif; do
  ldapadd -x -D "$adminUserDn" -w "$adminPassword" -f "$file"
done
9
devenv/docker/blocks/openldap/prepopulate/1_units.ldif
Normal file
@@ -0,0 +1,9 @@
dn: ou=groups,dc=grafana,dc=org
ou: Groups
objectclass: top
objectclass: organizationalUnit

dn: ou=users,dc=grafana,dc=org
ou: Users
objectclass: top
objectclass: organizationalUnit
80
devenv/docker/blocks/openldap/prepopulate/2_users.ldif
Normal file
@@ -0,0 +1,80 @@
# ldap-admin
dn: cn=ldap-admin,ou=users,dc=grafana,dc=org
mail: ldap-admin@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldap-admin
cn: ldap-admin

dn: cn=ldap-editor,ou=users,dc=grafana,dc=org
mail: ldap-editor@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldap-editor
cn: ldap-editor

dn: cn=ldap-viewer,ou=users,dc=grafana,dc=org
mail: ldap-viewer@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldap-viewer
cn: ldap-viewer

dn: cn=ldap-carl,ou=users,dc=grafana,dc=org
mail: ldap-carl@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldap-carl
cn: ldap-carl

dn: cn=ldap-daniel,ou=users,dc=grafana,dc=org
mail: ldap-daniel@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldap-daniel
cn: ldap-daniel

dn: cn=ldap-leo,ou=users,dc=grafana,dc=org
mail: ldap-leo@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldap-leo
cn: ldap-leo

dn: cn=ldap-tobias,ou=users,dc=grafana,dc=org
mail: ldap-tobias@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldap-tobias
cn: ldap-tobias

dn: cn=ldap-torkel,ou=users,dc=grafana,dc=org
mail: ldap-torkel@grafana.com
userPassword: grafana
objectClass: person
objectClass: top
objectClass: inetOrgPerson
objectClass: organizationalPerson
sn: ldap-torkel
cn: ldap-torkel
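These dev entries store `userPassword` in cleartext for convenience. If you prefer hashed values in your own ldif files, `slappasswd` generates one to paste in (the output varies per run because of the salt):

```bash
# Produces something like: {SSHA}x5q0...
slappasswd -h '{SSHA}' -s grafana
```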
25
devenv/docker/blocks/openldap/prepopulate/3_groups.ldif
Normal file
@@ -0,0 +1,25 @@
dn: cn=admins,ou=groups,dc=grafana,dc=org
cn: admins
objectClass: groupOfNames
objectClass: top
member: cn=ldap-admin,ou=users,dc=grafana,dc=org
member: cn=ldap-torkel,ou=users,dc=grafana,dc=org

dn: cn=editors,ou=groups,dc=grafana,dc=org
cn: editors
objectClass: groupOfNames
member: cn=ldap-editor,ou=users,dc=grafana,dc=org

dn: cn=backend,ou=groups,dc=grafana,dc=org
cn: backend
objectClass: groupOfNames
member: cn=ldap-carl,ou=users,dc=grafana,dc=org
member: cn=ldap-leo,ou=users,dc=grafana,dc=org
member: cn=ldap-torkel,ou=users,dc=grafana,dc=org

dn: cn=frontend,ou=groups,dc=grafana,dc=org
cn: frontend
objectClass: groupOfNames
member: cn=ldap-torkel,ou=users,dc=grafana,dc=org
member: cn=ldap-daniel,ou=users,dc=grafana,dc=org
member: cn=ldap-leo,ou=users,dc=grafana,dc=org
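Membership can be inspected directly; for example, listing the `member` values of the `backend` group defined above:

```bash
ldapsearch -x -H ldap://127.0.0.1:389 \
  -D "cn=admin,dc=grafana,dc=org" -w grafana \
  -b "ou=groups,dc=grafana,dc=org" "(cn=backend)" member
```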
3
devenv/docker/blocks/postgres_tests/Dockerfile
Normal file
@@ -0,0 +1,3 @@
FROM postgres:9.3
ADD setup.sql /docker-entrypoint-initdb.d
CMD ["postgres"]
3
devenv/docker/blocks/postgres_tests/setup.sql
Normal file
@@ -0,0 +1,3 @@
CREATE DATABASE grafanadstest;
REVOKE CONNECT ON DATABASE grafanadstest FROM PUBLIC;
GRANT CONNECT ON DATABASE grafanadstest TO grafanatest;
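A hedged check that the grant took effect, assuming a `grafanatest` role exists (typically created via the postgres image's environment variables in the block's compose fragment) and the compose service is named `postgrestest` — both names are assumptions here:

```bash
# \conninfo prints the connected database and user on success.
docker-compose exec postgrestest psql -U grafanatest -d grafanadstest -c '\conninfo'
```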
3
devenv/docker/blocks/prometheus/Dockerfile
Normal file
@@ -0,0 +1,3 @@
FROM prom/prometheus:v1.8.2
ADD prometheus.yml /etc/prometheus/
ADD alert.rules /etc/prometheus/
10
devenv/docker/blocks/prometheus/alert.rules
Normal file
@@ -0,0 +1,10 @@
# Alert Rules

ALERT AppCrash
  IF process_open_fds > 0
  FOR 15s
  LABELS { severity="critical" }
  ANNOTATIONS {
    summary = "Number of open fds > 0",
    description = "Just testing"
  }
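This uses the Prometheus 1.x rule syntax. The 1.x promtool bundled in the `prom/prometheus:v1.8.2` image can lint it; a sketch, assuming the file is on hand:

```bash
promtool check-rules alert.rules
```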
39
devenv/docker/blocks/prometheus/prometheus.yml
Normal file
@@ -0,0 +1,39 @@
# my global config
global:
  scrape_interval: 10s # Scrape targets every 10 seconds (the default is 1 minute).
  evaluation_interval: 10s # Evaluate rules every 10 seconds (the default is 1 minute).
  # scrape_timeout is set to the global default (10s).

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
  - "alert.rules"
  # - "first.rules"
  # - "second.rules"

alerting:
  alertmanagers:
  - scheme: http
    static_configs:
    - targets:
      - "127.0.0.1:9093"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node_exporter'
    static_configs:
      - targets: ['127.0.0.1:9100']

  - job_name: 'fake-data-gen'
    static_configs:
      - targets: ['127.0.0.1:9091']

  - job_name: 'grafana'
    static_configs:
      - targets: ['127.0.0.1:3000']

  - job_name: 'prometheus-random-data'
    static_configs:
      - targets: ['127.0.0.1:8081']
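Once the block is running, the scrape targets declared above and their health can be inspected through the HTTP API:

```bash
curl -s http://localhost:9090/api/v1/targets
```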
3
devenv/docker/blocks/prometheus2/Dockerfile
Normal file
@@ -0,0 +1,3 @@
FROM prom/prometheus:v2.2.0
ADD prometheus.yml /etc/prometheus/
ADD alert.rules /etc/prometheus/
10
devenv/docker/blocks/prometheus2/alert.rules
Normal file
@@ -0,0 +1,10 @@
# Alert Rules

ALERT AppCrash
  IF process_open_fds > 0
  FOR 15s
  LABELS { severity="critical" }
  ANNOTATIONS {
    summary = "Number of open fds > 0",
    description = "Just testing"
  }
39
devenv/docker/blocks/prometheus2/prometheus.yml
Normal file
@@ -0,0 +1,39 @@
# my global config
global:
  scrape_interval: 10s # Scrape targets every 10 seconds (the default is 1 minute).
  evaluation_interval: 10s # Evaluate rules every 10 seconds (the default is 1 minute).
  # scrape_timeout is set to the global default (10s).

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
#rule_files:
#  - "alert.rules"
#  - "first.rules"
#  - "second.rules"

# alerting:
#   alertmanagers:
#   - scheme: http
#     static_configs:
#     - targets:
#       - "127.0.0.1:9093"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node_exporter'
    static_configs:
      - targets: ['127.0.0.1:9100']

  - job_name: 'fake-data-gen'
    static_configs:
      - targets: ['127.0.0.1:9091']

  - job_name: 'grafana'
    static_configs:
      - targets: ['127.0.0.1:3000']

  - job_name: 'prometheus-random-data'
    static_configs:
      - targets: ['127.0.0.1:8081']
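Prometheus 2.x renamed the promtool subcommands; the config can be validated with the line below. (The 1.x-style alert.rules shipped with this block would need porting to the 2.x YAML rule format before being re-enabled under rule_files.)

```bash
promtool check config prometheus.yml
```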
3
devenv/docker/blocks/prometheus_mac/Dockerfile
Normal file
@@ -0,0 +1,3 @@
FROM prom/prometheus:v1.8.2
ADD prometheus.yml /etc/prometheus/
ADD alert.rules /etc/prometheus/
10
devenv/docker/blocks/prometheus_mac/alert.rules
Normal file
@@ -0,0 +1,10 @@
# Alert Rules

ALERT AppCrash
  IF process_open_fds > 0
  FOR 15s
  LABELS { severity="critical" }
  ANNOTATIONS {
    summary = "Number of open fds > 0",
    description = "Just testing"
  }
39
devenv/docker/blocks/prometheus_mac/prometheus.yml
Normal file
@@ -0,0 +1,39 @@
# my global config
global:
  scrape_interval: 10s # Scrape targets every 10 seconds (the default is 1 minute).
  evaluation_interval: 10s # Evaluate rules every 10 seconds (the default is 1 minute).
  # scrape_timeout is set to the global default (10s).

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
  - "alert.rules"
  # - "first.rules"
  # - "second.rules"

alerting:
  alertmanagers:
  - scheme: http
    static_configs:
    - targets:
      - "alertmanager:9093"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node_exporter'
    static_configs:
      - targets: ['node_exporter:9100']

  - job_name: 'fake-data-gen'
    static_configs:
      - targets: ['fake-prometheus-data:9091']

  - job_name: 'grafana'
    static_configs:
      - targets: ['host.docker.internal:3000']

  - job_name: 'prometheus-random-data'
    static_configs:
      - targets: ['prometheus-random-data:8080']
18
devenv/docker/blocks/prometheus_random_data/Dockerfile
Normal file
@@ -0,0 +1,18 @@
# This Dockerfile builds an image for a client_golang example.

# Builder image, where we build the example.
FROM golang:1.9.0 AS builder
# Download prometheus/client_golang/examples/random first
RUN go get github.com/prometheus/client_golang/examples/random
WORKDIR /go/src/github.com/prometheus/client_golang
WORKDIR /go/src/github.com/prometheus/client_golang/prometheus
RUN go get -d
WORKDIR /go/src/github.com/prometheus/client_golang/examples/random
RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w'

# Final image.
FROM scratch
LABEL maintainer "The Prometheus Authors <prometheus-developers@googlegroups.com>"
COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/random .
EXPOSE 8080
ENTRYPOINT ["/random"]
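The example binary serves Prometheus metrics on the exposed port, which is what the prometheus blocks scrape as `prometheus-random-data`; a quick smoke test once the port is published:

```bash
curl -s http://localhost:8080/metrics | head
```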
13
devenv/docker/blocks/smtp/Dockerfile
Normal file
@@ -0,0 +1,13 @@
FROM centos:centos7
LABEL maintainer="Przemyslaw Ozgo <linux@ozgo.info>"

RUN \
  yum update -y && \
  yum install -y net-snmp net-snmp-utils && \
  yum clean all

COPY bootstrap.sh /tmp/bootstrap.sh

EXPOSE 161

ENTRYPOINT ["/tmp/bootstrap.sh"]
27
devenv/docker/blocks/smtp/bootstrap.sh
Executable file
@@ -0,0 +1,27 @@
#!/bin/sh

set -u

# User params
USER_PARAMS=$@

# Internal params
RUN_CMD="snmpd -f ${USER_PARAMS}"

#######################################
# Echo/log function
# Arguments:
#   String: value to log
#######################################
log() {
  if [ -n "$*" ]; then echo "[`date +'%Y-%m-%d %T'`] $*";
  else echo; fi
}

# Launch
log $RUN_CMD
$RUN_CMD
rc=$?

# Exit immediately in case of any errors or when we have an interactive terminal
if [ $rc -ne 0 ] || test -t 0; then exit $rc; fi
log
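Despite the block's `smtp` path, the Dockerfile installs net-snmp and this script launches `snmpd` on port 161. A hedged smoke test, assuming the default snmpd.conf permits the `public` community:

```bash
snmpget -v2c -c public localhost sysDescr.0
```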
20
devenv/docker/buildcontainer/Dockerfile
Normal file
@@ -0,0 +1,20 @@
FROM centos:6.6

RUN yum install -y initscripts curl tar gcc libc6-dev git

ENV GOLANG_VERSION 1.4.2

RUN curl -sSL https://golang.org/dl/go$GOLANG_VERSION.src.tar.gz \
    | tar -v -C /usr/src -xz

RUN cd /usr/src/go/src && ./make.bash --no-clean 2>&1

ENV PATH /usr/src/go/bin:$PATH

RUN mkdir -p /go/src /go/bin && chmod -R 777 /go
ENV GOPATH /go
ENV PATH /go/bin:$PATH

WORKDIR /go/src/github.com/grafana/grafana

CMD ["go", "run", "build.go", "build"]
10
devenv/docker/buildcontainer/build.sh
Executable file
@@ -0,0 +1,10 @@
#!/bin/bash

docker kill gfbuild
docker rm gfbuild

docker build --tag "grafana/buildcontainer" docker/buildcontainer

docker run -i -t \
  -v /home/torkel/dev/go:/go \
  --name gfbuild grafana/buildcontainer
14
devenv/docker/buildcontainer/build_circle.sh
Executable file
@@ -0,0 +1,14 @@
#!/bin/bash

docker info && docker version
mkdir -p ~/docker

# cache some Docker images to make builds faster
if [[ -e ~/docker/centos.tar ]]; then
  docker load -i ~/docker/centos.tar;
else
  docker build --tag "grafana/buildcontainer" docker/buildcontainer
  docker save grafana/buildcontainer > ~/docker/centos.tar;
fi
5
devenv/docker/buildcontainer/run_circle.sh
Executable file
@@ -0,0 +1,5 @@
#!/bin/bash

docker run -i -t \
  -v /home/ubuntu/.go_workspace:/go \
  --name gfbuild grafana/buildcontainer
2
devenv/docker/compose_header.yml
Normal file
@@ -0,0 +1,2 @@
version: "2"
services:
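Block fragments get appended under this `services:` key to form a single compose file; compose file format "2" requires docker-compose 1.6 or newer. Once a `docker-compose.yaml` has been generated in `devenv/`, the selected blocks come up with:

```bash
docker-compose up -d
docker-compose ps
```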
6
devenv/docker/debtest/Dockerfile
Normal file
@@ -0,0 +1,6 @@
FROM debian:jessie

RUN apt-get update && apt-get install -y vim

ADD *.deb /tmp/
10
devenv/docker/debtest/build.sh
Executable file
@@ -0,0 +1,10 @@
#!/bin/bash

cp Dockerfile ../../dist
cd ../../dist

docker build --tag "grafana/debtest" .

rm Dockerfile

docker run -i -t grafana/debtest /bin/bash
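Inside the resulting container the packages from `dist/` land in `/tmp` (see the Dockerfile above), so a manual install looks roughly like this — the `grafana_*.deb` filename pattern is an assumption, since the exact name depends on the built version:

```bash
dpkg -i /tmp/grafana_*.deb || apt-get -f install -y
```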
10
devenv/docker/rpmtest/build.sh
Executable file
@@ -0,0 +1,10 @@
#!/bin/bash

cp Dockerfile ../../dist
cd ../../dist

docker build --tag "grafana/rpmtest" .

rm Dockerfile

docker run -i -t grafana/rpmtest /bin/bash