Mirror of https://github.com/grafana/grafana

Devenv: Graphite docker blocks fixes (#32352)

* Fix running graphite data sources in dev env
* Remove broken dashboard files
* Add Graphite 1.0 and 0.9 to provisioned data sources

Branch: pull/32466/head
Parent: 678dcf6811
Commit: 3fcff2555a
@@ -1,16 +1,16 @@
graphite09:
build: docker/blocks/graphite
graphite11:
image: graphiteapp/graphite-statsd
ports:
- "8080:80"
- "2003:2003"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- "8180:80"
- "2103-2104:2003-2004"
- "2123-2124:2023-2024"

fake-graphite-data:
fake-graphite11-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: graphite
FD_PORT: 2003

FD_PORT: 2103
FD_GRAPHITE_VERSION: 1.1
depends_on:
- graphite11
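Note: the commit message also mentions adding the Graphite versions to the provisioned data sources. As a rough sketch only (the file path, data source name, and jsonData values below are assumptions, not taken from this diff; the URL port comes from the "8180:80" mapping above), a provisioned entry for the graphite11 block could look like this:

```yaml
# Hypothetical provisioning fragment; a path such as devenv/datasources.yaml is an assumption
apiVersion: 1

datasources:
  # Data source name is an assumption; port 8180 is the host port mapped to the graphite11 block
  - name: gdev-graphite
    type: graphite
    access: proxy
    url: http://localhost:8180
    jsonData:
      # Mirrors FD_GRAPHITE_VERSION above; the PR text mentions 1.0, so the real value may differ
      graphiteVersion: "1.1"
```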
@@ -0,0 +1,19 @@
  graphite09:
    build: docker/blocks/graphite09
    ports:
      - "8380:80"
      - "2303:2003"
      - "2304:2004"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro

  fake-graphite09-data:
    image: grafana/fake-data-gen
    network_mode: bridge
    environment:
      FD_DATASOURCE: graphite
      FD_PORT: 2303
      FD_GRAPHITE_VERSION: 0.9
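The new graphite09 block maps the Graphite web UI to host port 8380, so a matching provisioned data source would be a small variation of the sketch above (again, the name and exact settings are assumptions; only the port comes from this block):

```yaml
# Hypothetical provisioning fragment for the 0.9 block (name and path are assumptions)
datasources:
  - name: gdev-graphite-0.9
    type: graphite
    access: proxy
    url: http://localhost:8380
    jsonData:
      graphiteVersion: "0.9"
```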
@@ -1,124 +0,0 @@
FROM phusion/baseimage:0.9.22
MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>

RUN apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y install vim \
    nginx \
    python-dev \
    python-flup \
    python-pip \
    python-ldap \
    expect \
    git \
    memcached \
    sqlite3 \
    libffi-dev \
    libcairo2 \
    libcairo2-dev \
    python-cairo \
    python-rrdtool \
    pkg-config \
    nodejs \
  && rm -rf /var/lib/apt/lists/*

# choose a timezone at build-time
# use `--build-arg CONTAINER_TIMEZONE=Europe/Brussels` in `docker build`
ARG CONTAINER_TIMEZONE
ENV DEBIAN_FRONTEND noninteractive

RUN if [ ! -z "${CONTAINER_TIMEZONE}" ]; \
    then ln -sf /usr/share/zoneinfo/$CONTAINER_TIMEZONE /etc/localtime && \
    dpkg-reconfigure -f noninteractive tzdata; \
    fi

# fix python dependencies (LTS Django and newer memcached/txAMQP)
RUN pip install --upgrade pip && \
  pip install --no-cache-dir django==1.8.18 \
  python-memcached==1.53 \
  txAMQP==0.6.2

ARG version=1.0.2
ARG whisper_version=${version}
ARG carbon_version=${version}
ARG graphite_version=${version}

RUN echo "Building Version: $version"

ARG whisper_repo=https://github.com/graphite-project/whisper.git
ARG carbon_repo=https://github.com/graphite-project/carbon.git
ARG graphite_repo=https://github.com/graphite-project/graphite-web.git

ARG statsd_version=v0.8.0

ARG statsd_repo=https://github.com/etsy/statsd.git

# install whisper
RUN git clone -b ${whisper_version} --depth 1 ${whisper_repo} /usr/local/src/whisper
WORKDIR /usr/local/src/whisper
RUN python ./setup.py install

# install carbon
RUN git clone -b ${carbon_version} --depth 1 ${carbon_repo} /usr/local/src/carbon
WORKDIR /usr/local/src/carbon
RUN pip install --no-cache-dir -r requirements.txt \
  && python ./setup.py install

# install graphite
RUN git clone -b ${graphite_version} --depth 1 ${graphite_repo} /usr/local/src/graphite-web
WORKDIR /usr/local/src/graphite-web
RUN pip install --no-cache-dir -r requirements.txt \
  && python ./setup.py install

# install statsd
RUN git clone -b ${statsd_version} ${statsd_repo} /opt/statsd

# config graphite
ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
# ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
WORKDIR /opt/graphite/webapp
RUN mkdir -p /var/log/graphite/ \
  && PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings

# config statsd
ADD conf/opt/statsd/config.js /opt/statsd/

# config nginx
RUN rm /etc/nginx/sites-enabled/default
ADD conf/etc/nginx/nginx.conf /etc/nginx/nginx.conf
ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/graphite-statsd.conf

# init django admin
ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
RUN chmod +x /usr/local/bin/manage.sh && /usr/local/bin/django_admin_init.exp

# logging support
RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
ADD conf/etc/logrotate.d/graphite-statsd /etc/logrotate.d/graphite-statsd

# daemons
ADD conf/etc/service/carbon/run /etc/service/carbon/run
ADD conf/etc/service/carbon-aggregator/run /etc/service/carbon-aggregator/run
ADD conf/etc/service/graphite/run /etc/service/graphite/run
ADD conf/etc/service/statsd/run /etc/service/statsd/run
ADD conf/etc/service/nginx/run /etc/service/nginx/run

# default conf setup
ADD conf /etc/graphite-statsd/conf
ADD conf/etc/my_init.d/01_conf_init.sh /etc/my_init.d/01_conf_init.sh

# cleanup
RUN apt-get clean \
  && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# defaults
EXPOSE 80 2003-2004 2023-2024 8125 8125/udp 8126
VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
WORKDIR /
ENV HOME /root
ENV STATSD_INTERFACE udp

CMD ["/sbin/my_init"]
File diff suppressed because it is too large
@@ -1,11 +0,0 @@
/var/log/*.log /var/log/*/*.log {
  weekly
  size 50M
  missingok
  rotate 10
  compress
  delaycompress
  notifempty
  copytruncate
  su root syslog
}
@@ -1,36 +0,0 @@
#!/bin/bash

conf_dir=/etc/graphite-statsd/conf

# auto setup graphite with default configs if /opt/graphite is missing
# needed for the use case when a docker host volume is mounted at an of the following:
#  - /opt/graphite
#  - /opt/graphite/conf
#  - /opt/graphite/webapp/graphite
graphite_dir_contents=$(find /opt/graphite -mindepth 1 -print -quit)
graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit)
graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit)
graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit)
if [[ -z $graphite_dir_contents ]]; then
  # git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
  cd /usr/local/src/graphite-web && python ./setup.py install
fi
if [[ -z $graphite_storage_dir_contents ]]; then
  /usr/local/bin/django_admin_init.exp
fi
if [[ -z $graphite_conf_dir_contents ]]; then
  cp -R $conf_dir/opt/graphite/conf/*.conf /opt/graphite/conf/
fi
if [[ -z $graphite_webapp_dir_contents ]]; then
  cp $conf_dir/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
fi

# auto setup statsd with default config if /opt/statsd is missing
# needed for the use case when a docker host volume is mounted at an of the following:
#  - /opt/statsd
statsd_dir_contents=$(find /opt/statsd -mindepth 1 -print -quit)
if [[ -z $statsd_dir_contents ]]; then
  git clone --depth 1 -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
  cp $conf_dir/opt/statsd/config.js /opt/statsd/config.js
fi
@ -1,96 +0,0 @@ |
||||
user www-data; |
||||
worker_processes 4; |
||||
pid /run/nginx.pid; |
||||
daemon off; |
||||
|
||||
events { |
||||
worker_connections 768; |
||||
# multi_accept on; |
||||
} |
||||
|
||||
http { |
||||
|
||||
## |
||||
# Basic Settings |
||||
## |
||||
|
||||
sendfile on; |
||||
tcp_nopush on; |
||||
tcp_nodelay on; |
||||
keepalive_timeout 65; |
||||
types_hash_max_size 2048; |
||||
# server_tokens off; |
||||
|
||||
# server_names_hash_bucket_size 64; |
||||
# server_name_in_redirect off; |
||||
|
||||
include /etc/nginx/mime.types; |
||||
default_type application/octet-stream; |
||||
|
||||
## |
||||
# Logging Settings |
||||
## |
||||
|
||||
access_log /var/log/nginx/access.log; |
||||
error_log /var/log/nginx/error.log; |
||||
|
||||
## |
||||
# Gzip Settings |
||||
## |
||||
|
||||
gzip on; |
||||
gzip_disable "msie6"; |
||||
|
||||
# gzip_vary on; |
||||
# gzip_proxied any; |
||||
# gzip_comp_level 6; |
||||
# gzip_buffers 16 8k; |
||||
# gzip_http_version 1.1; |
||||
# gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; |
||||
|
||||
## |
||||
# nginx-naxsi config |
||||
## |
||||
# Uncomment it if you installed nginx-naxsi |
||||
## |
||||
|
||||
#include /etc/nginx/naxsi_core.rules; |
||||
|
||||
## |
||||
# nginx-passenger config |
||||
## |
||||
# Uncomment it if you installed nginx-passenger |
||||
## |
||||
|
||||
#passenger_root /usr; |
||||
#passenger_ruby /usr/bin/ruby; |
||||
|
||||
## |
||||
# Virtual Host Configs |
||||
## |
||||
|
||||
include /etc/nginx/conf.d/*.conf; |
||||
include /etc/nginx/sites-enabled/*; |
||||
} |
||||
|
||||
|
||||
#mail { |
||||
# # See sample authentication script at: |
||||
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript |
||||
# |
||||
# # auth_http localhost/auth.php; |
||||
# # pop3_capabilities "TOP" "USER"; |
||||
# # imap_capabilities "IMAP4rev1" "UIDPLUS"; |
||||
# |
||||
# server { |
||||
# listen localhost:110; |
||||
# protocol pop3; |
||||
# proxy on; |
||||
# } |
||||
# |
||||
# server { |
||||
# listen localhost:143; |
||||
# protocol imap; |
||||
# proxy on; |
||||
# } |
||||
#} |
||||
@@ -1,31 +0,0 @@
server {
  listen 80;
  root /opt/graphite/static;
  index index.html;

  location /media {
    # django admin static files
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/media/;
  }

  location /admin/auth/admin {
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
  }

  location /admin/auth/user/admin {
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
  }

  location / {
    proxy_pass http://localhost:8080;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

    add_header 'Access-Control-Allow-Origin' '*';
    add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
    add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type';
    add_header 'Access-Control-Allow-Credentials' 'true';
  }

}
@@ -1,4 +0,0 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-aggregator-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-aggregator.py start --debug 2>&1 >> /var/log/carbon-aggregator.log
@@ -1,4 +0,0 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-cache-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-cache.py start --debug 2>&1 >> /var/log/carbon.log
@@ -1,3 +0,0 @@
#!/bin/bash

export PYTHONPATH=/opt/graphite/webapp && exec /usr/local/bin/gunicorn wsgi --workers=4 --bind=127.0.0.1:8080 --log-file=/var/log/gunicorn.log --preload --pythonpath=/opt/graphite/webapp/graphite
@@ -1,4 +0,0 @@
#!/bin/bash

mkdir -p /var/log/nginx
exec /usr/sbin/nginx -c /etc/nginx/nginx.conf
@@ -1,4 +0,0 @@
#!/bin/bash

exec /usr/bin/nodejs /opt/statsd/stats.js /opt/statsd/config.js >> /var/log/statsd.log 2>&1
@@ -1,35 +0,0 @@
# The form of each line in this file should be as follows:
#
#   output_template (frequency) = method input_pattern
#
# This will capture any received metrics that match 'input_pattern'
# for calculating an aggregate metric. The calculation will occur
# every 'frequency' seconds and the 'method' can specify 'sum' or
# 'avg'. The name of the aggregate metric will be derived from
# 'output_template' filling in any captured fields from 'input_pattern'.
#
# For example, if your metric naming scheme is:
#
#   <env>.applications.<app>.<server>.<metric>
#
# You could configure some aggregations like so:
#
#   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
#   <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
#
# As an example, if the following metrics are received:
#
#   prod.applications.apache.www01.requests
#   prod.applications.apache.www01.requests
#
# They would all go into the same aggregation buffer and after 60 seconds the
# aggregate metric 'prod.applications.apache.all.requests' would be calculated
# by summing their values.
#
# Template components such as <env> will match everything up to the next dot.
# To match metric multiple components including the dots, use <<metric>> in the
# input template:
#
#   <env>.applications.<app>.all.<app_metric> (60) = sum <env>.applications.<app>.*.<<app_metric>>
#
# Note that any time this file is modified, it will be re-read automatically.
@@ -1,5 +0,0 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, any metrics received which
# match one of these expressions will be dropped
# This file is reloaded automatically when changes are made
^some\.noisy\.metric\.prefix\..*
@ -1,75 +0,0 @@ |
||||
# This is a configuration file with AMQP enabled |
||||
|
||||
[cache] |
||||
LOCAL_DATA_DIR = |
||||
|
||||
# Specify the user to drop privileges to |
||||
# If this is blank carbon runs as the user that invokes it |
||||
# This user must have write access to the local data directory |
||||
USER = |
||||
|
||||
# Limit the size of the cache to avoid swapping or becoming CPU bound. |
||||
# Sorts and serving cache queries gets more expensive as the cache grows. |
||||
# Use the value "inf" (infinity) for an unlimited cache size. |
||||
MAX_CACHE_SIZE = inf |
||||
|
||||
# Limits the number of whisper update_many() calls per second, which effectively |
||||
# means the number of write requests sent to the disk. This is intended to |
||||
# prevent over-utilizing the disk and thus starving the rest of the system. |
||||
# When the rate of required updates exceeds this, then carbon's caching will |
||||
# take effect and increase the overall throughput accordingly. |
||||
MAX_UPDATES_PER_SECOND = 1000 |
||||
|
||||
# Softly limits the number of whisper files that get created each minute. |
||||
# Setting this value low (like at 50) is a good way to ensure your graphite |
||||
# system will not be adversely impacted when a bunch of new metrics are |
||||
# sent to it. The trade off is that it will take much longer for those metrics' |
||||
# database files to all get created and thus longer until the data becomes usable. |
||||
# Setting this value high (like "inf" for infinity) will cause graphite to create |
||||
# the files quickly but at the risk of slowing I/O down considerably for a while. |
||||
MAX_CREATES_PER_MINUTE = inf |
||||
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0 |
||||
LINE_RECEIVER_PORT = 2003 |
||||
|
||||
UDP_RECEIVER_INTERFACE = 0.0.0.0 |
||||
UDP_RECEIVER_PORT = 2003 |
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0 |
||||
PICKLE_RECEIVER_PORT = 2004 |
||||
|
||||
CACHE_QUERY_INTERFACE = 0.0.0.0 |
||||
CACHE_QUERY_PORT = 7002 |
||||
|
||||
# Enable AMQP if you want to receive metrics using you amqp broker |
||||
ENABLE_AMQP = True |
||||
|
||||
# Verbose means a line will be logged for every metric received |
||||
# useful for testing |
||||
AMQP_VERBOSE = True |
||||
|
||||
# your credentials for the amqp server |
||||
# AMQP_USER = guest |
||||
# AMQP_PASSWORD = guest |
||||
|
||||
# the network settings for the amqp server |
||||
# AMQP_HOST = localhost |
||||
# AMQP_PORT = 5672 |
||||
|
||||
# if you want to include the metric name as part of the message body |
||||
# instead of as the routing key, set this to True |
||||
# AMQP_METRIC_NAME_IN_BODY = False |
||||
|
||||
# NOTE: you cannot run both a cache and a relay on the same server |
||||
# with the default configuration, you have to specify a distinict |
||||
# interfaces and ports for the listeners. |
||||
|
||||
[relay] |
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0 |
||||
LINE_RECEIVER_PORT = 2003 |
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0 |
||||
PICKLE_RECEIVER_PORT = 2004 |
||||
|
||||
CACHE_SERVERS = server1, server2, server3 |
||||
MAX_QUEUE_SIZE = 10000 |
||||
@ -1,594 +0,0 @@ |
||||
[cache] |
||||
# Configure carbon directories. |
||||
# |
||||
# OS environment variables can be used to tell carbon where graphite is |
||||
# installed, where to read configuration from and where to write data. |
||||
# |
||||
# GRAPHITE_ROOT - Root directory of the graphite installation. |
||||
# Defaults to ../ |
||||
# GRAPHITE_CONF_DIR - Configuration directory (where this file lives). |
||||
# Defaults to $GRAPHITE_ROOT/conf/ |
||||
# GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files. |
||||
# Defaults to $GRAPHITE_ROOT/storage/ |
||||
# |
||||
# To change other directory paths, add settings to this file. The following |
||||
# configuration variables are available with these default values: |
||||
# |
||||
# STORAGE_DIR = $GRAPHITE_STORAGE_DIR |
||||
# LOCAL_DATA_DIR = %(STORAGE_DIR)s/whisper/ |
||||
# WHITELISTS_DIR = %(STORAGE_DIR)s/lists/ |
||||
# CONF_DIR = %(STORAGE_DIR)s/conf/ |
||||
# LOG_DIR = %(STORAGE_DIR)s/log/ |
||||
# PID_DIR = %(STORAGE_DIR)s/ |
||||
# |
||||
# For FHS style directory structures, use: |
||||
# |
||||
# STORAGE_DIR = /var/lib/carbon/ |
||||
# CONF_DIR = /etc/carbon/ |
||||
# LOG_DIR = /var/log/carbon/ |
||||
# PID_DIR = /var/run/ |
||||
# |
||||
#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/ |
||||
|
||||
# Specify the database library used to store metric data on disk. Each database |
||||
# may have configurable options to change the behaviour of how it writes to |
||||
# persistent storage. |
||||
# |
||||
# whisper - Fixed-size database, similar in design and purpose to RRD. This is |
||||
# the default storage backend for carbon and the most rigorously tested. |
||||
# |
||||
# ceres - Experimental alternative database that supports storing data in sparse |
||||
# files of arbitrary fixed-size resolutions. |
||||
DATABASE = whisper |
||||
|
||||
# Enable daily log rotation. If disabled, a new file will be opened whenever the log file path no |
||||
# longer exists (i.e. it is removed or renamed) |
||||
ENABLE_LOGROTATION = True |
||||
|
||||
# Specify the user to drop privileges to |
||||
# If this is blank carbon-cache runs as the user that invokes it |
||||
# This user must have write access to the local data directory |
||||
USER = |
||||
|
||||
# Limit the size of the cache to avoid swapping or becoming CPU bound. |
||||
# Sorts and serving cache queries gets more expensive as the cache grows. |
||||
# Use the value "inf" (infinity) for an unlimited cache size. |
||||
# value should be an integer number of metric datapoints. |
||||
MAX_CACHE_SIZE = inf |
||||
|
||||
# Limits the number of whisper update_many() calls per second, which effectively |
||||
# means the number of write requests sent to the disk. This is intended to |
||||
# prevent over-utilizing the disk and thus starving the rest of the system. |
||||
# When the rate of required updates exceeds this, then carbon's caching will |
||||
# take effect and increase the overall throughput accordingly. |
||||
MAX_UPDATES_PER_SECOND = 500 |
||||
|
||||
# If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a |
||||
# stop/shutdown is initiated. This helps when MAX_UPDATES_PER_SECOND is |
||||
# relatively low and carbon has cached a lot of updates; it enables the carbon |
||||
# daemon to shutdown more quickly. |
||||
# MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000 |
||||
|
||||
# Softly limits the number of whisper files that get created each minute. |
||||
# Setting this value low (e.g. 50) is a good way to ensure that your carbon |
||||
# system will not be adversely impacted when a bunch of new metrics are |
||||
# sent to it. The trade off is that any metrics received in excess of this |
||||
# value will be silently dropped, and the whisper file will not be created |
||||
# until such point as a subsequent metric is received and fits within the |
||||
# defined rate limit. Setting this value high (like "inf" for infinity) will |
||||
# cause carbon to create the files quickly but at the risk of increased I/O. |
||||
MAX_CREATES_PER_MINUTE = 50 |
||||
|
||||
# Set the minimum timestamp resolution supported by this instance. This allows |
||||
# internal optimisations by overwriting points with equal truncated timestamps |
||||
# in order to limit the number of updates to the database. It defaults to one |
||||
# second. |
||||
MIN_TIMESTAMP_RESOLUTION = 1 |
||||
|
||||
# Set the minimum lag in seconds for a point to be written to the database |
||||
# in order to optimize batching. This means that each point will wait at least |
||||
# the duration of this lag before being written. Setting this to 0 disable the feature. |
||||
# This currently only works when using the timesorted write strategy. |
||||
# MIN_TIMESTAMP_LAG = 0 |
||||
|
||||
# Set the interface and port for the line (plain text) listener. Setting the |
||||
# interface to 0.0.0.0 listens on all interfaces. Port can be set to 0 to |
||||
# disable this listener if it is not required. |
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0 |
||||
LINE_RECEIVER_PORT = 2003 |
||||
|
||||
# Set this to True to enable the UDP listener. By default this is off |
||||
# because it is very common to run multiple carbon daemons and managing |
||||
# another (rarely used) port for every carbon instance is not fun. |
||||
ENABLE_UDP_LISTENER = False |
||||
UDP_RECEIVER_INTERFACE = 0.0.0.0 |
||||
UDP_RECEIVER_PORT = 2003 |
||||
|
||||
# Set the interface and port for the pickle listener. Setting the interface to |
||||
# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this |
||||
# listener if it is not required. |
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0 |
||||
PICKLE_RECEIVER_PORT = 2004 |
||||
|
||||
# Set the interface and port for the protobuf listener. Setting the interface to |
||||
# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this |
||||
# listener if it is not required. |
||||
# PROTOBUF_RECEIVER_INTERFACE = 0.0.0.0 |
||||
# PROTOBUF_RECEIVER_PORT = 2005 |
||||
|
||||
# Limit the number of open connections the receiver can handle as any time. |
||||
# Default is no limit. Setting up a limit for sites handling high volume |
||||
# traffic may be recommended to avoid running out of TCP memory or having |
||||
# thousands of TCP connections reduce the throughput of the service. |
||||
#MAX_RECEIVER_CONNECTIONS = inf |
||||
|
||||
# Per security concerns outlined in Bug #817247 the pickle receiver |
||||
# will use a more secure and slightly less efficient unpickler. |
||||
# Set this to True to revert to the old-fashioned insecure unpickler. |
||||
USE_INSECURE_UNPICKLER = False |
||||
|
||||
CACHE_QUERY_INTERFACE = 0.0.0.0 |
||||
CACHE_QUERY_PORT = 7002 |
||||
|
||||
# Set this to False to drop datapoints received after the cache |
||||
# reaches MAX_CACHE_SIZE. If this is True (the default) then sockets |
||||
# over which metrics are received will temporarily stop accepting |
||||
# data until the cache size falls below 95% MAX_CACHE_SIZE. |
||||
USE_FLOW_CONTROL = True |
||||
|
||||
# If enabled this setting is used to timeout metric client connection if no |
||||
# metrics have been sent in specified time in seconds |
||||
#METRIC_CLIENT_IDLE_TIMEOUT = None |
||||
|
||||
# By default, carbon-cache will log every whisper update and cache hit. |
||||
# This can be excessive and degrade performance if logging on the same |
||||
# volume as the whisper data is stored. |
||||
LOG_UPDATES = False |
||||
LOG_CREATES = False |
||||
LOG_CACHE_HITS = False |
||||
LOG_CACHE_QUEUE_SORTS = False |
||||
|
||||
# The thread that writes metrics to disk can use one of the following strategies |
||||
# determining the order in which metrics are removed from cache and flushed to |
||||
# disk. The default option preserves the same behavior as has been historically |
||||
# available in version 0.9.10. |
||||
# |
||||
# sorted - All metrics in the cache will be counted and an ordered list of |
||||
# them will be sorted according to the number of datapoints in the cache at the |
||||
# moment of the list's creation. Metrics will then be flushed from the cache to |
||||
# disk in that order. |
||||
# |
||||
# timesorted - All metrics in the list will be looked at and sorted according |
||||
# to the timestamp of there datapoints. The metric that were the least recently |
||||
# written will be written first. This is an hybrid strategy between max and |
||||
# sorted which is particularly adapted to sets of metrics with non-uniform |
||||
# resolutions. |
||||
# |
||||
# max - The writer thread will always pop and flush the metric from cache |
||||
# that has the most datapoints. This will give a strong flush preference to |
||||
# frequently updated metrics and will also reduce random file-io. Infrequently |
||||
# updated metrics may only ever be persisted to disk at daemon shutdown if |
||||
# there are a large number of metrics which receive very frequent updates OR if |
||||
# disk i/o is very slow. |
||||
# |
||||
# naive - Metrics will be flushed from the cache to disk in an unordered |
||||
# fashion. This strategy may be desirable in situations where the storage for |
||||
# whisper files is solid state, CPU resources are very limited or deference to |
||||
# the OS's i/o scheduler is expected to compensate for the random write |
||||
# pattern. |
||||
# |
||||
CACHE_WRITE_STRATEGY = sorted |
||||
|
||||
# On some systems it is desirable for whisper to write synchronously. |
||||
# Set this option to True if you'd like to try this. Basically it will |
||||
# shift the onus of buffering writes from the kernel into carbon's cache. |
||||
WHISPER_AUTOFLUSH = False |
||||
|
||||
# By default new Whisper files are created pre-allocated with the data region |
||||
# filled with zeros to prevent fragmentation and speed up contiguous reads and |
||||
# writes (which are common). Enabling this option will cause Whisper to create |
||||
# the file sparsely instead. Enabling this option may allow a large increase of |
||||
# MAX_CREATES_PER_MINUTE but may have longer term performance implications |
||||
# depending on the underlying storage configuration. |
||||
# WHISPER_SPARSE_CREATE = False |
||||
|
||||
# Only beneficial on linux filesystems that support the fallocate system call. |
||||
# It maintains the benefits of contiguous reads/writes, but with a potentially |
||||
# much faster creation speed, by allowing the kernel to handle the block |
||||
# allocation and zero-ing. Enabling this option may allow a large increase of |
||||
# MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported |
||||
# this option will gracefully fallback to standard POSIX file access methods. |
||||
WHISPER_FALLOCATE_CREATE = True |
||||
|
||||
# Enabling this option will cause Whisper to lock each Whisper file it writes |
||||
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when |
||||
# multiple carbon-cache daemons are writing to the same files. |
||||
# WHISPER_LOCK_WRITES = False |
||||
|
||||
# On systems which has a large number of metrics, an amount of Whisper write(2)'s |
||||
# pageback sometimes cause disk thrashing due to memory shortage, so that abnormal |
||||
# disk reads occur. Enabling this option makes it possible to decrease useless |
||||
# page cache memory by posix_fadvise(2) with POSIX_FADVISE_RANDOM option. |
||||
# WHISPER_FADVISE_RANDOM = False |
||||
|
||||
# By default all nodes stored in Ceres are cached in memory to improve the |
||||
# throughput of reads and writes to underlying slices. Turning this off will |
||||
# greatly reduce memory consumption for databases with millions of metrics, at |
||||
# the cost of a steep increase in disk i/o, approximately an extra two os.stat |
||||
# calls for every read and write. Reasons to do this are if the underlying |
||||
# storage can handle stat() with practically zero cost (SSD, NVMe, zRAM). |
||||
# Valid values are: |
||||
# all - all nodes are cached |
||||
# none - node caching is disabled |
||||
# CERES_NODE_CACHING_BEHAVIOR = all |
||||
|
||||
# Ceres nodes can have many slices and caching the right ones can improve |
||||
# performance dramatically. Note that there are many trade-offs to tinkering |
||||
# with this, and unless you are a ceres developer you *really* should not |
||||
# mess with this. Valid values are: |
||||
# latest - only the most recent slice is cached |
||||
# all - all slices are cached |
||||
# none - slice caching is disabled |
||||
# CERES_SLICE_CACHING_BEHAVIOR = latest |
||||
|
||||
# If a Ceres node accumulates too many slices, performance can suffer. |
||||
# This can be caused by intermittently reported data. To mitigate |
||||
# slice fragmentation there is a tolerance for how much space can be |
||||
# wasted within a slice file to avoid creating a new one. That tolerance |
||||
# level is determined by MAX_SLICE_GAP, which is the number of consecutive |
||||
# null datapoints allowed in a slice file. |
||||
# If you set this very low, you will waste less of the *tiny* bit disk space |
||||
# that this feature wastes, and you will be prone to performance problems |
||||
# caused by slice fragmentation, which can be pretty severe. |
||||
# If you set this really high, you will waste a bit more disk space (each |
||||
# null datapoint wastes 8 bytes, but keep in mind your filesystem's block |
||||
# size). If you suffer slice fragmentation issues, you should increase this or |
||||
# run the ceres-maintenance defrag plugin more often. However you should not |
||||
# set it to be huge because then if a large but allowed gap occurs it has to |
||||
# get filled in, which means instead of a simple 8-byte write to a new file we |
||||
# could end up doing an (8 * MAX_SLICE_GAP)-byte write to the latest slice. |
||||
# CERES_MAX_SLICE_GAP = 80 |
||||
|
||||
# Enabling this option will cause Ceres to lock each Ceres file it writes to |
||||
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when |
||||
# multiple carbon-cache daemons are writing to the same files. |
||||
# CERES_LOCK_WRITES = False |
||||
|
||||
# Set this to True to enable whitelisting and blacklisting of metrics in |
||||
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is |
||||
# missing or empty, all metrics will pass through |
||||
# USE_WHITELIST = False |
||||
|
||||
# By default, carbon itself will log statistics (such as a count, |
||||
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60 |
||||
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation |
||||
# CARBON_METRIC_PREFIX = carbon |
||||
# CARBON_METRIC_INTERVAL = 60 |
||||
|
||||
# Enable AMQP if you want to receive metrics using an amqp broker |
||||
# ENABLE_AMQP = False |
||||
|
||||
# Verbose means a line will be logged for every metric received |
||||
# useful for testing |
||||
# AMQP_VERBOSE = False |
||||
|
||||
# AMQP_HOST = localhost |
||||
# AMQP_PORT = 5672 |
||||
# AMQP_VHOST = / |
||||
# AMQP_USER = guest |
||||
# AMQP_PASSWORD = guest |
||||
# AMQP_EXCHANGE = graphite |
||||
# AMQP_METRIC_NAME_IN_BODY = False |
||||
|
||||
# The manhole interface allows you to SSH into the carbon daemon |
||||
# and get a python interpreter. BE CAREFUL WITH THIS! If you do |
||||
# something like time.sleep() in the interpreter, the whole process |
||||
# will sleep! This is *extremely* helpful in debugging, assuming |
||||
# you are familiar with the code. If you are not, please don't |
||||
# mess with this, you are asking for trouble :) |
||||
# |
||||
# ENABLE_MANHOLE = False |
||||
# MANHOLE_INTERFACE = 127.0.0.1 |
||||
# MANHOLE_PORT = 7222 |
||||
# MANHOLE_USER = admin |
||||
# MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE= |
||||
|
||||
# Patterns for all of the metrics this machine will store. Read more at |
||||
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings |
||||
# |
||||
# Example: store all sales, linux servers, and utilization metrics |
||||
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization |
||||
# |
||||
# Example: store everything |
||||
# BIND_PATTERNS = # |
||||
|
||||
# URL of graphite-web instance, this is used to add incoming series to the tag database |
||||
GRAPHITE_URL = http://127.0.0.1:80 |
||||
|
||||
# Tag update interval, this specifies how frequently updates to existing series will trigger |
||||
# an update to the tag index, the default setting is once every 100 updates |
||||
# TAG_UPDATE_INTERVAL = 100 |
||||
|
||||
# To configure special settings for the carbon-cache instance 'b', uncomment this: |
||||
#[cache:b] |
||||
#LINE_RECEIVER_PORT = 2103 |
||||
#PICKLE_RECEIVER_PORT = 2104 |
||||
#CACHE_QUERY_PORT = 7102 |
||||
# and any other settings you want to customize, defaults are inherited |
||||
# from the [cache] section. |
||||
# You can then specify the --instance=b option to manage this instance |
||||
# |
||||
# In order to turn off logging of successful connections for the line |
||||
# receiver, set this to False |
||||
# LOG_LISTENER_CONN_SUCCESS = True |
||||
|
||||
[relay] |
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0 |
||||
LINE_RECEIVER_PORT = 2013 |
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0 |
||||
PICKLE_RECEIVER_PORT = 2014 |
||||
|
||||
# Carbon-relay has several options for metric routing controlled by RELAY_METHOD |
||||
# |
||||
# Use relay-rules.conf to route metrics to destinations based on pattern rules |
||||
#RELAY_METHOD = rules |
||||
# |
||||
# Use consistent-hashing for even distribution of metrics between destinations |
||||
#RELAY_METHOD = consistent-hashing |
||||
# |
||||
# Use consistent-hashing but take into account an aggregation-rules.conf shared |
||||
# by downstream carbon-aggregator daemons. This will ensure that all metrics |
||||
# that map to a given aggregation rule are sent to the same carbon-aggregator |
||||
# instance. |
||||
# Enable this for carbon-relays that send to a group of carbon-aggregators |
||||
#RELAY_METHOD = aggregated-consistent-hashing |
||||
# |
||||
# You can also use fast-hashing and fast-aggregated-hashing which are in O(1) |
||||
# and will always redirect the metrics to the same destination but do not try |
||||
# to minimize rebalancing when the list of destinations is changing. |
||||
RELAY_METHOD = rules |
||||
|
||||
# If you use consistent-hashing you can add redundancy by replicating every |
||||
# datapoint to more than one machine. |
||||
REPLICATION_FACTOR = 1 |
||||
|
||||
# For REPLICATION_FACTOR >=2, set DIVERSE_REPLICAS to True to guarantee replicas |
||||
# across distributed hosts. With this setting disabled, it's possible that replicas |
||||
# may be sent to different caches on the same host. This has been the default |
||||
# behavior since introduction of 'consistent-hashing' relay method. |
||||
# Note that enabling this on an existing pre-0.9.14 cluster will require rebalancing |
||||
# your metrics across the cluster nodes using a tool like Carbonate. |
||||
#DIVERSE_REPLICAS = True |
||||
|
||||
# This is a list of carbon daemons we will send any relayed or |
||||
# generated metrics to. The default provided would send to a single |
||||
# carbon-cache instance on the default port. However if you |
||||
# use multiple carbon-cache instances then it would look like this: |
||||
# |
||||
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b |
||||
# |
||||
# The general form is IP:PORT:INSTANCE where the :INSTANCE part is |
||||
# optional and refers to the "None" instance if omitted. |
||||
# |
||||
# Note that if the destinations are all carbon-caches then this should |
||||
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of |
||||
# instances listed (order matters!). |
||||
# |
||||
# If using RELAY_METHOD = rules, all destinations used in relay-rules.conf |
||||
# must be defined in this list |
||||
DESTINATIONS = 127.0.0.1:2004 |
||||
|
||||
# This define the protocol to use to contact the destination. It can be |
||||
# set to one of "line", "pickle", "udp" and "protobuf". This list can be |
||||
# extended with CarbonClientFactory plugins and defaults to "pickle". |
||||
# DESTINATION_PROTOCOL = pickle |
||||
|
||||
# When using consistent hashing it sometime makes sense to make |
||||
# the ring dynamic when you don't want to loose points when a |
||||
# single destination is down. Replication is an answer to that |
||||
# but it can be quite expensive. |
||||
# DYNAMIC_ROUTER = False |
||||
|
||||
# Controls the number of connection attempts before marking a |
||||
# destination as down. We usually do one connection attempt per |
||||
# second. |
||||
# DYNAMIC_ROUTER_MAX_RETRIES = 5 |
||||
|
||||
# This is the maximum number of datapoints that can be queued up |
||||
# for a single destination. Once this limit is hit, we will |
||||
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise |
||||
# we will drop any subsequently received datapoints. |
||||
MAX_QUEUE_SIZE = 10000 |
||||
|
||||
# This defines the maximum "message size" between carbon daemons. If |
||||
# your queue is large, setting this to a lower number will cause the |
||||
# relay to forward smaller discrete chunks of stats, which may prevent |
||||
# overloading on the receiving side after a disconnect. |
||||
MAX_DATAPOINTS_PER_MESSAGE = 500 |
||||
|
||||
# Limit the number of open connections the receiver can handle as any time. |
||||
# Default is no limit. Setting up a limit for sites handling high volume |
||||
# traffic may be recommended to avoid running out of TCP memory or having |
||||
# thousands of TCP connections reduce the throughput of the service. |
||||
#MAX_RECEIVER_CONNECTIONS = inf |
||||
|
||||
# Specify the user to drop privileges to |
||||
# If this is blank carbon-relay runs as the user that invokes it |
||||
# USER = |
||||
|
||||
# This is the percentage that the queue must be empty before it will accept |
||||
# more messages. For a larger site, if the queue is very large it makes sense |
||||
# to tune this to allow for incoming stats. So if you have an average |
||||
# flow of 100k stats/minute, and a MAX_QUEUE_SIZE of 3,000,000, it makes sense |
||||
# to allow stats to start flowing when you've cleared the queue to 95% since |
||||
# you should have space to accommodate the next minute's worth of stats |
||||
# even before the relay incrementally clears more of the queue |
||||
QUEUE_LOW_WATERMARK_PCT = 0.8 |
||||
|
||||
# To allow for batch efficiency from the pickle protocol and to benefit from |
||||
# other batching advantages, all writes are deferred by putting them into a queue, |
||||
# and then the queue is flushed and sent a small fraction of a second later. |
||||
TIME_TO_DEFER_SENDING = 0.0001 |
||||
|
||||
# Set this to False to drop datapoints when any send queue (sending datapoints |
||||
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the |
||||
# default) then sockets over which metrics are received will temporarily stop accepting |
||||
# data until the send queues fall below QUEUE_LOW_WATERMARK_PCT * MAX_QUEUE_SIZE. |
||||
USE_FLOW_CONTROL = True |
||||
|
||||
# If enabled this setting is used to timeout metric client connection if no |
||||
# metrics have been sent in specified time in seconds |
||||
#METRIC_CLIENT_IDLE_TIMEOUT = None |
||||
|
||||
# Set this to True to enable whitelisting and blacklisting of metrics in |
||||
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is |
||||
# missing or empty, all metrics will pass through |
||||
# USE_WHITELIST = False |
||||
|
||||
# By default, carbon itself will log statistics (such as a count, |
||||
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60 |
||||
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation |
||||
# CARBON_METRIC_PREFIX = carbon |
||||
# CARBON_METRIC_INTERVAL = 60 |
||||
# |
||||
# In order to turn off logging of successful connections for the line |
||||
# receiver, set this to False |
||||
# LOG_LISTENER_CONN_SUCCESS = True |
||||
|
||||
# If you're connecting from the relay to a destination that's over the |
||||
# internet or similarly iffy connection, a backlog can develop because |
||||
# of internet weather conditions, e.g. acks getting lost or similar issues. |
||||
# To deal with that, you can enable USE_RATIO_RESET which will let you |
||||
# re-set the connection to an individual destination. Defaults to being off. |
||||
USE_RATIO_RESET=False |
||||
|
||||
# When there is a small number of stats flowing, it's not desirable to |
||||
# perform any actions based on percentages - it's just too "twitchy". |
||||
MIN_RESET_STAT_FLOW=1000 |
||||
|
||||
# When the ratio of stats being sent in a reporting interval is far |
||||
# enough from 1.0, we will disconnect the socket and reconnecto to |
||||
# clear out queued stats. The default ratio of 0.9 indicates that 10% |
||||
# of stats aren't being delivered within one CARBON_METRIC_INTERVAL |
||||
# (default of 60 seconds), which can lead to a queue backup. Under |
||||
# some circumstances re-setting the connection can fix this, so |
||||
# set this according to your tolerance, and look in the logs for |
||||
# "resetConnectionForQualityReasons" to observe whether this is kicking |
||||
# in when your sent queue is building up. |
||||
MIN_RESET_RATIO=0.9 |
||||
|
||||
# The minimum time between resets. When a connection is re-set, we |
||||
# need to wait before another reset is performed. |
||||
# (2*CARBON_METRIC_INTERVAL) + 1 second is the minimum time needed |
||||
# before stats for the new connection will be available. Setting this |
||||
# below (2*CARBON_METRIC_INTERVAL) + 1 second will result in a lot of |
||||
# reset connections for no good reason. |
||||
MIN_RESET_INTERVAL=121 |
||||
|
||||
[aggregator] |
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0 |
||||
LINE_RECEIVER_PORT = 2023 |
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0 |
||||
PICKLE_RECEIVER_PORT = 2024 |
||||
|
||||
# If set true, metric received will be forwarded to DESTINATIONS in addition to |
||||
# the output of the aggregation rules. If set false the carbon-aggregator will |
||||
# only ever send the output of aggregation. |
||||
FORWARD_ALL = True |
||||
|
||||
# Filenames of the configuration files to use for this instance of aggregator. |
||||
# Filenames are relative to CONF_DIR. |
||||
# |
||||
# AGGREGATION_RULES = aggregation-rules.conf |
||||
# REWRITE_RULES = rewrite-rules.conf |
||||
|
||||
# This is a list of carbon daemons we will send any relayed or |
||||
# generated metrics to. The default provided would send to a single |
||||
# carbon-cache instance on the default port. However if you |
||||
# use multiple carbon-cache instances then it would look like this: |
||||
# |
||||
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b |
||||
# |
||||
# The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is |
||||
# optional and refers to the "None" instance if omitted. |
||||
# |
||||
# Note that if the destinations are all carbon-caches then this should |
||||
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of |
||||
# instances listed (order matters!). |
||||
DESTINATIONS = 127.0.0.1:2004 |
||||
|
||||
# If you want to add redundancy to your data by replicating every |
||||
# datapoint to more than one machine, increase this. |
||||
REPLICATION_FACTOR = 1 |
||||
|
||||
# This is the maximum number of datapoints that can be queued up |
||||
# for a single destination. Once this limit is hit, we will |
||||
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise |
||||
# we will drop any subsequently received datapoints. |
||||
MAX_QUEUE_SIZE = 10000 |
||||
|
||||
# Set this to False to drop datapoints when any send queue (sending datapoints |
||||
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the |
||||
# default) then sockets over which metrics are received will temporarily stop accepting |
||||
# data until the send queues fall below 80% MAX_QUEUE_SIZE. |
||||
USE_FLOW_CONTROL = True |
||||
|
||||
# If enabled this setting is used to timeout metric client connection if no |
||||
# metrics have been sent in specified time in seconds |
||||
#METRIC_CLIENT_IDLE_TIMEOUT = None |
||||
|
||||
# This defines the maximum "message size" between carbon daemons. |
||||
# You shouldn't need to tune this unless you really know what you're doing. |
||||
MAX_DATAPOINTS_PER_MESSAGE = 500 |
||||
|
||||
# This defines how many datapoints the aggregator remembers for |
||||
# each metric. Aggregation only happens for datapoints that fall in |
||||
# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds. |
||||
MAX_AGGREGATION_INTERVALS = 5 |
||||
|
||||
# Limit the number of open connections the receiver can handle as any time. |
||||
# Default is no limit. Setting up a limit for sites handling high volume |
||||
# traffic may be recommended to avoid running out of TCP memory or having |
||||
# thousands of TCP connections reduce the throughput of the service. |
||||
#MAX_RECEIVER_CONNECTIONS = inf |
||||
|
||||
# By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back |
||||
# aggregated data points once every rule.frequency seconds, on a per-rule basis. |
||||
# Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points |
||||
# every N seconds, independent of rule frequency. This is useful, for example, |
||||
# to be able to query partially aggregated metrics from carbon-cache without |
||||
# having to first wait rule.frequency seconds. |
||||
# WRITE_BACK_FREQUENCY = 0 |
||||
|
||||
# Set this to True to enable whitelisting and blacklisting of metrics in |
||||
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is |
||||
# missing or empty, all metrics will pass through |
||||
# USE_WHITELIST = False |
||||
|
||||
# By default, carbon itself will log statistics (such as a count, |
||||
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60 |
||||
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation |
||||
# CARBON_METRIC_PREFIX = carbon |
||||
# CARBON_METRIC_INTERVAL = 60 |
||||
|
||||
# In order to turn off logging of successful connections for the line |
||||
# receiver, set this to False |
||||
# LOG_LISTENER_CONN_SUCCESS = True |
||||
|
||||
# In order to turn off logging of metrics with no corresponding |
||||
# aggregation rules receiver, set this to False |
||||
# LOG_AGGREGATOR_MISSES = False |
||||
|
||||
# Specify the user to drop privileges to |
||||
# If this is blank carbon-aggregator runs as the user that invokes it |
||||
# USER = |
||||
|
||||
# Part of the code, and particularly aggregator rules, need |
||||
# to cache metric names. To avoid leaking too much memory you |
||||
# can tweak the size of this cache. The default allow for 1M |
||||
# different metrics per rule (~200MiB). |
||||
# CACHE_METRIC_NAMES_MAX=1000000 |
||||
|
||||
# You can optionally set a ttl to this cache. |
||||
# CACHE_METRIC_NAMES_TTL=600 |
||||
@ -1,57 +0,0 @@ |
||||
# This configuration file controls the behavior of the Dashboard UI, available |
||||
# at http://my-graphite-server/dashboard/. |
||||
# |
||||
# This file must contain a [ui] section that defines values for all of the |
||||
# following settings. |
||||
[ui] |
||||
default_graph_width = 400 |
||||
default_graph_height = 250 |
||||
automatic_variants = true |
||||
refresh_interval = 60 |
||||
autocomplete_delay = 375 |
||||
merge_hover_delay = 750 |
||||
|
||||
# You can set this 'default', 'white', or a custom theme name. |
||||
# To create a custom theme, copy the dashboard-default.css file |
||||
# to dashboard-myThemeName.css in the content/css directory and |
||||
# modify it to your liking. |
||||
theme = default |
||||
|
||||
[keyboard-shortcuts] |
||||
toggle_toolbar = ctrl-z |
||||
toggle_metrics_panel = ctrl-space |
||||
erase_all_graphs = alt-x |
||||
save_dashboard = alt-s |
||||
completer_add_metrics = alt-enter |
||||
completer_del_metrics = alt-backspace |
||||
give_completer_focus = shift-space |
||||
|
||||
# These settings apply to the UI as a whole, all other sections in this file |
||||
# pertain only to specific metric types. |
||||
# |
||||
# The dashboard presents only metrics that fall into specified naming schemes |
||||
# defined in this file. This creates a simpler, more targeted view of the |
||||
# data. The general form for defining a naming scheme is as follows: |
||||
# |
||||
#[Metric Type] |
||||
#scheme = basis.path.<field1>.<field2>.<fieldN> |
||||
#field1.label = Foo |
||||
#field2.label = Bar |
||||
# |
||||
# |
||||
# Where each <field> will be displayed as a dropdown box |
||||
# in the UI and the remaining portion of the namespace |
||||
# shown in the Metric Selector panel. The .label options set the labels |
||||
# displayed for each dropdown. |
||||
# |
||||
# For example: |
||||
# |
||||
#[Sales] |
||||
#scheme = sales.<channel>.<type>.<brand> |
||||
#channel.label = Channel |
||||
#type.label = Product Type |
||||
#brand.label = Brand |
||||
# |
||||
# This defines a 'Sales' metric type that uses 3 dropdowns in the Context Selector |
||||
# (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc) |
||||
# will be available in the Metric Selector (upper-right panel). |
||||
@@ -1,38 +0,0 @@
[default]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False

[noc]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,yellow,purple,brown,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False

[plain]
background = white
foreground = black
minorLine = grey
majorLine = rose

[summary]
background = black
lineColors = #6666ff, #66ff66, #ff6666

[alphas]
background = white
foreground = black
majorLine = grey
minorLine = rose
lineColors = 00ff00aa,ff000077,00337799
@@ -1,21 +0,0 @@
# Relay destination rules for carbon-relay. Entries are scanned in order,
# and the first pattern a metric matches will cause processing to cease after sending
# unless `continue` is set to true
#
# [name]
# pattern = <regex>
# destinations = <list of destination addresses>
# continue = <boolean>  # default: False
#
# name: Arbitrary unique name to identify the rule
# pattern: Regex pattern to match against the metric name
# destinations: Comma-separated list of destinations.
#   ex: 127.0.0.1, 10.1.2.3:2004, 10.1.2.4:2004:a, myserver.mydomain.com
# continue: Continue processing rules if this rule matches (default: False)

# You must have exactly one section with 'default = true'
# Note that all destinations listed must also exist in carbon.conf
# in the DESTINATIONS setting in the [relay] section
[default]
default = true
destinations = 127.0.0.1:2004:a, 127.0.0.1:2104:b
@@ -1,18 +0,0 @@
# This file defines regular expression patterns that can be used to
# rewrite metric names in a search & replace fashion. It consists of two
# sections, [pre] and [post]. The rules in the pre section are applied to
# metric names as soon as they are received. The post rules are applied
# after aggregation has taken place.
#
# The general form of each rule is as follows:
#
#   regex-pattern = replacement-text
#
# For example:
#
# [post]
# _sum$ =
# _avg$ =
#
# These rules would strip off a suffix of _sum or _avg from any metric names
# after aggregation.
@@ -1,42 +0,0 @@
# Aggregation methods for whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds
#
# [name]
# pattern = <regex>
# xFilesFactor = <float between 0 and 1>
# aggregationMethod = <average|sum|last|max|min>
#
# name: Arbitrary unique name for the rule
# pattern: Regex pattern to match against the metric name
# xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
# aggregationMethod: function to apply to data points for aggregation
#
[min]
pattern = \.lower$
xFilesFactor = 0.1
aggregationMethod = min

[max]
pattern = \.upper(_\d+)?$
xFilesFactor = 0.1
aggregationMethod = max

[sum]
pattern = \.sum$
xFilesFactor = 0
aggregationMethod = sum

[count]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum

[count_legacy]
pattern = ^stats_counts.*
xFilesFactor = 0
aggregationMethod = sum

[default_average]
pattern = .*
xFilesFactor = 0.3
aggregationMethod = average
@@ -1,36 +0,0 @@
# Schema definitions for Whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds.
#
# Definition Syntax:
#
# [name]
# pattern = regex
# retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ...
#
# Remember: To support accurate aggregation from higher to lower resolution
# archives, the precision of a longer retention archive must be
# cleanly divisible by precision of next lower retention archive.
#
# Valid:   60s:7d,300s:30d (300/60 = 5)
# Invalid: 180s:7d,300s:30d (300/180 = 3.333)
#

# Carbon's internal metrics. This entry should match what is specified in
# CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings

[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y

[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d

[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y

[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y
@@ -1,6 +0,0 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, only metrics received which
# match one of these expressions will be persisted. If this file is empty or
# missing, all metrics will pass through.
# This file is reloaded automatically when changes are made
.*
@ -1,94 +0,0 @@
"""Copyright 2008 Orbitz WorldWide

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""

# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
from os.path import dirname, join, abspath


#Django settings below, do not touch!
APPEND_SLASH = False
TEMPLATE_DEBUG = False

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(dirname( abspath(__file__) ), 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'

# Absolute path to the directory that holds media.
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''

MIDDLEWARE_CLASSES = (
    'graphite.middleware.LogExceptionsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.gzip.GZipMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'graphite.urls'

INSTALLED_APPS = (
    'graphite.metrics',
    'graphite.render',
    'graphite.browser',
    'graphite.composer',
    'graphite.account',
    'graphite.dashboard',
    'graphite.whitelist',
    'graphite.events',
    'graphite.url_shortener',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.staticfiles',
    'tagging',
)

AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']

GRAPHITE_WEB_APP_SETTINGS_LOADED = True

STATIC_URL = '/static/'

STATIC_ROOT = '/opt/graphite/static/'

@ -1,215 +0,0 @@
## Graphite local_settings.py
# Edit this file to customize the default Graphite webapp settings
#
# Additional customizations to Django settings can be added to this file as well

#####################################
# General Configuration #
#####################################
# Set this to a long, random unique string to use as a secret key for this
# install. This key is used for salting of hashes used in auth tokens,
# CSRF middleware, cookie storage, etc. This should be set identically among
# instances if used behind a load balancer.
#SECRET_KEY = 'UNSAFE_DEFAULT'

# In Django 1.5+ set this to the list of hosts your graphite instance is
# accessible as. See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
#ALLOWED_HOSTS = [ '*' ]

# Set your local timezone (Django's default is America/Chicago)
# If your graphs appear to be offset by a couple hours then this probably
# needs to be explicitly set to your local timezone.
#TIME_ZONE = 'America/Los_Angeles'

# Override this to provide documentation specific to your Graphite deployment
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"

# Logging
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True
#LOG_METRIC_ACCESS = True

# Enable full debug page display on exceptions (Internal Server Error pages)
#DEBUG = True

# If using RRD files and rrdcached, set to the address or socket of the daemon
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'

# This lists the memcached servers that will be used by this webapp.
# If you have a cluster of webapps you should ensure all of them
# have the *exact* same value for this setting. That will maximize cache
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
# memcached entirely.
#
# You should not use the loopback address (127.0.0.1) here if using clustering
# as every webapp in the cluster should use the exact same values to prevent
# unneeded cache misses. Set to [] to disable caching of images and fetched data
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute


#####################################
# Filesystem Paths #
#####################################
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
# to somewhere else
#GRAPHITE_ROOT = '/opt/graphite'

# Most installs done outside of a separate tree such as /opt/graphite will only
# need to change these three settings. Note that the default settings for each
# of these is relative to GRAPHITE_ROOT
#CONF_DIR = '/opt/graphite/conf'
#STORAGE_DIR = '/opt/graphite/storage'
#CONTENT_DIR = '/opt/graphite/webapp/content'

# To further or fully customize the paths, modify the following. Note that the
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
#
## Webapp config files
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'

## Data directories
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
#WHISPER_DIR = '/opt/graphite/storage/whisper'
#RRD_DIR = '/opt/graphite/storage/rrd'
#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
#LOG_DIR = '/opt/graphite/storage/log/webapp'
#INDEX_FILE = '/opt/graphite/storage/index' # Search index file


#####################################
# Email Configuration #
#####################################
# This is used for emailing rendered Graphs
# Default backend is SMTP
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
#EMAIL_USE_TLS = False
# To drop emails on the floor, enable the Dummy backend:
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'


#####################################
# Authentication Configuration #
#####################################
## LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
# OR
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
#
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
# See http://www.python-ldap.org/ for further details on these options.

## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
#USE_REMOTE_USER_AUTHENTICATION = True

# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'


##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to setup an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
#
# The following built-in database engines are available:
# django.db.backends.postgresql # Removed in Django 1.4
# django.db.backends.postgresql_psycopg2
# django.db.backends.mysql
# django.db.backends.sqlite3
# django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
#DATABASES = {
# 'default': {
# 'NAME': '/opt/graphite/storage/graphite.db',
# 'ENGINE': 'django.db.backends.sqlite3',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': ''
# }
#}
#


#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]

## These are timeout values (in seconds) for requests to remote webapps
#REMOTE_STORE_FETCH_TIMEOUT = 6 # Timeout to fetch series data
#REMOTE_STORE_FIND_TIMEOUT = 2.5 # Timeout for metric find requests
#REMOTE_STORE_RETRY_DELAY = 60 # Time before retrying a failed remote webapp
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results

## Remote rendering settings
# Set to True to enable rendering of Graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0

# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use 127.0.0.1 here in most cases
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
#CARBONLINK_TIMEOUT = 1.0

#####################################
# Additional Django Settings #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *

import os

LOG_DIR = '/var/log/graphite'
# Note: this is a literal placeholder string, not an evaluated shell command.
SECRET_KEY = '$(date +%s | sha256sum | base64 | head -c 64)'

# Optional overrides injected through the container environment.
if (os.getenv("MEMCACHE_HOST") is not None):
    MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOST").split(",")

if (os.getenv("DEFAULT_CACHE_DURATION") is not None):
    DEFAULT_CACHE_DURATION = int(os.getenv("DEFAULT_CACHE_DURATION"))

@ -1,6 +0,0 @@
{
  "graphiteHost": "127.0.0.1",
  "graphitePort": 2003,
  "port": 8125,
  "flushInterval": 10000
}
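With statsd listening on UDP port 8125 per config.js above, a quick way to exercise it is to fire a counter at whichever host port a docker block publishes for 8125/udp (for example 8225, as in the removed graphite11 block further down); the address and metric name below are assumptions, adjust them to your block:

```python
import socket

# Minimal sketch: send one statsd counter over UDP. 8225 is only an example
# host port for the container's 8125/udp; the metric name is made up.
STATSD_ADDR = ("127.0.0.1", 8225)

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# statsd line protocol: <name>:<value>|<type>, where "c" means counter
sock.sendto(b"devenv.test.counter:1|c", STATSD_ADDR)
sock.close()
# statsd buffers for flushInterval (10 s per config.js) and then forwards the
# aggregate to carbon at graphiteHost:graphitePort (127.0.0.1:2003 in config.js).
```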
@ -1,26 +0,0 @@
#!/usr/bin/env expect

set timeout -1
spawn /usr/local/bin/manage.sh

expect "Would you like to create one now" {
  send "yes\r"
}

expect "Username" {
  send "root\r"
}

expect "Email address:" {
  send "root.graphite@mailinator.com\r"
}

expect "Password:" {
  send "root\r"
}

expect "Password *:" {
  send "root\r"
}

expect "Superuser created successfully"
@ -1,3 +0,0 @@
#!/bin/bash
PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings
# PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
@ -1,21 +1,16 @@
graphite:
  build:
    context: docker/blocks/graphite1
    args:
      version: master
graphite1:
  image: graphiteapp/graphite-statsd:1.0.2-3
  ports:
    - "8080:80"
    - "2003:2003"
    - "8125:8125/udp"
    - "8126:8126"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro
    - "8280:80"
    - "2203-2204:2003-2004"
    - "2223-2224:2023-2024"

fake-graphite-data:
fake-graphite1-data:
  image: grafana/fake-data-gen
  network_mode: bridge
  environment:
    FD_DATASOURCE: graphite
    FD_PORT: 2003

    FD_PORT: 2203
    FD_GRAPHITE_VERSION: 1.0
  depends_on:
    - graphite1

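To verify the remapped plaintext carbon port, a single data point can be pushed by hand. A minimal sketch, assuming the graphite1 block is up and 2203 is the host side of the "2203-2204:2003-2004" mapping above (the graphite11 block uses 2103 in the same way); the metric name is just an example:

```python
import socket
import time

def send_metric(name, value, host="127.0.0.1", port=2203):
    # Plaintext carbon protocol: "<metric path> <value> <unix timestamp>\n"
    line = "%s %f %d\n" % (name, value, int(time.time()))
    with socket.create_connection((host, port), timeout=5) as sock:
        sock.sendall(line.encode("ascii"))

# After a short delay the metric should be browsable through the provisioned
# Graphite data source in Grafana.
send_metric("devenv.test.random", 42.0)
```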
File diff suppressed because it is too large
@ -1,18 +0,0 @@
graphite11:
  image: graphiteapp/graphite-statsd
  ports:
    - "8180:80"
    - "2103-2104:2003-2004"
    - "2123-2124:2023-2024"
    - "8225:8125/udp"
    - "8226:8126"

fake-graphite11-data:
  image: grafana/fake-data-gen
  network_mode: bridge
  environment:
    FD_DATASOURCE: graphite
    FD_PORT: 2103
    FD_GRAPHITE_VERSION: 1.1
  depends_on:
    - graphite11