Add docker-compose cluster deployment file (#2740)

- Help beginners learn how to launch a cluster of Loki on a local machine
- Use a single Loki config for both the query frontend and the other nodes
- Fix query lookback and add /ring to the gateway

Signed-off-by: Hui Kang <kangh@us.ibm.com>
Hui Kang 5 years ago committed by GitHub
parent 3b8962403b
commit d38377a2d6
8 changed files (lines changed in parentheses):

  1. production/README.md (2)
  2. production/docker/README.md (37)
  3. production/docker/chunks/.gitignore (1)
  4. production/docker/config/loki-docker-memberlist-boltdb-shipper.yaml (108)
  5. production/docker/config/nginx-loki-gateway.conf (67)
  6. production/docker/config/promtail-gateway.yaml (19)
  7. production/docker/docker-compose-ha-diagram.png (BIN)
  8. production/docker/docker-compose-ha-memberlist.yaml (88)

@@ -41,6 +41,8 @@ Until this is fixed we recommend [building and running from source](#build-and-r
For instructions on how to query Loki, see [our usage docs](https://grafana.com/docs/loki/latest/logql/).
To deploy a cluster of Loki locally, please refer to this [doc](./docker/).
## Using Helm to deploy on Kubernetes
There is a [Helm chart](helm) to deploy Loki and Promtail to Kubernetes.

@@ -0,0 +1,37 @@
# Loki cluster using docker-compose
To deploy a cluster of Loki nodes on a local machine (as shown below), you can use the `docker-compose-ha-memberlist.yaml` file.
<img src="./docker-compose-ha-diagram.png" width="850">
Some features of the deployment:
- Backend: 3 Loki servers, each running the distributor, ingester, and querier modules
- Together they form a cluster ring based on the memberlist mechanism (with Consul/etcd as the ring store, the modules can instead be run separately to further split read and write workloads)
- Index data is stored and replicated through the boltdb-shipper
- `replication_factor: 2`: the receiving distributor sends log data to 2 ingesters based on consistent hashing
- Chunk storage is a shared directory mounted from the same host directory (to simulate S3 or GCS)
- Queries are performed through the two query-frontend servers
- An nginx gateway routes the write and read workloads from clients (Grafana, Promtail)
1. Ensure you have the most up-to-date Docker container images:
```bash
docker-compose pull
```
1. Run the stack on your local Docker:
```bash
docker-compose -f ./docker-compose-ha-memberlist.yaml up
```
1. When adding the Loki data source in Grafana, use `http://loki-gateway:3100` for the URL field (a curl-based smoke test is sketched after these steps).
1. To clean up:
```bash
docker-compose -f ./docker-compose-ha-memberlist.yaml down
```
Remove the chunk data under `./chunks/`.
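
As an optional smoke test (not part of the shipped files), you can push and then query a log line through the gateway from another terminal while the stack from step 2 is running. This is a minimal sketch: the `smoke-test` job label is arbitrary, and it assumes the gateway's container port 80 is published on an ephemeral host port as in the compose file, discovered here with `docker-compose port`.

```bash
# Discover the ephemeral host port mapped to the gateway's port 80
GATEWAY_PORT=$(docker-compose -f ./docker-compose-ha-memberlist.yaml port loki-gateway 80 | cut -d: -f2)

# Push one test line via the distributor route (/loki/api/v1/push)
curl -s -X POST "http://localhost:${GATEWAY_PORT}/loki/api/v1/push" \
  -H "Content-Type: application/json" \
  --data "{\"streams\":[{\"stream\":{\"job\":\"smoke-test\"},\"values\":[[\"$(date +%s)000000000\",\"hello loki\"]]}]}"

# Read it back via the query-frontend route
curl -s -G "http://localhost:${GATEWAY_PORT}/loki/api/v1/query_range" \
  --data-urlencode 'query={job="smoke-test"}'
```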

@@ -0,0 +1,108 @@
auth_enabled: false

http_prefix:

server:
  http_listen_address: 0.0.0.0
  grpc_listen_address: 0.0.0.0
  http_listen_port: 3100
  grpc_listen_port: 9095
  log_level: debug

memberlist:
  join_members: ["loki-1", "loki-2", "loki-3"]
  dead_node_reclaim_time: 30s
  gossip_to_dead_nodes_time: 15s
  left_ingesters_timeout: 30s
  bind_addr: ['0.0.0.0']
  bind_port: 7946

ingester:
  lifecycler:
    join_after: 60s
    observe_period: 5s
    ring:
      replication_factor: 2
      kvstore:
        store: memberlist
    final_sleep: 0s
  chunk_idle_period: 1h
  max_chunk_age: 1h
  chunk_retain_period: 30s
  chunk_encoding: snappy
  chunk_target_size: 0
  chunk_block_size: 262144
  # chunk_target_size: 1.572864e+06

# Only needed for global rate strategy
# distributor:
#   ring:
#     kvstore:
#       store: memberlist

schema_config:
  configs:
    - from: 2020-08-01
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h

storage_config:
  boltdb_shipper:
    # shared_store: s3
    shared_store: filesystem
    active_index_directory: /tmp/loki/index
    cache_location: /tmp/loki/boltdb-cache
  # aws:
  #   s3: s3://us-east-1/mybucket
  #   sse_encryption: true
  #   insecure: false
  #   s3forcepathstyle: true
  filesystem:
    directory: /loki/chunks

limits_config:
  max_cache_freshness_per_query: '10m'
  enforce_metric_name: false
  reject_old_samples: true
  reject_old_samples_max_age: 30m
  ingestion_rate_mb: 10
  ingestion_burst_size_mb: 20

chunk_store_config:
  max_look_back_period: 336h

table_manager:
  retention_deletes_enabled: true
  retention_period: 336h

query_range:
  # make queries more cache-able by aligning them with their step intervals
  align_queries_with_step: true
  max_retries: 5
  # parallelize queries in 15min intervals
  split_queries_by_interval: 15m
  parallelise_shardable_queries: true
  cache_results: true

  results_cache:
    cache:
      # We're going to use the in-process "FIFO" cache
      enable_fifocache: true
      fifocache:
        size: 1024
        validity: 24h

frontend:
  log_queries_longer_than: 5s
  # downstream_url: http://loki-1:3100
  downstream_url: http://loki-gateway:3100
  compress_responses: true

querier:
  query_ingesters_within: 2h
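
Note that this single file drives every Loki container in the compose file later in this diff; only the `-target` flag differs per role. The command lines below are lifted from that compose file purely to illustrate the split, not to be run standalone (outside the compose network the `join_members` hostnames will not resolve):

```bash
# Read/write nodes (loki-1..3): distributor, ingester and querier in one process
loki -config.file=/etc/loki/loki-docker-memberlist-boltdb-shipper.yaml -target=all

# Query frontends: same config file, different target; queries are fanned out
# through downstream_url (http://loki-gateway:3100), i.e. back through the
# gateway to the queriers
loki -config.file=/etc/loki/loki-docker-memberlist-boltdb-shipper.yaml -target=query-frontend
```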

@@ -0,0 +1,67 @@
error_log /dev/stderr;
pid /tmp/nginx.pid;
worker_rlimit_nofile 8192;

events {
  worker_connections 4096; ## Default: 1024
}

http {
  default_type application/octet-stream;
  log_format main '$remote_addr - $remote_user [$time_local] $status '
                  '"$request" $body_bytes_sent "$http_referer" '
                  '"$http_user_agent" "$http_x_forwarded_for"';
  access_log /dev/stderr main;
  sendfile on;
  tcp_nopush on;

  upstream distributor {
    server loki-1:3100;
    server loki-2:3100;
    server loki-3:3100;
  }

  upstream querier {
    server loki-1:3100;
    server loki-2:3100;
    server loki-3:3100;
  }

  upstream query-frontend {
    server loki-frontend:3100;
  }

  server {
    listen 80;
    proxy_set_header X-Scope-OrgID docker-ha;

    location = /loki/api/v1/push {
      proxy_pass http://distributor$request_uri;
    }

    location = /ring {
      proxy_pass http://distributor$request_uri;
    }

    location = /loki/api/v1/tail {
      proxy_pass http://querier$request_uri;
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "upgrade";
    }

    location ~ /loki/api/.* {
      proxy_pass http://query-frontend$request_uri;
    }
  }

  server {
    listen 3100;
    proxy_set_header X-Scope-OrgID docker-ha;

    location ~ /loki/api/.* {
      proxy_pass http://querier$request_uri;
    }
  }
}
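
The two `server` blocks split the traffic: the port 80 listener is what clients use (pushes and `/ring` go to the distributor pool, tails go straight to the queriers, other `/loki/api/` calls go to the query frontend), while the port 3100 listener load-balances directly across the queriers and is what the frontend's `downstream_url` points at. A hedged way to poke at both listeners from the host, assuming you add explicit mappings such as `8080:80` and `3101:3100` to the `loki-gateway` service (the shipped compose file only exposes ephemeral ports):

```bash
# Port 80 listener: ring status served by a distributor, labels via the query frontend
curl -s http://localhost:8080/ring | head -n 20
curl -s "http://localhost:8080/loki/api/v1/labels"

# Port 3100 listener: bypasses the frontend and hits a querier directly
curl -s "http://localhost:3101/loki/api/v1/labels"
```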

@@ -0,0 +1,19 @@
server:
  http_listen_port: 9080
  grpc_listen_port: 0
  log_level: "debug"

positions:
  filename: /tmp/positions.yaml

clients:
  - url: http://loki-gateway:80/loki/api/v1/push

scrape_configs:
  - job_name: system
    static_configs:
      - targets:
          - localhost
        labels:
          job: varlogs
          __path__: /var/log/*log
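
Since the compose file maps Promtail's HTTP port (9080) to the host, you can check that it is tailing `/var/log` and shipping through the gateway. A small sketch; the `/targets` page and the metric names are what Promtail 2.0 exposes and may differ in other versions:

```bash
# Scrape targets discovered from the static_configs above
curl -s http://localhost:9080/targets | head -n 20

# Delivery counters for the gateway client configured above
curl -s http://localhost:9080/metrics | grep -E 'promtail_(sent|dropped)_entries_total'
```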

Binary file added: production/docker/docker-compose-ha-diagram.png (121 KiB, not shown)

@@ -0,0 +1,88 @@
version: "3.8"

networks:
  loki:

services:
  grafana:
    image: grafana/grafana:7.2.0
    ports:
      - "3000:3000"
    networks:
      - loki

  promtail:
    image: grafana/promtail:2.0.0
    volumes:
      - /var/log:/var/log
      - ./config:/etc/promtail/
    ports:
      - "9080:9080"
    command: -config.file=/etc/promtail/promtail-gateway.yaml
    networks:
      - loki

  loki-gateway:
    image: nginx:1.19
    volumes:
      - ./config/nginx-loki-gateway.conf:/etc/nginx/nginx.conf
    ports:
      - "80"
      - "3100"
    networks:
      - loki

  loki-frontend:
    image: grafana/loki:2.0.0
    volumes:
      - ./config:/etc/loki/
    ports:
      - "3100"
    command: "-config.file=/etc/loki/loki-docker-memberlist-boltdb-shipper.yaml -target=query-frontend"
    networks:
      - loki
    deploy:
      mode: replicated
      replicas: 2

  loki-1:
    image: grafana/loki:1.6.1
    volumes:
      - ./config:/etc/loki/
      - ./chunks:/loki/chunks/
    ports:
      - "3100"
      - "7946"
    command: "-config.file=/etc/loki/loki-docker-memberlist-boltdb-shipper.yaml -target=all"
    networks:
      - loki
    restart: on-failure

  loki-2:
    image: grafana/loki:1.6.1
    volumes:
      - ./config:/etc/loki/
      - ./chunks:/loki/chunks/
    ports:
      - "3100"
      - "7946"
    command: "-config.file=/etc/loki/loki-docker-memberlist-boltdb-shipper.yaml -target=all"
    # command: "-config.file=/etc/loki/loki-config.yaml"
    networks:
      - loki
    restart: on-failure

  loki-3:
    image: grafana/loki:1.6.1
    volumes:
      - ./config:/etc/loki/
      - ./chunks:/loki/chunks/
    ports:
      - "3100"
      - "7946"
    command: "-config.file=/etc/loki/loki-docker-memberlist-boltdb-shipper.yaml -target=all"
    # command: "-config.file=/etc/loki/loki-config.yaml"
    networks:
      - loki
    restart: on-failure
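
Two hedged operational notes on this file: plain `docker-compose` (outside swarm mode) may ignore the `deploy.replicas: 2` on `loki-frontend`, in which case `--scale` achieves the same effect, and the bare `3100`/`7946` port entries mean Docker assigns ephemeral host ports, which `docker-compose port` can look up. For example:

```bash
# Start the stack with two query frontends even if deploy.replicas is not honored
docker-compose -f ./docker-compose-ha-memberlist.yaml up --scale loki-frontend=2

# In another terminal: list the containers and find loki-1's ephemeral host port
docker-compose -f ./docker-compose-ha-memberlist.yaml ps
docker-compose -f ./docker-compose-ha-memberlist.yaml port loki-1 3100
```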