Add ksonnet config (#32)

* Add ksonnet config

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* Address feedback

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* /s/tempo/loki

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* Instructions for running promtail

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* Add dependencies and lock-files

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* Minor promtail and loki updates

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* WIP: README

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* Add a single-tenant gateway

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* Don't reference default, use loki everywhere

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* Update to latest master

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* Make gateway optional with the ksonnet

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* Remove lock files as this is a lib

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>
Branch: pull/56/head
Author: Goutham Veeramachaneni, committed by Tom Wilkie
Parent: 20af4e44fe
Commit: a375a727ba
12 changed files:

  1. ksonnet/README.md (+90)
  2. ksonnet/loki/common.libsonnet (+14)
  3. ksonnet/loki/config.libsonnet (+11)
  4. ksonnet/loki/distributor.libsonnet (+24)
  5. ksonnet/loki/gateway.libsonnet (+80)
  6. ksonnet/loki/images.libsonnet (+17)
  7. ksonnet/loki/ingester.libsonnet (+33)
  8. ksonnet/loki/jsonnetfile.json (+24)
  9. ksonnet/loki/loki.libsonnet (+10)
  10. ksonnet/loki/querier.libsonnet (+23)
  11. ksonnet/promtail/jsonnetfile.json (+14)
  12. ksonnet/promtail/promtail.libsonnet (+188)

@@ -0,0 +1,90 @@ ksonnet/README.md
# Deploy Loki to Kubernetes
## Prerequisites
Make sure you have ksonnet v0.8.0:
```
$ brew install https://raw.githubusercontent.com/ksonnet/homebrew-tap/82ef24cb7b454d1857db40e38671426c18cd8820/ks.rb
$ brew pin ks
$ ks version
ksonnet version: v0.8.0
jsonnet version: v0.9.5
client-go version: v1.6.8-beta.0+$Format:%h$
```
In your config repo, if you don't already have a ksonnet application, create one (this will copy credentials from the current context):
```
$ ks init <application name>
$ cd <application name>
$ ks env add loki --namespace=loki
```
## Deploying Promtail to your cluster.
Grab the promtail module using jb:
```
$ go get -u github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
$ jb init
$ jb install github.com/grafana/loki/ksonnet/promtail
```
Add the following to the file `environments/loki/main.jsonnet`:
```
local promtail = import 'promtail/promtail.libsonnet';
promtail + {
  _config+:: {
    namespace: 'loki',

    promtail_config: {
      scheme: 'https',
      hostname: 'logs-us-west1.grafana.net',
      username: 'user-id',
      password: 'password',
    },
  },
}
```
Then do `ks show loki` to see the manifests that'll be deployed to your cluster.
Apply them using `ks apply loki`.
## Deploying Loki to your cluster.
If you also want to deploy the Loki server components to the cluster, run the following to install the module:
```
$ jb install github.com/grafana/loki/ksonnet/loki
```
Be sure to replace the username, password, and the htpasswd contents with your own values.
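If you need to generate the htpasswd contents yourself, one option is the Apache `htpasswd` utility (a sketch, assuming it is installed, for example via the `apache2-utils` package); it prints a `user:hash` line that can be pasted into `htpasswd_contents`:
```
$ htpasswd -nb loki <your-password>
```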
Replace the contents of `environments/loki/main.jsonnet` with:
```
local gateway = import 'loki/gateway.libsonnet';
local loki = import 'loki/loki.libsonnet';
local promtail = import 'promtail/promtail.libsonnet';
loki + promtail + gateway {
  _config+:: {
    namespace: 'loki',
    htpasswd_contents: 'loki:$apr1$H4yGiGNg$ssl5/NymaGFRUvxIV1Nyr.',

    promtail_config: {
      scheme: 'http',
      hostname: 'gateway.%(namespace)s.svc' % $._config,
      username: 'loki',
      password: 'password',
    },

    replication_factor: 3,
    consul_replicas: 1,
  },
}
```
Run `ks show loki` to see the manifests that will be deployed to the cluster.
Finally, run `ks apply loki` to deploy the server components to your cluster.
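The gateway is optional (it ships as a separate `gateway.libsonnet`). A minimal sketch of a gateway-less setup, assuming promtail pushes straight to the distributor service in the cluster; the hostname below mirrors the gateway's own proxy target and is an illustration, not something the library sets for you:
```
local loki = import 'loki/loki.libsonnet';
local promtail = import 'promtail/promtail.libsonnet';

loki + promtail {
  _config+:: {
    namespace: 'loki',

    // No gateway: push directly to the distributor (no basic auth in this sketch).
    promtail_config: {
      scheme: 'http',
      hostname: 'distributor.%(namespace)s.svc.cluster.local' % $._config,
    },

    replication_factor: 3,
    consul_replicas: 1,
  },
}
```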

@@ -0,0 +1,14 @@ ksonnet/loki/common.libsonnet
{
  namespace:
    $.core.v1.namespace.new($._config.namespace),

  util+:: {
    local containerPort = $.core.v1.containerPort,

    defaultPorts::
      [
        containerPort.newNamed('http-metrics', 80),
        containerPort.newNamed('grpc', 9095),
      ],
  },
}

@@ -0,0 +1,11 @@ ksonnet/loki/config.libsonnet
{
  _config+: {
    namespace: error 'must define namespace',
    replication_factor: 3,

    ringConfig: {
      'consul.hostname': 'consul.%s.svc.cluster.local:8500' % $._config.namespace,
      'consul.prefix': '',
    },
  },
}

@@ -0,0 +1,24 @@ ksonnet/loki/distributor.libsonnet
{
  local container = $.core.v1.container,
  local containerPort = $.core.v1.containerPort,

  distributor_args::
    $._config.ringConfig {
      target: 'distributor',
      'distributor.replication-factor': $._config.replication_factor,
    },

  distributor_container::
    container.new('distributor', $._images.distributor) +
    container.withPorts($.util.defaultPorts) +
    container.withArgsMixin($.util.mapToFlags($.distributor_args)),

  local deployment = $.apps.v1beta1.deployment,

  distributor_deployment:
    deployment.new('distributor', 3, [$.distributor_container]) +
    $.util.antiAffinity,

  distributor_service:
    $.util.serviceFor($.distributor_deployment),
}

@@ -0,0 +1,80 @@ ksonnet/loki/gateway.libsonnet
{
  _config+:: {
    htpasswd_contents: error 'must specify htpasswd contents',
  },

  _images+:: {
    nginx: 'nginx:1.15.1-alpine',
  },

  local secret = $.core.v1.secret,

  gateway_secret:
    secret.new('gateway-secret', {
      '.htpasswd': std.base64($._config.htpasswd_contents),
    }),

  local configMap = $.core.v1.configMap,

  gateway_config:
    configMap.new('gateway-config') +
    configMap.withData({
      'nginx.conf': |||
        worker_processes 5; ## Default: 1
        error_log /dev/stderr;
        pid /tmp/nginx.pid;
        worker_rlimit_nofile 8192;

        events {
          worker_connections 4096; ## Default: 1024
        }

        http {
          default_type application/octet-stream;
          log_format main '$remote_addr - $remote_user [$time_local] $status '
            '"$request" $body_bytes_sent "$http_referer" '
            '"$http_user_agent" "$http_x_forwarded_for"';
          access_log /dev/stderr main;
          sendfile on;
          tcp_nopush on;
          resolver kube-dns.kube-system.svc.cluster.local;

          server {
            listen 80;
            auth_basic "Prometheus";
            auth_basic_user_file /etc/nginx/secrets/.htpasswd;
            proxy_set_header X-Scope-OrgID 1;

            location = /api/prom/push {
              proxy_pass http://distributor.%(namespace)s.svc.cluster.local$request_uri;
            }

            location ~ /api/prom/.* {
              proxy_pass http://querier.%(namespace)s.svc.cluster.local$request_uri;
            }
          }
        }
      ||| % $._config,
    }),

  local container = $.core.v1.container,
  local containerPort = $.core.v1.containerPort,

  gateway_container::
    container.new('nginx', $._images.nginx) +
    container.withPorts($.core.v1.containerPort.new('http', 80)) +
    $.util.resourcesRequests('50m', '100Mi'),

  local deployment = $.apps.v1beta1.deployment,

  gateway_deployment:
    deployment.new('gateway', 3, [
      $.gateway_container,
    ]) +
    $.util.configVolumeMount('gateway-config', '/etc/nginx') +
    $.util.secretVolumeMount('gateway-secret', '/etc/nginx/secrets', defaultMode=420) +
    $.util.antiAffinity,

  gateway_service:
    $.util.serviceFor($.gateway_deployment),
}

@@ -0,0 +1,17 @@ ksonnet/loki/images.libsonnet
{
  _images+:: {
    // Various third-party images.
    memcached: 'memcached:1.5.6-alpine',
    memcachedExporter: 'prom/memcached-exporter:v0.4.1',

    // Our services.
    cortex_gw: 'raintank/cortex-gw:0.9.0-93-gceff250',
    tableManager: 'grafana/cortex-table-manager:r45-6247bbc8',

    loki: 'grafana/loki:master-d5e6c60',
    distributor: self.loki,
    ingester: self.loki,
    querier: self.loki,
  },
}

@@ -0,0 +1,33 @@ ksonnet/loki/ingester.libsonnet
{
  local container = $.core.v1.container,

  ingester_args::
    $._config.ringConfig {
      target: 'ingester',
      'ingester.num-tokens': '512',
      'ingester.join-after': '30s',
      'ingester.claim-on-rollout': true,
    },

  ingester_container::
    container.new('ingester', $._images.ingester) +
    container.withPorts($.util.defaultPorts) +
    container.withArgsMixin($.util.mapToFlags($.ingester_args)) +
    container.mixin.readinessProbe.httpGet.withPath('/ready') +
    container.mixin.readinessProbe.httpGet.withPort(80) +
    container.mixin.readinessProbe.withInitialDelaySeconds(15) +
    container.mixin.readinessProbe.withTimeoutSeconds(1),

  local deployment = $.apps.v1beta1.deployment,

  ingester_deployment:
    deployment.new('ingester', 3, [$.ingester_container]) +
    $.util.antiAffinity +
    deployment.mixin.spec.withMinReadySeconds(60) +
    deployment.mixin.spec.strategy.rollingUpdate.withMaxSurge(0) +
    deployment.mixin.spec.strategy.rollingUpdate.withMaxUnavailable(1) +
    deployment.mixin.spec.template.spec.withTerminationGracePeriodSeconds(4800),

  ingester_service:
    $.util.serviceFor($.ingester_deployment),
}

@@ -0,0 +1,24 @@ ksonnet/loki/jsonnetfile.json
{
"dependencies": [
{
"name": "ksonnet-util",
"source": {
"git": {
"remote": "https://github.com/grafana/jsonnet-libs",
"subdir": "ksonnet-util"
}
},
"version": "master"
},
{
"name": "consul",
"source": {
"git": {
"remote": "https://github.com/gouthamve/public",
"subdir": "consul"
}
},
"version": "consul"
}
]
}

@@ -0,0 +1,10 @@ ksonnet/loki/loki.libsonnet
(import 'ksonnet-util/kausal.libsonnet') +
(import 'images.libsonnet') +
(import 'common.libsonnet') +
(import 'config.libsonnet') +
(import 'consul/consul.libsonnet') +
// Cortex services
(import 'distributor.libsonnet') +
(import 'ingester.libsonnet') +
(import 'querier.libsonnet')

@@ -0,0 +1,23 @@ ksonnet/loki/querier.libsonnet
{
  local container = $.core.v1.container,

  querier_args::
    $._config.ringConfig {
      target: 'querier',
      'distributor.replication-factor': $._config.replication_factor,
    },

  querier_container::
    container.new('querier', $._images.querier) +
    container.withPorts($.util.defaultPorts) +
    container.withArgsMixin($.util.mapToFlags($.querier_args)),

  local deployment = $.apps.v1beta1.deployment,

  querier_deployment:
    deployment.new('querier', 3, [$.querier_container]) +
    $.util.antiAffinity,

  querier_service:
    $.util.serviceFor($.querier_deployment),
}

@@ -0,0 +1,14 @@ ksonnet/promtail/jsonnetfile.json
{
"dependencies": [
{
"name": "ksonnet-util",
"source": {
"git": {
"remote": "https://github.com/grafana/jsonnet-libs",
"subdir": "ksonnet-util"
}
},
"version": "master"
}
]
}

@@ -0,0 +1,188 @@ ksonnet/promtail/promtail.libsonnet
local k = import 'ksonnet-util/kausal.libsonnet';
k {
  _images+:: {
    promtail: 'grafana/promtail:master-5da1fde',
  },

  _config+:: {
    prometheus_insecure_skip_verify: false,

    promtail_config: {
      username: '',
      password: '',
      scheme: 'https',
      hostname: 'log-us.grafana.net',
    },

    service_url:
      if std.objectHas(self.promtail_config, 'username') then
        '%(scheme)s://%(username)s:%(password)s@%(hostname)s/api/prom/push' % self.promtail_config
      else
        '%(scheme)s://%(hostname)s/api/prom/push' % self.promtail_config,
  },

  namespace:
    $.core.v1.namespace.new($._config.namespace),

  local policyRule = $.rbac.v1beta1.policyRule,

  promtail_rbac:
    $.util.rbac('promtail', [
      policyRule.new() +
      policyRule.withApiGroups(['']) +
      policyRule.withResources(['nodes', 'nodes/proxy', 'services', 'endpoints', 'pods']) +
      policyRule.withVerbs(['get', 'list', 'watch']),
    ]),

  promtail_config:: {
    scrape_configs: [
      {
        job_name: 'kubernetes-pods',
        kubernetes_sd_configs: [{
          role: 'pod',
        }],

        relabel_configs: [
          // Only scrape local pods; Promtail will drop targets with a __host__ label
          // that does not match the current host name.
          {
            source_labels: ['__meta_kubernetes_pod_node_name'],
            target_label: '__host__',
          },

          // Drop pods without a name label.
          {
            source_labels: ['__meta_kubernetes_pod_label_name'],
            action: 'drop',
            regex: '^$',
          },

          // Rename jobs to be <namespace>/<name, from pod name label>.
          {
            source_labels: ['__meta_kubernetes_namespace', '__meta_kubernetes_pod_label_name'],
            action: 'replace',
            separator: '/',
            target_label: 'job',
            replacement: '$1',
          },

          // But also include the namespace as a separate label, for routing alerts.
          {
            source_labels: ['__meta_kubernetes_namespace'],
            action: 'replace',
            target_label: 'namespace',
          },

          // Rename instances to be the pod name.
          {
            source_labels: ['__meta_kubernetes_pod_name'],
            action: 'replace',
            target_label: 'instance',
          },

          // Kubernetes puts logs under subdirectories keyed by pod UID and container_name.
          {
            source_labels: ['__meta_kubernetes_pod_uid', '__meta_kubernetes_pod_container_name'],
            target_label: '__path__',
            separator: '/',
            replacement: '/var/log/pods/$1',
          },
        ],
      },
      {
        job_name: 'kubernetes-pods-app',
        kubernetes_sd_configs: [{
          role: 'pod',
        }],

        relabel_configs: [
          // Only scrape local pods; Promtail will drop targets with a __host__ label
          // that does not match the current host name.
          {
            source_labels: ['__meta_kubernetes_pod_node_name'],
            target_label: '__host__',
          },

          // Drop pods without an app label.
          {
            source_labels: ['__meta_kubernetes_pod_label_app'],
            action: 'drop',
            regex: '^$',
          },

          // Rename jobs to be <namespace>/<app, from pod app label>.
          {
            source_labels: ['__meta_kubernetes_namespace', '__meta_kubernetes_pod_label_app'],
            action: 'replace',
            separator: '/',
            target_label: 'job',
            replacement: '$1',
          },

          // But also include the namespace as a separate label, for routing alerts.
          {
            source_labels: ['__meta_kubernetes_namespace'],
            action: 'replace',
            target_label: 'namespace',
          },

          // Rename instances to be the pod name.
          {
            source_labels: ['__meta_kubernetes_pod_name'],
            action: 'replace',
            target_label: 'instance',
          },

          // Also include all the other labels on the pod.
          {
            action: 'labelmap',
            regex: '__meta_kubernetes_pod_label_(.+)',
          },

          // Kubernetes puts logs under subdirectories keyed by pod UID and container_name.
          {
            source_labels: ['__meta_kubernetes_pod_uid', '__meta_kubernetes_pod_container_name'],
            target_label: '__path__',
            separator: '/',
            replacement: '/var/log/pods/$1',
          },
        ],
      },
    ],
  },

  local configMap = $.core.v1.configMap,

  promtail_config_map:
    configMap.new('promtail') +
    configMap.withData({
      'promtail.yml': $.util.manifestYaml($.promtail_config),
    }),

  promtail_args:: {
    'client.url': $._config.service_url,
    'config.file': '/etc/promtail/promtail.yml',
  },

  local container = $.core.v1.container,

  promtail_container::
    container.new('promtail', $._images.promtail) +
    container.withPorts($.core.v1.containerPort.new('http-metrics', 80)) +
    container.withArgsMixin($.util.mapToFlags($.promtail_args)) +
    container.withEnv([
      container.envType.fromFieldPath('HOSTNAME', 'spec.nodeName'),
    ]) +
    container.mixin.securityContext.withPrivileged(true) +
    container.mixin.securityContext.withRunAsUser(0),

  local daemonSet = $.extensions.v1beta1.daemonSet,

  promtail_daemonset:
    daemonSet.new('promtail', [$.promtail_container]) +
    daemonSet.mixin.spec.template.spec.withServiceAccount('promtail') +
    $.util.configVolumeMount('promtail', '/etc/promtail') +
    $.util.hostVolumeMount('varlog', '/var/log', '/var/log') +
    $.util.hostVolumeMount('varlibdockercontainers', '/var/lib/docker/containers', '/var/lib/docker/containers', readOnly=true),
}