This adds a make target to deploy a dev version using helm (#586)

* add a dev target to deploy the current image in k8s

* impr/clients: Handle TLS config and MTLS for logcli and promtail (#540)

* impr/clients: Handle TLS config and MTLS for logcli and promtail

* fix/tls: Please gofmt...

* impr/clients: use prometheus HTTPClientConfig for logcli and promtail

* fix/promtail: Set proper Client config name

* impr/promtail: Use prometheus HTTPClientConfig configuration

* adapt with master

* address review

* fix conflicts

* address requested changes

* remove file

* add helm dev targets

* adding back assets

* fix review comments

* Review feedback
pull/600/head
Cyril Tovena 7 years ago committed by GitHub
parent 95310d4cdd
commit 4733221fe3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 17
      Makefile
  2. 3
      production/helm/README.md
  3. 2
      production/helm/loki-stack/Chart.yaml
  4. 9
      production/helm/loki-stack/requirements.yaml
  5. 32
      production/helm/loki-stack/templates/_helpers.tpl
  6. 30
      production/helm/loki-stack/templates/datasources.yaml
  7. 10
      production/helm/loki-stack/values.yaml
  8. 6
      production/helm/loki/values.yaml
  9. 6
      production/helm/promtail/values.yaml
  10. 25
      tools/dev.values.yaml
  11. 37
      tools/helm.yaml

@ -222,6 +222,7 @@ push-latest:
done
helm:
-rm -f production/helm/*/requirements.lock
@set -e; \
helm init -c; \
for chart in $(CHARTS); do \
@ -258,3 +259,19 @@ check_assets: assets
echo "Run 'make assets' and commit the changes to fix the error."; \
exit 1; \
fi
# Install Tiller into the cluster (with the RBAC service account from
# tools/helm.yaml), then deploy the current build.
helm-install:
	kubectl apply -f tools/helm.yaml
	helm init --wait --service-account helm --upgrade
	$(MAKE) helm-upgrade

# Render the charts without installing anything (helm --dry-run --debug).
helm-debug: ARGS=--dry-run --debug
helm-debug: helm-upgrade

# Package chart dependencies (via the `helm` target), then install/upgrade
# the loki-stack release pinned to the locally built image tag.
helm-upgrade: helm
	helm upgrade --wait --install $(ARGS) loki-stack ./production/helm/loki-stack \
		--set promtail.image.tag=$(IMAGE_TAG) --set loki.image.tag=$(IMAGE_TAG) -f tools/dev.values.yaml

# Remove the dev release; leading `-` ignores the error when it is not installed.
helm-clean:
	-helm delete --purge loki-stack

@ -119,3 +119,6 @@ For example, if you update the loki chart, you need to bump the version as follo
```bash
$ # update version loki/Chart.yaml
$ # update version loki-stack/Chart.yaml
```
You can use `make helm-debug` to test and print out all chart templates. If you want to install Helm (tiller) in your cluster, use `make helm-install`; to install the current build in your Kubernetes cluster, run `make helm-upgrade`.

@ -1,5 +1,5 @@
name: loki-stack
version: 0.9.3
version: 0.9.4
appVersion: 0.0.1
kubeVersion: "^1.10.0-0"
description: "Loki: like Prometheus, but for logs."

@ -7,4 +7,11 @@ dependencies:
condition: promtail.enabled
repository: "file://../promtail"
version: "^0.6.0"
- name: "grafana"
condition: grafana.enabled
version: "~3.3.7"
repository: "https://kubernetes-charts.storage.googleapis.com/"
- name: "prometheus"
condition: prometheus.enabled
version: "~8.11.2"
repository: "https://kubernetes-charts.storage.googleapis.com/"

@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
Truncated to 63 characters because some Kubernetes name fields are limited
to this by the DNS-1123 naming spec; a trailing "-" left by truncation is stripped.
*/}}
{{- define "loki-stack.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Precedence: .Values.fullnameOverride, then the bare release name (when it
already contains the chart name), then "<release>-<chart>".
*/}}
{{- define "loki-stack.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
"+" is replaced with "_" because label values may not contain "+".
*/}}
{{- define "loki-stack.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

@ -0,0 +1,30 @@
{{- if and .Values.grafana.enabled .Values.grafana.sidecar.datasources.enabled }}
# ConfigMap consumed by the Grafana datasource sidecar: the
# grafana_datasource: "1" label is what the sidecar watches for.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "loki-stack.fullname" . }}
  labels:
    app: {{ template "loki-stack.name" . }}
    chart: {{ template "loki-stack.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    grafana_datasource: "1"
data:
  loki-stack-datasource.yaml: |-
    apiVersion: 1
    datasources:
{{- if .Values.loki.enabled }}
    # "loki.serviceName" is defined in the loki subchart — TODO confirm it
    # resolves in this parent-chart context.
    - name: Loki
      type: loki
      access: proxy
      url: http://{{ include "loki.serviceName" . }}:{{ .Values.loki.service.port }}
      version: 1
{{- end }}
{{- if .Values.prometheus.enabled }}
    # Relies on prometheus.server.fullnameOverride being set (see
    # loki-stack/values.yaml) so the service name is predictable.
    - name: Prometheus
      type: prometheus
      access: proxy
      url: http://{{ .Values.prometheus.server.fullnameOverride }}:{{ .Values.prometheus.server.service.servicePort }}
      version: 1
{{- end }}
{{- end }}

@ -4,3 +4,13 @@ loki:
promtail:
enabled: true
grafana:
enabled: false
sidecar:
datasources:
enabled: true
prometheus:
enabled: false
server:
fullnameOverride: prometheus-server

@ -103,9 +103,9 @@ persistence:
podLabels: {}
## Pod Annotations
podAnnotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "http-metrics"
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "http-metrics"
## Assign a PriorityClassName to pods if set
# priorityClassName:

@ -32,9 +32,9 @@ pipelineStages:
## Pod Labels
podLabels: {}
podAnnotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "http-metrics"
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "http-metrics"
# This should match config.server.http_listen_port
port: 3101

@ -0,0 +1,25 @@
# Development value overrides applied by `make helm-upgrade`
# (-f tools/dev.values.yaml): enables Grafana and Prometheus, exposes them
# via LoadBalancer services, and uses locally built images.
grafana:
  enabled: true
  service:
    type: LoadBalancer
    port: 3000
  # Dev-only credential; never use in a shared cluster.
  adminPassword: admin
prometheus:
  enabled: true
  pushgateway:
    enabled: false
  server:
    service:
      type: LoadBalancer
      servicePort: 9090
promtail:
  image:
    # Never: use the image already present on the node (local dev build).
    pullPolicy: Never
loki:
  image:
    pullPolicy: Never
  service:
    type: LoadBalancer

@ -0,0 +1,37 @@
# RBAC bootstrap for Helm's Tiller: a `helm` service account in kube-system
# bound to cluster-admin. Applied by `make helm-install` before `helm init`.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: helm
  namespace: kube-system
---
# rbac.authorization.k8s.io/v1 is GA since Kubernetes 1.8; using it for both
# RBAC objects keeps the file consistent (v1beta1 was removed in 1.22).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: helm
    namespace: kube-system
---
# NOTE(review): this re-declares the built-in `cluster-admin` ClusterRole
# (full access to all resources and non-resource URLs) — confirm overriding
# the default is intended rather than binding to the existing one.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cluster-admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
rules:
  - apiGroups:
      - '*'
    resources:
      - '*'
    verbs:
      - '*'
  - nonResourceURLs:
      - '*'
    verbs:
      - '*'
Loading…
Cancel
Save