add hack/addons.yaml for promtail and logcli

pull/4881/head
Brett Jones 5 years ago
parent 3a80bdcddd
commit bc62e4b341
GPG Key ID: C405583E64F8BBCB
  1. config/manager/kustomization.yaml (2 lines changed)
  2. hack/addons.yaml (448 lines changed)
  3. internal/manifests/build.go (1 line changed)
  4. internal/manifests/config/loki-config.yaml (10 lines changed)
  5. internal/manifests/config/options.go (21 lines changed)
  6. internal/manifests/querier.go (52 lines changed)
  7. internal/manifests/query-frontend.go (4 lines changed)
  8. internal/manifests/shared.go (14 lines changed)
  9. internal/manifests/var.go (8 lines changed)

@@ -13,4 +13,4 @@ kind: Kustomization
 images:
 - name: controller
   newName: quay.io/blockloop/loki-operator
-  newTag: "1616013530"
+  newTag: "1616076328"

@@ -0,0 +1,448 @@
# This file is used to create additional objects to help development of the operator
# within a cluster. logcli pod helps write queries, promtail writes logs, etc
---
apiVersion: v1
kind: Pod
metadata:
  name: logcli
  namespace: loki
  labels:
    app.kubernetes.io/name: logcli
spec:
  containers:
    - name: logcli
      image: docker.io/grafana/logcli:2.2.0-amd64
      env:
        - name: LOKI_ADDR
          value: http://loki-querier-http-lokistack-sample.loki.svc.cluster.local:3100
      command: [ "/bin/sh", "-c", "--" ]
      args: [ "while true; do sleep 30; done;" ]
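      # Hypothetical usage (illustrative commands, not part of the committed manifest):
      # exec into the pod and query Loki through the LOKI_ADDR set above, e.g.
      #   kubectl exec -n loki -it logcli -- logcli labels
      #   kubectl exec -n loki -it logcli -- logcli query '{namespace="loki"}' --limit=10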
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: loki-promtail
  namespace: loki
  labels:
    app.kubernetes.io/name: promtail
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: promtail
  template:
    metadata:
      labels:
        app.kubernetes.io/name: promtail
      annotations:
        prometheus.io/port: metrics
        prometheus.io/scrape: "true"
    spec:
      containers:
        - args:
            - -config.file=/etc/promtail/promtail.yaml
            - -client.url=http://loki-distributor-http-lokistack-sample.loki.svc.cluster.local:3100/api/prom/push
            - -log.level=info
          env:
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          image: docker.io/grafana/promtail:2.1.0
          imagePullPolicy: IfNotPresent
          name: promtail
          ports:
            - containerPort: 3101
              name: metrics
              protocol: TCP
          readinessProbe:
            failureThreshold: 5
            httpGet:
              path: /ready
              port: metrics
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources: {}
          securityContext:
            procMount: Default
            readOnlyRootFilesystem: true
            runAsGroup: 0
            runAsUser: 0
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /etc/promtail
              name: config
            - mountPath: /run/promtail
              name: run
            - mountPath: /var/lib/docker/containers
              name: docker
              readOnly: true
            - mountPath: /var/log/pods
              name: pods
              readOnly: true
            - mountPath: /var/log/journal
              name: journal
              readOnly: true
      serviceAccountName: loki-promtail
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
      volumes:
        - configMap:
            defaultMode: 420
            name: loki-promtail
          name: config
        - hostPath:
            path: /run/promtail
            type: ""
          name: run
        - hostPath:
            path: /var/lib/docker/containers
            type: ""
          name: docker
        - hostPath:
            path: /var/log/pods
            type: ""
          name: pods
        - hostPath:
            path: /var/log/journal
            type: ""
          name: journal
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: loki-promtail
  namespace: loki
  labels:
    app.kubernetes.io/name: promtail
data:
  promtail.yaml: |
    client:
      backoff_config:
        min_period: 100ms
        max_period: 5s
        max_retries: 5
      batchsize: 102400
      batchwait: 1s
      external_labels: {}
      timeout: 10s
    positions:
      filename: /run/promtail/positions.yaml
    server:
      http_listen_port: 3101
    target_config:
      sync_period: 10s
    scrape_configs:
      - job_name: journal
        journal:
          max_age: 12h
          path: /var/log/journal
          labels:
            job: systemd-journal
        relabel_configs:
          - source_labels:
              - __journal__systemd_unit
            target_label: unit
          - source_labels:
              - __journal__hostname
            target_label: hostname
      - job_name: kubernetes-pods-name
        pipeline_stages:
          - docker: {}
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - source_labels:
              - __meta_kubernetes_pod_label_name
            target_label: __service__
          - source_labels:
              - __meta_kubernetes_pod_node_name
            target_label: __host__
          - action: drop
            regex: ^$
            source_labels:
              - __service__
          - action: replace
            replacement: $1
            separator: /
            source_labels:
              - __meta_kubernetes_namespace
              - __service__
            target_label: job
          - action: replace
            source_labels:
              - __meta_kubernetes_namespace
            target_label: namespace
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_name
            target_label: instance
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_container_name
            target_label: container_name
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - replacement: /var/log/pods/*$1/*.log
            separator: /
            source_labels:
              - __meta_kubernetes_pod_uid
              - __meta_kubernetes_pod_container_name
            target_label: __path__
      - job_name: kubernetes-pods-app
        pipeline_stages:
          - docker: {}
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - action: drop
            regex: .+
            source_labels:
              - __meta_kubernetes_pod_label_name
          - source_labels:
              - __meta_kubernetes_pod_label_app
            target_label: __service__
          - source_labels:
              - __meta_kubernetes_pod_node_name
            target_label: __host__
          - action: drop
            regex: ^$
            source_labels:
              - __service__
          - action: replace
            replacement: $1
            separator: /
            source_labels:
              - __meta_kubernetes_namespace
              - __service__
            target_label: job
          - action: replace
            source_labels:
              - __meta_kubernetes_namespace
            target_label: namespace
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_name
            target_label: instance
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_container_name
            target_label: container_name
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - replacement: /var/log/pods/*$1/*.log
            separator: /
            source_labels:
              - __meta_kubernetes_pod_uid
              - __meta_kubernetes_pod_container_name
            target_label: __path__
      - job_name: kubernetes-pods-direct-controllers
        pipeline_stages:
          - docker: {}
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - action: drop
            regex: .+
            separator: ''
            source_labels:
              - __meta_kubernetes_pod_label_name
              - __meta_kubernetes_pod_label_app
          - action: drop
            regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$
            source_labels:
              - __meta_kubernetes_pod_controller_name
          - source_labels:
              - __meta_kubernetes_pod_controller_name
            target_label: __service__
          - source_labels:
              - __meta_kubernetes_pod_node_name
            target_label: __host__
          - action: drop
            regex: ^$
            source_labels:
              - __service__
          - action: replace
            replacement: $1
            separator: /
            source_labels:
              - __meta_kubernetes_namespace
              - __service__
            target_label: job
          - action: replace
            source_labels:
              - __meta_kubernetes_namespace
            target_label: namespace
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_name
            target_label: instance
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_container_name
            target_label: container_name
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - replacement: /var/log/pods/*$1/*.log
            separator: /
            source_labels:
              - __meta_kubernetes_pod_uid
              - __meta_kubernetes_pod_container_name
            target_label: __path__
      - job_name: kubernetes-pods-indirect-controller
        pipeline_stages:
          - docker: {}
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - action: drop
            regex: .+
            separator: ''
            source_labels:
              - __meta_kubernetes_pod_label_name
              - __meta_kubernetes_pod_label_app
          - action: keep
            regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$
            source_labels:
              - __meta_kubernetes_pod_controller_name
          - action: replace
            regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$
            source_labels:
              - __meta_kubernetes_pod_controller_name
            target_label: __service__
          - source_labels:
              - __meta_kubernetes_pod_node_name
            target_label: __host__
          - action: drop
            regex: ^$
            source_labels:
              - __service__
          - action: replace
            replacement: $1
            separator: /
            source_labels:
              - __meta_kubernetes_namespace
              - __service__
            target_label: job
          - action: replace
            source_labels:
              - __meta_kubernetes_namespace
            target_label: namespace
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_name
            target_label: instance
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_container_name
            target_label: container_name
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - replacement: /var/log/pods/*$1/*.log
            separator: /
            source_labels:
              - __meta_kubernetes_pod_uid
              - __meta_kubernetes_pod_container_name
            target_label: __path__
      - job_name: kubernetes-pods-static
        pipeline_stages:
          - docker: {}
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - action: drop
            regex: ^$
            source_labels:
              - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_label_component
            target_label: __service__
          - source_labels:
              - __meta_kubernetes_pod_node_name
            target_label: __host__
          - action: drop
            regex: ^$
            source_labels:
              - __service__
          - action: replace
            replacement: $1
            separator: /
            source_labels:
              - __meta_kubernetes_namespace
              - __service__
            target_label: job
          - action: replace
            source_labels:
              - __meta_kubernetes_namespace
            target_label: namespace
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_name
            target_label: instance
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_container_name
            target_label: container_name
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - replacement: /var/log/pods/*$1/*.log
            separator: /
            source_labels:
              - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror
              - __meta_kubernetes_pod_container_name
            target_label: __path__
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: loki-promtail
  namespace: loki
  labels:
    app.kubernetes.io/name: promtail
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: loki-promtail-clusterrole
  labels:
    app.kubernetes.io/name: promtail
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
      - nodes/proxy
      - services
      - endpoints
      - pods
    verbs:
      - get
      - watch
      - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: loki-promtail-clusterrolebinding
  labels:
    app.kubernetes.io/name: promtail
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: loki-promtail-clusterrole
subjects:
  - kind: ServiceAccount
    name: loki-promtail
    namespace: loki

@@ -14,7 +14,6 @@ func BuildAll(stackName, namespace string) ([]client.Object, error) {
		return nil, err
	}
	res = append(res, cm)
	res = append(res, BuildDistributor(stackName)...)
	res = append(res, BuildIngester(stackName)...)
	res = append(res, BuildQuerier(stackName)...)

@@ -8,12 +8,14 @@ distributor:
     kvstore:
       store: memberlist
 frontend:
+  downstream_url: {{ .Querier.FQDN }}:{{ .Querier.Port }}
   compress_responses: true
   max_outstanding_per_tenant: 200
   log_queries_longer_than: 5s
 frontend_worker:
-  frontend_address: {{ .FrontendWorker.FQDN }}:{{ .FrontendWorker.Port }}
-  grpc_client_config:
-    max_send_msg_size: 104857600
+  # frontend_address: {{ .FrontendWorker.FQDN }}:{{ .FrontendWorker.Port }}
+  # grpc_client_config:
+  #   max_send_msg_size: 104857600
   parallelism: 32
 ingester:
   chunk_block_size: 262144
@@ -87,7 +89,7 @@ server:
   http_listen_port: 3100
   http_server_idle_timeout: 120s
   http_server_write_timeout: 1m
-  log_level: debug
+  log_level: info
 storage_config:
   boltdb:
     directory: {{ .StorageDirectory }}/index
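For context, this is roughly what the frontend block renders to for the sample stack that hack/addons.yaml targets, assuming the LokiStack is named lokistack-sample and httpPort resolves to 3100 (an illustration, not output copied from a rendered ConfigMap):

frontend:
  downstream_url: loki-querier-http-lokistack-sample:3100
  compress_responses: true
  max_outstanding_per_tenant: 200
  log_queries_longer_than: 5s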

@@ -3,9 +3,11 @@ package config
// lokiConfigOptions is used to render the loki-config.yaml file template
type Options struct {
	// FrontendWorker is required
	FrontendWorker FrontendWorker
	FrontendWorker Address
	// GossipRing is required
	GossipRing GossipRing
	GossipRing Address
	// Querier is required
	Querier Address
	// Storage is required
	StorageDirectory string
@@ -13,18 +15,9 @@ type Options struct {
	Namespace string
}

type FrontendWorker struct {
	// FQDN is the required name of the service or fqdn WITHOUT the port
type Address struct {
	// FQDN is required
	FQDN string
	// Port is the required service port
	Port int
}

type GossipRing struct {
	// FQDN is the required name of the gossip ring service or fqdn WITHOUT the port
	FQDN string
	// Port is the required gossip ring service port
	// Port is required
	Port int
}

@@ -18,6 +18,8 @@ import (
func BuildQuerier(stackName string) []client.Object {
	return []client.Object{
		NewQuerierDeployment(stackName),
		NewQuerierGRPCService(stackName),
		NewQuerierHTTPService(stackName),
	}
}
@@ -141,4 +143,52 @@ func NewQuerierDeployment(stackName string) *apps.Deployment {
			},
		},
	}
}

func NewQuerierGRPCService(stackName string) *core.Service {
	l := ComponentLabels("querier", stackName)
	return &core.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: apps.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:   serviceNameQuerierGRPC(stackName),
			Labels: l,
		},
		Spec: core.ServiceSpec{
			ClusterIP: "None",
			Ports: []core.ServicePort{
				{
					Name: "grpc",
					Port: grpcPort,
				},
			},
			Selector: l,
		},
	}
}

func NewQuerierHTTPService(stackName string) *core.Service {
	l := ComponentLabels("querier", stackName)
	return &core.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: apps.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:   serviceNameQuerierHTTP(stackName),
			Labels: l,
		},
		Spec: core.ServiceSpec{
			ClusterIP: "None",
			Ports: []core.ServicePort{
				{
					Name: "http",
					Port: httpPort,
				},
			},
			Selector: l,
		},
	}
}
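A minimal sketch of how the new constructors could be exercised from a test in the same package (the test itself is assumed, not part of this commit):

package manifests

import "testing"

// TestBuildQuerierObjects expects the deployment plus the two new headless
// services returned by BuildQuerier.
func TestBuildQuerierObjects(t *testing.T) {
	objs := BuildQuerier("lokistack-sample")
	if len(objs) != 3 {
		t.Fatalf("expected 3 querier objects (deployment, grpc service, http service), got %d", len(objs))
	}
}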

@@ -180,8 +180,8 @@ func NewQueryFrontendHTTPService(stackName string) *core.Service {
 			ClusterIP: "None",
 			Ports: []core.ServicePort{
 				{
-					Name: "grpc",
-					Port: grpcPort,
+					Name: "http",
+					Port: httpPort,
 				},
 			},
 			Selector: l,

@@ -12,12 +12,20 @@ import (
func LokiConfigMap(stackName, namespace string) (*core.ConfigMap, error) {
	b, err := config.Build(config.Options{
		Namespace:        namespace,
		StorageDirectory: strings.TrimRight(dataDirectory, "/"),
		GossipRing: config.GossipRing{
		FrontendWorker: config.Address{
			FQDN: "",
			Port: 0,
		},
		GossipRing: config.Address{
			FQDN: fqdn(LokiGossipRingService(stackName).GetName(), namespace),
			Port: gossipPort,
		},
		Querier: config.Address{
			FQDN: serviceNameQuerierHTTP(stackName),
			Port: httpPort,
		},
		StorageDirectory: strings.TrimRight(dataDirectory, "/"),
		Namespace:        namespace,
	})
	if err != nil {
		return nil, err

@@ -33,6 +33,14 @@ func GossipLabels() map[string]string {
	}
}

func serviceNameQuerierHTTP(stackName string) string {
	return fmt.Sprintf("loki-querier-http-%s", stackName)
}

func serviceNameQuerierGRPC(stackName string) string {
	return fmt.Sprintf("loki-querier-grpc-%s", stackName)
}

func serviceNameGossipRing(stackName string) string {
	return fmt.Sprintf("loki-gossip-ring-%s", stackName)
}
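For reference, a hypothetical test in the same package showing that the new helper produces the service name hard-coded into hack/addons.yaml (loki-querier-http-lokistack-sample):

package manifests

import "testing"

// TestServiceNameQuerierHTTP checks the querier HTTP service naming scheme.
func TestServiceNameQuerierHTTP(t *testing.T) {
	if got := serviceNameQuerierHTTP("lokistack-sample"); got != "loki-querier-http-lokistack-sample" {
		t.Fatalf("unexpected querier HTTP service name: %s", got)
	}
}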
