Merge pull request #4945 from grafana/upstream-operator

Add loki operator subproject
Ed Welch 3 years ago committed by GitHub
commit 56b70462e8
  1. .github/workflows/operator-bundle.yaml (+32)
  2. .github/workflows/operator-images.yaml (+132)
  3. .github/workflows/operator-scorecard.yaml (+35)
  4. .github/workflows/operator.yaml (+102)
  5. operator/.bingo/.gitignore (+12)
  6. operator/.bingo/README.md (+14)
  7. operator/.bingo/Variables.mk (+55)
  8. operator/.bingo/bingo.mod (+5)
  9. operator/.bingo/controller-gen.mod (+5)
  10. operator/.bingo/go.mod (+1)
  11. operator/.bingo/gofumpt.mod (+5)
  12. operator/.bingo/golangci-lint.mod (+5)
  13. operator/.bingo/kustomize.mod (+5)
  14. operator/.bingo/operator-sdk.mod (+13)
  15. operator/.bingo/variables.env (+22)
  16. operator/.dockerignore (+5)
  17. operator/.gitignore (+25)
  18. operator/.golangci.yaml (+43)
  19. operator/CONTRIBUTING.md (+20)
  20. operator/Dockerfile (+28)
  21. operator/Makefile (+220)
  22. operator/PROJECT (+19)
  23. operator/README.md (+29)
  24. operator/_config.yml (+1)
  25. operator/api/v1beta1/groupversion_info.go (+20)
  26. operator/api/v1beta1/lokistack_types.go (+660)
  27. operator/api/v1beta1/zz_generated.deepcopy.go (+666)
  28. operator/bundle.Dockerfile (+20)
  29. operator/bundle/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml (+28)
  30. operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml (+23)
  31. operator/bundle/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml (+25)
  32. operator/bundle/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml (+16)
  33. operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml (+25)
  34. operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml (+22)
  35. operator/bundle/manifests/loki-operator.clusterserviceversion.yaml (+701)
  36. operator/bundle/manifests/loki.grafana.com_lokistacks.yaml (+951)
  37. operator/bundle/metadata/annotations.yaml (+14)
  38. operator/bundle/tests/scorecard/config.yaml (+70)
  39. operator/calculator.Dockerfile (+26)
  40. operator/cmd/loki-broker/main.go (+152)
  41. operator/cmd/size-calculator/main.go (+63)
  42. operator/config/certmanager/certificate.yaml (+25)
  43. operator/config/certmanager/kustomization.yaml (+5)
  44. operator/config/certmanager/kustomizeconfig.yaml (+16)
  45. operator/config/crd/bases/loki.grafana.com_lokistacks.yaml (+695)
  46. operator/config/crd/kustomization.yaml (+21)
  47. operator/config/crd/kustomizeconfig.yaml (+19)
  48. operator/config/crd/patches/cainjection_in_lokistacks.yaml (+8)
  49. operator/config/crd/patches/webhook_in_lokistacks.yaml (+14)
  50. operator/config/manager/controller_manager_config.yaml (+11)
  51. operator/config/manager/kustomization.yaml (+16)
  52. operator/config/manager/manager.yaml (+38)
  53. operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml (+436)
  54. operator/config/manifests/kustomization.yaml (+4)
  55. operator/config/overlays/development/kustomization.yaml (+22)
  56. operator/config/overlays/development/manager_image_pull_policy_patch.yaml (+10)
  57. operator/config/overlays/development/manager_related_image_patch.yaml (+14)
  58. operator/config/overlays/development/minio/deployment.yaml (+38)
  59. operator/config/overlays/development/minio/kustomization.yaml (+5)
  60. operator/config/overlays/development/minio/pvc.yaml (+12)
  61. operator/config/overlays/development/minio/secret.yaml (+10)
  62. operator/config/overlays/development/minio/service.yaml (+12)
  63. operator/config/overlays/openshift/kustomization.yaml (+90)
  64. operator/config/overlays/openshift/manager_auth_proxy_patch.yaml (+29)
  65. operator/config/overlays/openshift/manager_related_image_patch.yaml (+16)
  66. operator/config/overlays/openshift/manager_run_flags_patch.yaml (+15)
  67. operator/config/overlays/openshift/prometheus_service_monitor_patch.yaml (+17)
  68. operator/config/overlays/openshift/size-calculator/cluster_monitoring_config.yaml (+8)
  69. operator/config/overlays/openshift/size-calculator/kustomization.yaml (+23)
  70. operator/config/overlays/openshift/size-calculator/logfile_metric_daemonset.yaml (+55)
  71. operator/config/overlays/openshift/size-calculator/logfile_metric_role.yaml (+13)
  72. operator/config/overlays/openshift/size-calculator/logfile_metric_role_binding.yaml (+11)
  73. operator/config/overlays/openshift/size-calculator/logfile_metric_scc.yaml (+43)
  74. operator/config/overlays/openshift/size-calculator/logfile_metric_service.yaml (+16)
  75. operator/config/overlays/openshift/size-calculator/logfile_metric_service_account.yaml (+9)
  76. operator/config/overlays/openshift/size-calculator/logfile_metric_service_monitor.yaml (+20)
  77. operator/config/overlays/openshift/size-calculator/storage_size_calculator.yaml (+38)
  78. operator/config/overlays/openshift/size-calculator/storage_size_calculator_config.yaml (+6)
  79. operator/config/overlays/openshift/size-calculator/user_workload_monitoring_config.yaml (+9)
  80. operator/config/overlays/production/kustomization.yaml (+88)
  81. operator/config/overlays/production/manager_auth_proxy_patch.yaml (+31)
  82. operator/config/overlays/production/manager_related_image_patch.yaml (+14)
  83. operator/config/overlays/production/manager_run_flags_patch.yaml (+11)
  84. operator/config/overlays/production/prometheus_service_monitor_patch.yaml (+18)
  85. operator/config/prometheus/kustomization.yaml (+2)
  86. operator/config/prometheus/monitor.yaml (+12)
  87. operator/config/rbac/auth_proxy_client_clusterrole.yaml (+7)
  88. operator/config/rbac/auth_proxy_role.yaml (+13)
  89. operator/config/rbac/auth_proxy_role_binding.yaml (+12)
  90. operator/config/rbac/auth_proxy_service.yaml (+15)
  91. operator/config/rbac/kustomization.yaml (+11)
  92. operator/config/rbac/leader_election_role.yaml (+27)
  93. operator/config/rbac/leader_election_role_binding.yaml (+12)
  94. operator/config/rbac/lokistack_editor_role.yaml (+24)
  95. operator/config/rbac/lokistack_viewer_role.yaml (+20)
  96. operator/config/rbac/prometheus_role.yaml (+18)
  97. operator/config/rbac/prometheus_role_binding.yaml (+16)
  98. operator/config/rbac/role.yaml (+131)
  99. operator/config/rbac/role_binding.yaml (+12)
  100. operator/config/samples/kustomization.yaml (+4)

Some files were not shown because too many files have changed in this diff.

.github/workflows/operator-bundle.yaml
@@ -0,0 +1,32 @@
name: operator bundle
on:
  push:
    paths:
      - 'operator/**'
    branches: [ master ]
  pull_request:
    paths:
      - 'operator/**'
    branches: [ master ]
jobs:
  build:
    name: build
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go: ['1.16']
    steps:
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
        id: go
      - uses: actions/checkout@v2
      - name: Install make
        run: sudo apt-get install make
      - name: make bundle
        run: make bundle
        working-directory: ./operator

.github/workflows/operator-images.yaml
@@ -0,0 +1,132 @@
name: operator images
on:
  push:
    paths:
      - 'operator/**'
    branches:
      - master
env:
  IMAGE_REGISTRY: quay.io
  IMAGE_ORGANIZATION: openshift-logging
  IMAGE_OPERATOR_NAME: loki-operator
  IMAGE_BUNDLE_NAME: loki-operator-bundle
  IMAGE_CALCULATOR_NAME: storage-size-calculator
jobs:
  publish-manager:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to Quay.io
        uses: docker/login-action@v1
        with:
          registry: quay.io
          logout: true
          username: ${{ secrets.OPENSHIFT_LOGGING_USER }}
          password: ${{ secrets.OPENSHIFT_LOGGING_PASS }}
      - name: Get image tags
        id: image_tags
        run: |
          echo -n ::set-output name=IMAGE_TAGS::
          PULLSPEC="$IMAGE_REGISTRY/$IMAGE_ORGANIZATION/$IMAGE_OPERATOR_NAME"
          TAGS=("$PULLSPEC:latest", "$PULLSPEC:v0.0.1")
          BUILD_DATE="$(date -u +'%Y-%m-%d')"
          VCS_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          VCS_REF="$(git rev-parse --short HEAD)"
          TAGS+=("$PULLSPEC:$VCS_BRANCH-$BUILD_DATE-$VCS_REF")
          ( IFS=$','; echo "${TAGS[*]}" )
      - name: Build and publish image on quay.io
        uses: docker/build-push-action@v2
        with:
          context: ./operator
          push: true
          tags: "${{ steps.image_tags.outputs.IMAGE_TAGS }}"
  publish-bundle:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to Quay.io
        uses: docker/login-action@v1
        with:
          registry: quay.io
          logout: true
          username: ${{ secrets.OPENSHIFT_LOGGING_USER }}
          password: ${{ secrets.OPENSHIFT_LOGGING_PASS }}
      - name: Get image tags
        id: image_tags
        run: |
          echo -n ::set-output name=IMAGE_TAGS::
          PULLSPEC="$IMAGE_REGISTRY/$IMAGE_ORGANIZATION/$IMAGE_BUNDLE_NAME"
          TAGS=("$PULLSPEC:latest", "$PULLSPEC:v0.0.1")
          BUILD_DATE="$(date -u +'%Y-%m-%d')"
          VCS_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          VCS_REF="$(git rev-parse --short HEAD)"
          TAGS+=("$PULLSPEC:$VCS_BRANCH-$BUILD_DATE-$VCS_REF")
          ( IFS=$','; echo "${TAGS[*]}" )
      - name: Build and publish image on quay.io
        uses: docker/build-push-action@v2
        with:
          context: ./operator
          file: bundle.Dockerfile
          push: true
          tags: "${{ steps.image_tags.outputs.IMAGE_TAGS }}"
  publish-size-calculator:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to Quay.io
        uses: docker/login-action@v1
        with:
          registry: quay.io
          logout: true
          username: ${{ secrets.OPENSHIFT_LOGGING_USER }}
          password: ${{ secrets.OPENSHIFT_LOGGING_PASS }}
      - name: Get image tags
        id: image_tags
        run: |
          echo -n ::set-output name=IMAGE_TAGS::
          PULLSPEC="$IMAGE_REGISTRY/$IMAGE_ORGANIZATION/$IMAGE_CALCULATOR_NAME"
          TAGS=("$PULLSPEC:latest", "$PULLSPEC:v0.0.1")
          BUILD_DATE="$(date -u +'%Y-%m-%d')"
          VCS_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          VCS_REF="$(git rev-parse --short HEAD)"
          TAGS+=("$PULLSPEC:$VCS_BRANCH-$BUILD_DATE-$VCS_REF")
          ( IFS=$','; echo "${TAGS[*]}" )
      - name: Build and publish image on quay.io
        uses: docker/build-push-action@v2
        with:
          context: ./operator
          file: calculator.Dockerfile
          push: true
          tags: "${{ steps.image_tags.outputs.IMAGE_TAGS }}"

.github/workflows/operator-scorecard.yaml
@@ -0,0 +1,35 @@
name: operator scorecard
on:
  push:
    paths:
      - 'operator/**'
    branches: [ master ]
  pull_request:
    paths:
      - 'operator/**'
    branches: [ master ]
jobs:
  build:
    name: scorecard
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go: ['1.16']
    steps:
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
        id: go
      - uses: engineerd/setup-kind@v0.5.0
        with:
          version: "v0.11.1"
      - uses: actions/checkout@v2
      - name: Install make
        run: sudo apt-get install make
      - name: Run scorecard
        run: make scorecard
        working-directory: ./operator

.github/workflows/operator.yaml
@@ -0,0 +1,102 @@
name: operator build
on:
  push:
    paths:
      - 'operator/**'
    branches: [ master ]
  pull_request:
    paths:
      - 'operator/**'
    branches: [ master ]
jobs:
  lint:
    name: lint
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go: ['1.16']
    steps:
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
        id: go
      - uses: actions/checkout@v2
      - name: Lint
        uses: golangci/golangci-lint-action@v2
        with:
          version: v1.38
          skip-go-installation: true
          only-new-issues: true
          args: --timeout=2m
          working-directory: ./operator
  build-manager:
    name: Build Manager
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go: ['1.16']
    steps:
      - name: Install make
        run: sudo apt-get install make
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
        id: go
      - uses: actions/checkout@v2
      - name: Build Manager
        working-directory: ./operator
        run: |-
          make manager && git diff --exit-code
  build-broker:
    name: Build Broker
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go: ['1.16']
    steps:
      - name: Install make
        run: sudo apt-get install make
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
        id: go
      - uses: actions/checkout@v2
      - name: Build Broker
        working-directory: ./operator
        run: |-
          make bin/loki-broker && git diff --exit-code
  test:
    name: test
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go: ['1.16']
    steps:
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
        id: go
      - uses: actions/checkout@v2
      - name: Run tests
        working-directory: ./operator
        run: go test -coverprofile=profile.cov ./...
      - name: Send coverage
        uses: shogo82148/actions-goveralls@v1
        working-directory: ./operator
        with:
          path-to-profile: profile.cov
          flag-name: Go-${{ matrix.go }}
          shallow: true

operator/.bingo/.gitignore
@@ -0,0 +1,12 @@
# Ignore everything
*
# But not these files:
!.gitignore
!*.mod
!README.md
!Variables.mk
!variables.env
*tmp.mod

operator/.bingo/README.md
@@ -0,0 +1,14 @@
# Project Development Dependencies.
This directory stores Go modules with pinned, buildable packages that are used within this repository, managed by https://github.com/bwplotka/bingo.
* Run `bingo get` to install all tools that have their own module file in this directory.
* Run `bingo get <tool>` to install `<tool>`, which has its own module file in this directory.
* For Makefiles: make sure to put `include .bingo/Variables.mk` in your Makefile, then use the `$(<UPPER CASE TOOL NAME>)` variable, where `<tool>` is the `.bingo/<tool>.mod` file.
* For shell: run `source .bingo/variables.env` to source an environment variable for each tool.
* For Go: import `.bingo/variables.go` for the variable names.
* See https://github.com/bwplotka/bingo or `-h` on how to add, remove or change binary dependencies.
## Requirements
* Go 1.14+

operator/.bingo/Variables.mk
@@ -0,0 +1,55 @@
# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.5.1. DO NOT EDIT.
# All tools are designed to be build inside $GOBIN.
BINGO_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
GOPATH ?= $(shell go env GOPATH)
GOBIN ?= $(firstword $(subst :, ,${GOPATH}))/bin
GO ?= $(shell which go)
# Below generated variables ensure that every time a tool under each variable is invoked, the correct version
# will be used; reinstalling only if needed.
# For example for bingo variable:
#
# In your main Makefile (for non array binaries):
#
#include .bingo/Variables.mk # Assuming -dir was set to .bingo .
#
#command: $(BINGO)
# @echo "Running bingo"
# @$(BINGO) <flags/args..>
#
BINGO := $(GOBIN)/bingo-v0.4.0
$(BINGO): $(BINGO_DIR)/bingo.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
@echo "(re)installing $(GOBIN)/bingo-v0.4.0"
@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.4.0 "github.com/bwplotka/bingo"
CONTROLLER_GEN := $(GOBIN)/controller-gen-v0.5.0
$(CONTROLLER_GEN): $(BINGO_DIR)/controller-gen.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
@echo "(re)installing $(GOBIN)/controller-gen-v0.5.0"
@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=controller-gen.mod -o=$(GOBIN)/controller-gen-v0.5.0 "sigs.k8s.io/controller-tools/cmd/controller-gen"
GOFUMPT := $(GOBIN)/gofumpt-v0.1.1
$(GOFUMPT): $(BINGO_DIR)/gofumpt.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
@echo "(re)installing $(GOBIN)/gofumpt-v0.1.1"
@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=gofumpt.mod -o=$(GOBIN)/gofumpt-v0.1.1 "mvdan.cc/gofumpt"
GOLANGCI_LINT := $(GOBIN)/golangci-lint-v1.38.0
$(GOLANGCI_LINT): $(BINGO_DIR)/golangci-lint.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
@echo "(re)installing $(GOBIN)/golangci-lint-v1.38.0"
@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v1.38.0 "github.com/golangci/golangci-lint/cmd/golangci-lint"
KUSTOMIZE := $(GOBIN)/kustomize-v3.8.7
$(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
@echo "(re)installing $(GOBIN)/kustomize-v3.8.7"
@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=kustomize.mod -o=$(GOBIN)/kustomize-v3.8.7 "sigs.k8s.io/kustomize/kustomize/v3"
OPERATOR_SDK := $(GOBIN)/operator-sdk-v1.11.0
$(OPERATOR_SDK): $(BINGO_DIR)/operator-sdk.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
@echo "(re)installing $(GOBIN)/operator-sdk-v1.11.0"
@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=operator-sdk.mod -o=$(GOBIN)/operator-sdk-v1.11.0 "github.com/operator-framework/operator-sdk/cmd/operator-sdk"

operator/.bingo/bingo.mod
@@ -0,0 +1,5 @@
module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
go 1.16
require github.com/bwplotka/bingo v0.4.0

operator/.bingo/controller-gen.mod
@@ -0,0 +1,5 @@
module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
go 1.16
require sigs.k8s.io/controller-tools v0.5.0 // cmd/controller-gen

operator/.bingo/go.mod
@@ -0,0 +1 @@
module _ // Fake go.mod auto-created by 'bingo' for go -moddir compatibility with non-Go projects. Commit this file, together with other .mod files.

operator/.bingo/gofumpt.mod
@@ -0,0 +1,5 @@
module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
go 1.16
require mvdan.cc/gofumpt v0.1.1

operator/.bingo/golangci-lint.mod
@@ -0,0 +1,5 @@
module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
go 1.16
require github.com/golangci/golangci-lint v1.38.0 // cmd/golangci-lint

operator/.bingo/kustomize.mod
@@ -0,0 +1,5 @@
module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
go 1.16
require sigs.k8s.io/kustomize/kustomize/v3 v3.8.7

operator/.bingo/operator-sdk.mod
@@ -0,0 +1,13 @@
module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
go 1.16
replace github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible
replace github.com/containerd/containerd => github.com/containerd/containerd v1.4.3
replace github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.10.0
replace golang.org/x/text => golang.org/x/text v0.3.3
require github.com/operator-framework/operator-sdk v1.11.0 // cmd/operator-sdk

operator/.bingo/variables.env
@@ -0,0 +1,22 @@
# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.5.1. DO NOT EDIT.
# All tools are designed to be build inside $GOBIN.
# Those variables will work only until 'bingo get' was invoked, or if tools were installed via Makefile's Variables.mk.
GOBIN=${GOBIN:=$(go env GOBIN)}
if [ -z "$GOBIN" ]; then
GOBIN="$(go env GOPATH)/bin"
fi
BINGO="${GOBIN}/bingo-v0.4.0"
CONTROLLER_GEN="${GOBIN}/controller-gen-v0.5.0"
GOFUMPT="${GOBIN}/gofumpt-v0.1.1"
GOLANGCI_LINT="${GOBIN}/golangci-lint-v1.38.0"
KUSTOMIZE="${GOBIN}/kustomize-v3.8.7"
OPERATOR_SDK="${GOBIN}/operator-sdk-v1.11.0"

operator/.dockerignore
@@ -0,0 +1,5 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore all files that are not Go source, module, or checksum files
!**/*.go
!**/*.mod
!**/*.sum

operator/.gitignore
@@ -0,0 +1,25 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
bin
testbin/*
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*
# editor and IDE paraphernalia
.idea
*.swp
*.swo
*~

operator/.golangci.yaml
@@ -0,0 +1,43 @@
---
run:
  tests: false
  skip-files:
    - "example_.+_test.go$"
# golangci.com configuration
# https://github.com/golangci/golangci/wiki/Configuration
linters-settings:
  govet:
    check-shadowing: true
  maligned:
    suggest-new: true
  misspell:
    locale: US
linters:
  enable-all: false
  enable:
    - deadcode # Finds unused code
    - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases
    - goerr113 # checks that errors are wrapped according to go 1.13 error wrapping tools
    - gofumpt # checks that gofumpt was run on all source code
    - goimports # checks that goimports was run on all source code
    - golint
    - gosimple # Linter for Go source code that specializes in simplifying a code
    - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
    - ineffassign # Detects when assignments to existing variables are not used
    - misspell # spell checker
    - rowserrcheck # checks whether Err of rows is checked successfully
    - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks
    - structcheck # Finds unused struct fields
    - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code
    - unused # Checks Go code for unused constants, variables, functions and types
    - varcheck # Finds unused global variables and constants
issues:
  exclude-use-default: false
  exclude-rules:
  # - text: "could be of size"
  #   path: api/v1beta1/lokistack_types.go
  #   linters:
  #     - maligned

operator/CONTRIBUTING.md
@@ -0,0 +1,20 @@
# Contributing to Loki Operator
## Ideology
OpenShift has proven to be a powerful and successful platform for running containers in production. Our primary goal is to bring Loki Operator to our customers. That being said, it is a very large platform intended for large-scale production use. It is not intended to be ephemeral.
The tools required to run and test an OCP cluster are complex and cumbersome. The current processes to build an OCP cluster include the slack cluster-bot, openshift-install script, and CRC. The fastest route to create a working OCP cluster is 45 minutes. CRC *may* be faster, but it requires over [half of your local machine's resources](https://coreos.slack.com/archives/GGUR75P60/p1591803889037800) and doesn’t handle sleeping/suspending very well. Using openshift-install comes with its own [headaches](https://coreos.slack.com/archives/GGUR75P60/p1615458361119300). These blockers cause a significant amount of [wasted time](https://coreos.slack.com/archives/GGUR75P60/p1599242159479000?thread_ts=1599241354.478700&cid=GGUR75P60) that could be spent on more valuable things.
Nevertheless, I argue that none of this is necessary. The problems are caused when we bastardize a large, complex, production platform for testing and tooling. OpenShift is a superset of Kubernetes. Operators are now Kubernetes native. Given this reality, we have called the Loki Operator a Kubernetes operator rather than an OpenShift operator. This may seem like a trivial delineation, but it isn’t. The operator has been designed from the beginning using Kubernetes tools and APIs. This has allowed us to build, test, and deploy in very little time with very little effort. It is not uncommon to create a pull request and have it [reviewed and merged](https://github.com/grafana/loki/pulls?q=is%3Apr+is%3Aclosed) within 15 minutes.
There are certainly OCP exclusives that we want to program into the Loki Operator, but these shouldn’t block or break the primary objectives. In other words, the Loki Operator should be Kubernetes first and OpenShift second. The Loki Operator should be open to using the OpenShift APIs without requiring them. All tools, automation, scripts, make targets, etc., should work naturally with Kubernetes and Kubernetes-compatible APIs. <u>OCP exclusives should be opt-in</u>. It might be natural to think this obstructs deploying to OCP, but that is far from true. Packaging for OCP should be a scripted process that, once opted in, builds all of the necessary components. So far, it has proven to be successful.
## Tooling
We use [KinD](https://github.com/kubernetes-sigs/kind) to deploy and test the Loki Operator. We have had no compatibility issues, no wasted time on a learning curve, no failed clusters, no token expirations, no cluster expirations, no spinning laptop fans from gluttonous virtual machines, etc. It takes approximately 20 seconds to create a local KinD cluster and your machine won’t even notice it’s running. The cluster is fully compatible with all Kubernetes APIs and the operator runs on KinD perfectly. After your KinD cluster is created your kubeconfig is updated and the Makefile will work. The Makefiles and scripts are written to work with kubectl. This abstraction prevents any unnecessary complications caused by magic processes like deploying images to internal clusters, etc.
## Testing
Tests should be succinct and without dependencies. This means that unit tests are the de-facto form of testing the Loki Operator. Unit tests are written with the standard Go library using [testify](https://github.com/stretchr/testify) for assertions. [Counterfeiter](https://github.com/maxbrunsfeld/counterfeiter) is included for generating test fakes and stubs for all dependencies. This library provides an API for generating fake implementations of interfaces for injecting them into testable units of code. Unit tests should implement or stub *only the parts required to test*. Large, all-inclusive structs should be avoided in favor of concise, single responsibility functions. This encourages small tests with minimal assertions to keep them hyper-focused, making tests easy to create *and* maintain.
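For illustration only (not part of this diff), a minimal sketch of the testing style described above, using the standard library plus testify's `require` package. The `clampReplicas` helper and the `example` package are hypothetical and exist only for this sketch; a file like this would live alongside the unit under test (counterfeiter-generated fakes are used when an interface dependency needs to be stubbed, which this sketch omits).

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// clampReplicas is a hypothetical, single-responsibility function under test;
// it exists only for this sketch.
func clampReplicas(n int32) int32 {
	if n < 1 {
		return 1
	}
	return n
}

func TestClampReplicas(t *testing.T) {
	// Small, hyper-focused assertions keep the test easy to create and maintain.
	require.Equal(t, int32(1), clampReplicas(0))
	require.Equal(t, int32(3), clampReplicas(3))
}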

operator/Dockerfile
@@ -0,0 +1,28 @@
# Build the manager binary
FROM golang:1.16 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY controllers/ controllers/
COPY internal/ internal/
# Build
RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532
ENTRYPOINT ["/manager"]

operator/Makefile
@@ -0,0 +1,220 @@
# include the bingo binary variables. This enables the bingo versions to be
# referenced here as make variables. For example: $(GOLANGCI_LINT)
include .bingo/Variables.mk
# set the default target here, because the include above will automatically set
# it to the first defined target
.DEFAULT_GOAL := default
default: all
# CLUSTER_LOGGING_VERSION
# defines the version of the OpenShift Cluster Logging product.
# Update this value when a new version of the product should include this operator and its bundle.
CLUSTER_LOGGING_VERSION ?= 5.1.preview.1
# CLUSTER_LOGGING_NS
# defines the default namespace of the OpenShift Cluster Logging product.
CLUSTER_LOGGING_NS ?= openshift-logging
# VERSION
# defines the project version for the bundle.
# Update this value when you upgrade the version of your project.
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= v0.0.1
CHANNELS ?= "tech-preview"
DEFAULT_CHANNELS ?= "tech-preview"
# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "preview,fast,stable")
# To re-generate a bundle for other specific channels without changing the standard setup, you can:
# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=preview,fast,stable)
# - use environment variables to overwrite this value (e.g export CHANNELS="preview,fast,stable")
ifneq ($(origin CHANNELS), undefined)
BUNDLE_CHANNELS := --channels=$(CHANNELS)
endif
# DEFAULT_CHANNEL defines the default channel used in the bundle.
# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable")
# To re-generate a bundle for any other default channel without changing the default setup, you can:
# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable)
# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable")
ifneq ($(origin DEFAULT_CHANNEL), undefined)
BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
REGISTRY_ORG ?= openshift-logging
# BUNDLE_IMG defines the image:tag used for the bundle.
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
BUNDLE_IMG ?= quay.io/$(REGISTRY_ORG)/loki-operator-bundle:$(VERSION)
CALCULATOR_IMG ?= quay.io/$(REGISTRY_ORG)/storage-size-calculator:latest
GO_FILES := $(shell find . -type f -name '*.go')
# Image URL to use all building/pushing image targets
IMG ?= quay.io/$(REGISTRY_ORG)/loki-operator:$(VERSION)
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
all: generate lint manager bin/loki-broker
OCI_RUNTIME ?= $(shell which podman || which docker)
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Development
.PHONY: deps
deps: go.mod go.sum
go mod tidy
go mod download
go mod verify
cli: deps bin/loki-broker ## Build loki-broker CLI binary
bin/loki-broker: $(GO_FILES) | generate
go build -o $@ ./cmd/loki-broker/
manager: deps generate ## Build manager binary
go build -o bin/manager main.go
size-calculator: deps generate ## Build size-calculator binary
go build -o bin/size-calculator main.go
go-generate: ## Run go generate
go generate ./...
generate: $(CONTROLLER_GEN) ## Generate controller and crd code
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
test: deps generate go-generate lint manifests ## Run tests
test: $(GO_FILES)
go test ./... -coverprofile cover.out
scorecard: generate go-generate bundle ## Run scorecard test
$(OPERATOR_SDK) scorecard bundle
lint: $(GOLANGCI_LINT) | generate ## Run golangci-lint on source code.
$(GOLANGCI_LINT) run ./...
fmt: $(GOFUMPT) ## Run gofumpt on source code.
find . -type f -name '*.go' -not -path '**/fake_*.go' -exec $(GOFUMPT) -s -w {} \;
oci-build: ## Build the image
$(OCI_RUNTIME) build -t ${IMG} .
oci-push: ## Push the image
$(OCI_RUNTIME) push ${IMG}
.PHONY: bundle ## Generate bundle manifests and metadata, then validate generated files.
bundle: manifests $(KUSTOMIZE) $(OPERATOR_SDK)
$(OPERATOR_SDK) generate kustomize manifests -q
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle -q --overwrite --version $(subst v,,$(VERSION)) $(BUNDLE_METADATA_OPTS)
$(OPERATOR_SDK) bundle validate ./bundle
.PHONY: bundle-build
bundle-build: ## Build the bundle image.
$(OCI_RUNTIME) build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
##@ Deployment
run: generate manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
go run ./main.go
install: manifests $(KUSTOMIZE) ## Install CRDs into a cluster
$(KUSTOMIZE) build config/crd | kubectl apply -f -
uninstall: manifests $(KUSTOMIZE) ## Uninstall CRDs from a cluster
$(KUSTOMIZE) build config/crd | kubectl delete -f -
deploy: manifests $(KUSTOMIZE) ## Deploy controller in the configured Kubernetes cluster in ~/.kube/config
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/overlays/development | kubectl apply -f -
undeploy: ## Undeploy controller from the configured Kubernetes cluster in ~/.kube/config
$(KUSTOMIZE) build config/overlays/development | kubectl delete -f -
# Build and push the bundle image to a container registry.
.PHONY: olm-deploy-bundle
olm-deploy-bundle: bundle bundle-build
$(MAKE) oci-push IMG=$(BUNDLE_IMG)
# Build and push the operator image to a container registry.
.PHONY: olm-deploy-operator
olm-deploy-operator: oci-build oci-push
.PHONY: olm-deploy
ifeq ($(or $(findstring openshift-logging,$(IMG)),$(findstring openshift-logging,$(BUNDLE_IMG))),openshift-logging)
olm-deploy: ## Deploy the operator bundle and the operator via OLM into a Kubernetes cluster selected via KUBECONFIG.
$(error Set variable REGISTRY_ORG to use a custom container registry org account for local development)
else
olm-deploy: olm-deploy-bundle olm-deploy-operator $(OPERATOR_SDK)
kubectl create ns $(CLUSTER_LOGGING_NS)
kubectl label ns/$(CLUSTER_LOGGING_NS) openshift.io/cluster-monitoring=true --overwrite
$(OPERATOR_SDK) run bundle -n $(CLUSTER_LOGGING_NS) --install-mode OwnNamespace $(BUNDLE_IMG)
endif
# Build and push the secret for the S3 storage
.PHONY: olm-deploy-example-storage-secret
olm-deploy-example-storage-secret:
hack/deploy-example-secret.sh $(CLUSTER_LOGGING_NS)
.PHONY: olm-deploy-example
olm-deploy-example: olm-deploy olm-deploy-example-storage-secret ## Deploy example LokiStack custom resource
kubectl -n $(CLUSTER_LOGGING_NS) create -f hack/lokistack_dev.yaml
.PHONY: olm-undeploy
olm-undeploy: $(OPERATOR_SDK) ## Cleanup deployments of the operator bundle and the operator via OLM on an OpenShift cluster selected via KUBECONFIG.
$(OPERATOR_SDK) cleanup loki-operator
kubectl delete ns $(CLUSTER_LOGGING_NS)
.PHONY: deploy-size-calculator
ifeq ($(findstring openshift-logging,$(CALCULATOR_IMG)),openshift-logging)
deploy-size-calculator: ## Deploy storage size calculator (OpenShift only!)
$(error Set variable REGISTRY_ORG to use a custom container registry org account for local development)
else
deploy-size-calculator: $(KUSTOMIZE) ## Deploy storage size calculator (OpenShift only!)
kubectl apply -f config/overlays/openshift/size-calculator/cluster_monitoring_config.yaml
kubectl apply -f config/overlays/openshift/size-calculator/user_workload_monitoring_config.yaml
./hack/deploy-prometheus-secret.sh
$(KUSTOMIZE) build config/overlays/openshift/size-calculator | kubectl apply -f -
endif
.PHONY: undeploy-size-calculator
undeploy-size-calculator: ## Undeploy storage size calculator
$(KUSTOMIZE) build config/overlays/openshift/size-calculator | kubectl delete -f -
oci-build-calculator: ## Build the calculator image
$(OCI_RUNTIME) build -f calculator.Dockerfile -t $(CALCULATOR_IMG) .
oci-push-calculator: ## Push the calculator image
$(OCI_RUNTIME) push $(CALCULATOR_IMG)

operator/PROJECT
@@ -0,0 +1,19 @@
domain: grafana.com
layout:
- go.kubebuilder.io/v3
plugins:
  manifests.sdk.operatorframework.io/v2: {}
  scorecard.sdk.operatorframework.io/v2: {}
projectName: loki-operator
repo: github.com/grafana/loki
resources:
- api:
    crdVersion: v1beta1
    namespaced: true
  controller: true
  domain: grafana.com
  group: loki
  kind: LokiStack
  path: github.com/grafana/loki/operator/api/v1beta1
  version: v1beta1
version: "3"

operator/README.md
@@ -0,0 +1,29 @@
![](img/loki-operator.png)
# Loki Operator
This is the Kubernetes Operator for [Loki](https://grafana.com/docs/loki/latest/)
provided by the Grafana Loki SIG operator. **This is currently a work in
progress and is subject to large scale changes that will break any dependencies.
Do not use this in any production environment.**
## Development
Requirements:
1. A running Kubernetes cluster. Our team uses
[KinD](https://kind.sigs.k8s.io/docs/user/quick-start/) or
[K3s](https://k3s.io/) for simplicity.
1. A container registry that you and your Kubernetes cluster can reach. We
recommend [quay.io](https://quay.io/signin/).
Build and push the container image and then deploy the operator with `make
oci-build oci-push deploy IMG=quay.io/my-team/loki-operator:latest`. This will
deploy to your active Kubernetes/OpenShift cluster defined by your local
[kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/).
For a detailed, step-by-step guide on how to start development and testing on KinD and OpenShift,
check our [documentation](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md).
There is also a [document](https://github.com/grafana/loki/blob/master/operator/docs/hack_operator_make_run.md) that
demonstrates how to use the Loki Operator for local development and testing on KinD and OpenShift without deploying the operator each time.

operator/_config.yml
@@ -0,0 +1 @@
theme: jekyll-theme-slate

operator/api/v1beta1/groupversion_info.go
@@ -0,0 +1,20 @@
// Package v1beta1 contains API Schema definitions for the loki v1beta1 API group
// +kubebuilder:object:generate=true
// +groupName=loki.grafana.com
package v1beta1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "loki.grafana.com", Version: "v1beta1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
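As an aside (not part of this diff), a minimal sketch of how the generated AddToScheme above is typically consumed when building a controller-runtime scheme. The import path follows the path recorded in the PROJECT file; the standalone main function is illustrative only.

package main

import (
	"fmt"
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
)

func main() {
	// Register the built-in Kubernetes types and the loki.grafana.com/v1beta1
	// types (e.g. LokiStack) in a single runtime.Scheme, as a manager would.
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := lokiv1beta1.AddToScheme(scheme); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("registered kinds:", len(scheme.AllKnownTypes()))
}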

operator/api/v1beta1/lokistack_types.go
@@ -0,0 +1,660 @@
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// ManagementStateType defines the type for CR management states.
//
// +kubebuilder:validation:Enum=Managed;Unmanaged
type ManagementStateType string
const (
// ManagementStateManaged when the LokiStack custom resource should be
// reconciled by the operator.
ManagementStateManaged ManagementStateType = "Managed"
// ManagementStateUnmanaged when the LokiStack custom resource should not be
// reconciled by the operator.
ManagementStateUnmanaged ManagementStateType = "Unmanaged"
)
// LokiStackSizeType declares the type for loki cluster scale outs.
//
// +kubebuilder:validation:Enum="1x.extra-small";"1x.small";"1x.medium"
type LokiStackSizeType string
const (
// SizeOneXExtraSmall defines the size of a single Loki deployment
// with extra small resources/limits requirements and without HA support.
// This size is ultimately dedicated for development and demo purposes.
// DO NOT USE THIS IN PRODUCTION!
//
// FIXME: Add clear description of ingestion/query performance expectations.
SizeOneXExtraSmall LokiStackSizeType = "1x.extra-small"
// SizeOneXSmall defines the size of a single Loki deployment
// with small resources/limits requirements and HA support for all
// Loki components. This size is dedicated for setup **without** the
// requirement for single replication factor and auto-compaction.
//
// FIXME: Add clear description of ingestion/query performance expectations.
SizeOneXSmall LokiStackSizeType = "1x.small"
// SizeOneXMedium defines the size of a single Loki deployment
// with small resources/limits requirements and HA support for all
// Loki components. This size is dedicated for setup **with** the
// requirement for single replication factor and auto-compaction.
//
// FIXME: Add clear description of ingestion/query performance expectations.
SizeOneXMedium LokiStackSizeType = "1x.medium"
)
// SubjectKind is a kind of LokiStack Gateway RBAC subject.
//
// +kubebuilder:validation:Enum=user;group
type SubjectKind string
const (
// User represents a subject that is a user.
User SubjectKind = "user"
// Group represents a subject that is a group.
Group SubjectKind = "group"
)
// Subject represents a subject that has been bound to a role.
type Subject struct {
Name string `json:"name"`
Kind SubjectKind `json:"kind"`
}
// RoleBindingsSpec binds a set of roles to a set of subjects.
type RoleBindingsSpec struct {
Name string `json:"name"`
Subjects []Subject `json:"subjects"`
Roles []string `json:"roles"`
}
// PermissionType is a LokiStack Gateway RBAC permission.
//
// +kubebuilder:validation:Enum=read;write
type PermissionType string
const (
// Write gives access to write data to a tenant.
Write PermissionType = "write"
// Read gives access to read data from a tenant.
Read PermissionType = "read"
)
// RoleSpec describes a set of permissions to interact with a tenant.
type RoleSpec struct {
Name string `json:"name"`
Resources []string `json:"resources"`
Tenants []string `json:"tenants"`
Permissions []PermissionType `json:"permissions"`
}
// OPASpec defines the opa configuration spec for lokiStack Gateway component.
type OPASpec struct {
// URL defines the third-party endpoint for authorization.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OpenPolicyAgent URL"
URL string `json:"url"`
}
// AuthorizationSpec defines the opa, role bindings and roles
// configuration per tenant for lokiStack Gateway component.
type AuthorizationSpec struct {
// OPA defines the spec for the third-party endpoint for tenant's authorization.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OPA Configuration"
OPA *OPASpec `json:"opa"`
// Roles defines a set of permissions to interact with a tenant.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Static Roles"
Roles []RoleSpec `json:"roles"`
// RoleBindings defines configuration to bind a set of roles to a set of subjects.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Static Role Bindings"
RoleBindings []RoleBindingsSpec `json:"roleBindings"`
}
// TenantSecretSpec is a secret reference containing name only
// for a secret living in the same namespace as the LokiStack custom resource.
type TenantSecretSpec struct {
// Name of a secret in the namespace configured for tenant secrets.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:Secret",displayName="Tenant Secret Name"
Name string `json:"name"`
}
// OIDCSpec defines the oidc configuration spec for lokiStack Gateway component.
type OIDCSpec struct {
// Secret defines the spec for the clientID, clientSecret and issuerCAPath for tenant's authentication.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant Secret"
Secret *TenantSecretSpec `json:"secret"`
// IssuerURL defines the URL for issuer.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Issuer URL"
IssuerURL string `json:"issuerURL"`
// RedirectURL defines the URL for redirect.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Redirect URL"
RedirectURL string `json:"redirectURL"`
GroupClaim string `json:"groupClaim"`
UsernameClaim string `json:"usernameClaim"`
}
// AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component.
type AuthenticationSpec struct {
// TenantName defines the name of the tenant.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant Name"
TenantName string `json:"tenantName"`
// TenantID defines the id of the tenant.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant ID"
TenantID string `json:"tenantId"`
// OIDC defines the spec for the OIDC tenant's authentication.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OIDC Configuration"
OIDC *OIDCSpec `json:"oidc"`
}
// ModeType is the authentication/authorization mode in which LokiStack Gateway will be configured.
//
// +kubebuilder:validation:Enum=static;dynamic;openshift-logging
type ModeType string
const (
// Static mode asserts the Authorization Spec's Roles and RoleBindings
// using an in-process OpenPolicyAgent Rego authorizer.
Static ModeType = "static"
// Dynamic mode delegates the authorization to a third-party OPA-compatible endpoint.
Dynamic ModeType = "dynamic"
// OpenshiftLogging mode provides fully automatic OpenShift in-cluster authentication and authorization support.
OpenshiftLogging ModeType = "openshift-logging"
)
// TenantsSpec defines the mode, authentication and authorization
// configuration of the lokiStack gateway component.
type TenantsSpec struct {
// Mode defines the mode in which lokistack-gateway component will be configured.
//
// +required
// +kubebuilder:validation:Required
// +kubebuilder:default:=openshift-logging
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:static","urn:alm:descriptor:com.tectonic.ui:select:dynamic","urn:alm:descriptor:com.tectonic.ui:select:openshift-logging"},displayName="Mode"
Mode ModeType `json:"mode"`
// Authentication defines the lokistack-gateway component authentication configuration spec per tenant.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Authentication"
Authentication []AuthenticationSpec `json:"authentication,omitempty"`
// Authorization defines the lokistack-gateway component authorization configuration spec per tenant.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Authorization"
Authorization *AuthorizationSpec `json:"authorization,omitempty"`
}
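// exampleStaticTenants is an illustrative sketch (not part of the original
// file) showing how the tenant types above fit together in static mode:
// per-tenant OIDC authentication plus in-process roles and role bindings
// instead of an external OPA endpoint. All names, URLs and secret names
// below are placeholders.
func exampleStaticTenants() TenantsSpec {
	return TenantsSpec{
		Mode: Static,
		Authentication: []AuthenticationSpec{{
			TenantName: "application",
			TenantID:   "application",
			OIDC: &OIDCSpec{
				Secret:      &TenantSecretSpec{Name: "application-oidc"},
				IssuerURL:   "https://issuer.example.com",
				RedirectURL: "https://gateway.example.com/oidc/application/callback",
			},
		}},
		Authorization: &AuthorizationSpec{
			Roles: []RoleSpec{{
				Name:        "read-write",
				Resources:   []string{"logs"},
				Tenants:     []string{"application"},
				Permissions: []PermissionType{Read, Write},
			}},
			RoleBindings: []RoleBindingsSpec{{
				Name:     "developers",
				Subjects: []Subject{{Name: "dev-group", Kind: Group}},
				Roles:    []string{"read-write"},
			}},
		},
	}
}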
// LokiComponentSpec defines the requirements to configure scheduling
// of each loki component individually.
type LokiComponentSpec struct {
// Replicas defines the number of replica pods of the component.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:hidden"
Replicas int32 `json:"replicas,omitempty"`
// NodeSelector defines the labels required by a node to schedule
// the component onto it.
//
// +optional
// +kubebuilder:validation:Optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Tolerations defines the tolerations required by a node to schedule
// the component onto it.
//
// +optional
// +kubebuilder:validation:Optional
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
}
// LokiTemplateSpec defines the template of all requirements to configure
// scheduling of all Loki components to be deployed.
type LokiTemplateSpec struct {
// Compactor defines the compaction component spec.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Compactor pods"
Compactor *LokiComponentSpec `json:"compactor,omitempty"`
// Distributor defines the distributor component spec.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Distributor pods"
Distributor *LokiComponentSpec `json:"distributor,omitempty"`
// Ingester defines the ingester component spec.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Ingester pods"
Ingester *LokiComponentSpec `json:"ingester,omitempty"`
// Querier defines the querier component spec.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Querier pods"
Querier *LokiComponentSpec `json:"querier,omitempty"`
// QueryFrontend defines the query frontend component spec.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Query Frontend pods"
QueryFrontend *LokiComponentSpec `json:"queryFrontend,omitempty"`
// Gateway defines the lokistack gateway component spec.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Gateway pods"
Gateway *LokiComponentSpec `json:"gateway,omitempty"`
// IndexGateway defines the index gateway component spec.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Index Gateway pods"
IndexGateway *LokiComponentSpec `json:"indexGateway,omitempty"`
}
// ObjectStorageSecretSpec is a secret reference containing name only, no namespace.
type ObjectStorageSecretSpec struct {
// Name of a secret in the namespace configured for object storage secrets.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:Secret",displayName="Object Storage Secret"
Name string `json:"name"`
}
// ObjectStorageSpec defines the requirements to access the object
// storage bucket to persist logs by the ingester component.
type ObjectStorageSpec struct {
// Secret for object storage authentication.
// Name of a secret in the same namespace as the cluster logging operator.
//
// +required
// +kubebuilder:validation:Required
Secret ObjectStorageSecretSpec `json:"secret"`
}
// QueryLimitSpec defines the limits applied at the query path.
type QueryLimitSpec struct {
// MaxEntriesLimitsPerQuery defines the maximum number of log entries
// that will be returned for a query.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Entries Limit per Query"
MaxEntriesLimitPerQuery int32 `json:"maxEntriesLimitPerQuery,omitempty"`
// MaxChunksPerQuery defines the maximum number of chunks
// that can be fetched by a single query.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Chunk per Query"
MaxChunksPerQuery int32 `json:"maxChunksPerQuery,omitempty"`
// MaxQuerySeries defines the maximum number of unique series
// that is returned by a metric query.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Query Series"
MaxQuerySeries int32 `json:"maxQuerySeries,omitempty"`
}
// IngestionLimitSpec defines the limits applied at the ingestion path.
type IngestionLimitSpec struct {
// IngestionRate defines the sample size per second. Units MB.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Ingestion Rate (in MB)"
IngestionRate int32 `json:"ingestionRate,omitempty"`
// IngestionBurstSize defines the local rate-limited sample size per
// distributor replica. It should be set to at least the
// maximum log size expected in a single push request.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Ingestion Burst Size (in MB)"
IngestionBurstSize int32 `json:"ingestionBurstSize,omitempty"`
// MaxLabelNameLength defines the maximum number of characters allowed
// for label keys in log streams.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Label Name Length"
MaxLabelNameLength int32 `json:"maxLabelNameLength,omitempty"`
// MaxLabelValueLength defines the maximum number of characters allowed
// for label values in log streams.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Label Value Length"
MaxLabelValueLength int32 `json:"maxLabelValueLength,omitempty"`
// MaxLabelNamesPerSeries defines the maximum number of label names per series
// in each log stream.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Labels Names per Series"
MaxLabelNamesPerSeries int32 `json:"maxLabelNamesPerSeries,omitempty"`
// MaxGlobalStreamsPerTenant defines the maximum number of active streams
// per tenant, across the cluster.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Global Streams per Tenant"
MaxGlobalStreamsPerTenant int32 `json:"maxGlobalStreamsPerTenant,omitempty"`
// MaxLineSize defines the maximum line size on ingestion path. Units in Bytes.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Line Size"
MaxLineSize int32 `json:"maxLineSize,omitempty"`
}
// LimitsTemplateSpec defines the limits applied at ingestion or query path.
type LimitsTemplateSpec struct {
// IngestionLimits defines the limits applied on ingested log streams.
//
// +optional
// +kubebuilder:validation:Optional
IngestionLimits *IngestionLimitSpec `json:"ingestion,omitempty"`
// QueryLimits defines the limit applied on querying log streams.
//
// +optional
// +kubebuilder:validation:Optional
QueryLimits *QueryLimitSpec `json:"queries,omitempty"`
}
// LimitsSpec defines the spec for limits applied at ingestion or query
// path across the cluster or per tenant.
type LimitsSpec struct {
// Global defines the limits applied globally across the cluster.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Global Limits"
Global *LimitsTemplateSpec `json:"global,omitempty"`
// Tenants defines the limits applied per tenant.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Limits per Tenant"
Tenants map[string]LimitsTemplateSpec `json:"tenants,omitempty"`
}
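// exampleLimits is an illustrative sketch (not part of the original file)
// combining the limit types above: global query limits plus a stricter
// ingestion limit for one hypothetical tenant named "application".
func exampleLimits() *LimitsSpec {
	return &LimitsSpec{
		Global: &LimitsTemplateSpec{
			QueryLimits: &QueryLimitSpec{
				MaxEntriesLimitPerQuery: 5000,
				MaxQuerySeries:          500,
			},
		},
		Tenants: map[string]LimitsTemplateSpec{
			"application": {
				IngestionLimits: &IngestionLimitSpec{
					IngestionRate:      4, // MB per second
					IngestionBurstSize: 6, // MB
				},
			},
		},
	}
}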
// LokiStackSpec defines the desired state of LokiStack
type LokiStackSpec struct {
// ManagementState defines if the CR should be managed by the operator or not.
// Default is managed.
//
// +required
// +kubebuilder:validation:Required
// +kubebuilder:default:=Managed
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:Managed","urn:alm:descriptor:com.tectonic.ui:select:Unmanaged"},displayName="Management State"
ManagementState ManagementStateType `json:"managementState,omitempty"`
// Size defines one of the supported Loki deployment scale-out sizes.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small","urn:alm:descriptor:com.tectonic.ui:select:1x.small","urn:alm:descriptor:com.tectonic.ui:select:1x.medium"},displayName="LokiStack Size"
Size LokiStackSizeType `json:"size"`
// Storage defines the spec for the object storage endpoint to store logs.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Object Storage"
Storage ObjectStorageSpec `json:"storage"`
// Storage class name defines the storage class for ingester/querier PVCs.
//
// +required
// +kubebuilder:validation:Required
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:StorageClass",displayName="Storage Class Name"
StorageClassName string `json:"storageClassName"`
// ReplicationFactor defines the policy for log stream replication.
//
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:Minimum:=1
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Replication Factor"
ReplicationFactor int32 `json:"replicationFactor"`
// Limits defines the limits to be applied to log stream processing.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Rate Limiting"
Limits *LimitsSpec `json:"limits,omitempty"`
// Template defines the resource/limits/tolerations/nodeselectors per component
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Node Placement"
Template *LokiTemplateSpec `json:"template,omitempty"`
// Tenants defines the per-tenant authentication and authorization spec for the lokistack-gateway component.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenants Configuration"
Tenants *TenantsSpec `json:"tenants,omitempty"`
}
// LokiStackConditionType defines the type for the condition types of a Loki deployment.
type LokiStackConditionType string
const (
// ConditionReady defines the condition that all components in the Loki deployment are ready.
ConditionReady LokiStackConditionType = "Ready"
// ConditionPending defines the condition that some or all components are in a pending state.
ConditionPending LokiStackConditionType = "Pending"
// ConditionFailed defines the condition that components in the Loki deployment failed to roll out.
ConditionFailed LokiStackConditionType = "Failed"
// ConditionDegraded defines the condition that some or all components in the Loki deployment
// are degraded or the cluster cannot connect to object storage.
ConditionDegraded LokiStackConditionType = "Degraded"
)
// LokiStackConditionReason defines the type for valid reasons of a Loki deployment's conditions.
type LokiStackConditionReason string
const (
// ReasonFailedComponents when all/some LokiStack components fail to roll out.
ReasonFailedComponents LokiStackConditionReason = "FailedComponents"
// ReasonPendingComponents when all/some LokiStack components are pending on dependencies.
ReasonPendingComponents LokiStackConditionReason = "PendingComponents"
// ReasonReadyComponents when all LokiStack components are ready to serve traffic.
ReasonReadyComponents LokiStackConditionReason = "ReadyComponents"
// ReasonMissingObjectStorageSecret when the required secret to store logs to object
// storage is missing.
ReasonMissingObjectStorageSecret LokiStackConditionReason = "MissingObjectStorageSecret"
// ReasonInvalidObjectStorageSecret when the format of the secret is invalid.
ReasonInvalidObjectStorageSecret LokiStackConditionReason = "InvalidObjectStorageSecret"
// ReasonInvalidReplicationConfiguration when the configured replication factor is not valid
// with the selected cluster size.
ReasonInvalidReplicationConfiguration LokiStackConditionReason = "InvalidReplicationConfiguration"
// ReasonMissingGatewayTenantSecret when the required tenant secret
// for authentication is missing.
ReasonMissingGatewayTenantSecret LokiStackConditionReason = "MissingGatewayTenantSecret"
// ReasonInvalidGatewayTenantSecret when the format of the secret is invalid.
ReasonInvalidGatewayTenantSecret LokiStackConditionReason = "InvalidGatewayTenantSecret"
// ReasonInvalidTenantsConfiguration when the tenant configuration provided is invalid.
ReasonInvalidTenantsConfiguration LokiStackConditionReason = "InvalidTenantsConfiguration"
// ReasonMissingGatewayOpenShiftBaseDomain when the reconciler cannot look up the OpenShift DNS base domain.
ReasonMissingGatewayOpenShiftBaseDomain LokiStackConditionReason = "MissingGatewayOpenShiftBaseDomain"
)
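The condition types and reasons above are what the reconciler reports on the LokiStack status (defined further below). A minimal sketch of how such a condition could be recorded, assuming the standard apimachinery helper meta.SetStatusCondition; the markReady helper, its message, and the import path are illustrative assumptions, not code from this change:

package main

import (
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" // assumed import path
)

// markReady is a hypothetical helper: it upserts the Ready condition on a
// LokiStack status, replacing any existing condition of the same type.
func markReady(s *lokiv1beta1.LokiStackStatus) {
    meta.SetStatusCondition(&s.Conditions, metav1.Condition{
        Type:    string(lokiv1beta1.ConditionReady),
        Status:  metav1.ConditionTrue,
        Reason:  string(lokiv1beta1.ReasonReadyComponents),
        Message: "all components ready",
    })
}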
// PodStatusMap defines the type for mapping pod status to pod name.
type PodStatusMap map[corev1.PodPhase][]string
// LokiStackComponentStatus defines the map of per pod status per LokiStack component.
// Each component is represented by a separate map of v1.Phase to a list of pods.
type LokiStackComponentStatus struct {
// Compactor is a map to the pod status of the compactor pod.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Compactor",order=5
Compactor PodStatusMap `json:"compactor,omitempty"`
// Distributor is a map to the per pod status of the distributor deployment
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Distributor",order=1
Distributor PodStatusMap `json:"distributor,omitempty"`
// IndexGateway is a map to the per pod status of the index gateway statefulset
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="IndexGateway",order=6
IndexGateway PodStatusMap `json:"indexGateway,omitempty"`
// Ingester is a map to the per pod status of the ingester statefulset
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Ingester",order=2
Ingester PodStatusMap `json:"ingester,omitempty"`
// Querier is a map to the per pod status of the querier deployment
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Querier",order=3
Querier PodStatusMap `json:"querier,omitempty"`
// QueryFrontend is a map to the per pod status of the query frontend deployment
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Query Frontend",order=4
QueryFrontend PodStatusMap `json:"queryFrontend,omitempty"`
// Gateway is a map to the per pod status of the lokistack gateway deployment.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Gateway",order=5
Gateway PodStatusMap `json:"gateway,omitempty"`
}
// LokiStackStatus defines the observed state of LokiStack
type LokiStackStatus struct {
// Components provides a summary of all Loki pod statuses grouped
// per component.
//
// +optional
// +kubebuilder:validation:Optional
Components LokiStackComponentStatus `json:"components,omitempty"`
// Conditions of the Loki deployment health.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:io.kubernetes.conditions"
Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:categories=logging
// LokiStack is the Schema for the lokistacks API
//
// +operator-sdk:csv:customresourcedefinitions:displayName="LokiStack",resources={{Deployment,v1},{StatefulSet,v1},{ConfigMap,v1},{Ingress,v1},{Service,v1},{ServiceAccount,v1},{PersistentVolumeClaims,v1},{Route,v1},{ServiceMonitor,v1}}
type LokiStack struct {
Spec LokiStackSpec `json:"spec,omitempty"`
Status LokiStackStatus `json:"status,omitempty"`
metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
}
// +kubebuilder:object:root=true
// LokiStackList contains a list of LokiStack
type LokiStackList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []LokiStack `json:"items"`
}
func init() {
SchemeBuilder.Register(&LokiStack{}, &LokiStackList{})
}

@@ -0,0 +1,666 @@
// +build !ignore_autogenerated
// Code generated by controller-gen. DO NOT EDIT.
package v1beta1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) {
*out = *in
if in.OIDC != nil {
in, out := &in.OIDC, &out.OIDC
*out = new(OIDCSpec)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec.
func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec {
if in == nil {
return nil
}
out := new(AuthenticationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthorizationSpec) DeepCopyInto(out *AuthorizationSpec) {
*out = *in
if in.OPA != nil {
in, out := &in.OPA, &out.OPA
*out = new(OPASpec)
**out = **in
}
if in.Roles != nil {
in, out := &in.Roles, &out.Roles
*out = make([]RoleSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.RoleBindings != nil {
in, out := &in.RoleBindings, &out.RoleBindings
*out = make([]RoleBindingsSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationSpec.
func (in *AuthorizationSpec) DeepCopy() *AuthorizationSpec {
if in == nil {
return nil
}
out := new(AuthorizationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngestionLimitSpec) DeepCopyInto(out *IngestionLimitSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestionLimitSpec.
func (in *IngestionLimitSpec) DeepCopy() *IngestionLimitSpec {
if in == nil {
return nil
}
out := new(IngestionLimitSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitsSpec) DeepCopyInto(out *LimitsSpec) {
*out = *in
if in.Global != nil {
in, out := &in.Global, &out.Global
*out = new(LimitsTemplateSpec)
(*in).DeepCopyInto(*out)
}
if in.Tenants != nil {
in, out := &in.Tenants, &out.Tenants
*out = make(map[string]LimitsTemplateSpec, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsSpec.
func (in *LimitsSpec) DeepCopy() *LimitsSpec {
if in == nil {
return nil
}
out := new(LimitsSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitsTemplateSpec) DeepCopyInto(out *LimitsTemplateSpec) {
*out = *in
if in.IngestionLimits != nil {
in, out := &in.IngestionLimits, &out.IngestionLimits
*out = new(IngestionLimitSpec)
**out = **in
}
if in.QueryLimits != nil {
in, out := &in.QueryLimits, &out.QueryLimits
*out = new(QueryLimitSpec)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsTemplateSpec.
func (in *LimitsTemplateSpec) DeepCopy() *LimitsTemplateSpec {
if in == nil {
return nil
}
out := new(LimitsTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LokiComponentSpec) DeepCopyInto(out *LokiComponentSpec) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiComponentSpec.
func (in *LokiComponentSpec) DeepCopy() *LokiComponentSpec {
if in == nil {
return nil
}
out := new(LokiComponentSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LokiStack) DeepCopyInto(out *LokiStack) {
*out = *in
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStack.
func (in *LokiStack) DeepCopy() *LokiStack {
if in == nil {
return nil
}
out := new(LokiStack)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LokiStack) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LokiStackComponentStatus) DeepCopyInto(out *LokiStackComponentStatus) {
*out = *in
if in.Compactor != nil {
in, out := &in.Compactor, &out.Compactor
*out = make(PodStatusMap, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.Distributor != nil {
in, out := &in.Distributor, &out.Distributor
*out = make(PodStatusMap, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.IndexGateway != nil {
in, out := &in.IndexGateway, &out.IndexGateway
*out = make(PodStatusMap, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.Ingester != nil {
in, out := &in.Ingester, &out.Ingester
*out = make(PodStatusMap, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.Querier != nil {
in, out := &in.Querier, &out.Querier
*out = make(PodStatusMap, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.QueryFrontend != nil {
in, out := &in.QueryFrontend, &out.QueryFrontend
*out = make(PodStatusMap, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.Gateway != nil {
in, out := &in.Gateway, &out.Gateway
*out = make(PodStatusMap, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackComponentStatus.
func (in *LokiStackComponentStatus) DeepCopy() *LokiStackComponentStatus {
if in == nil {
return nil
}
out := new(LokiStackComponentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LokiStackList) DeepCopyInto(out *LokiStackList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]LokiStack, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackList.
func (in *LokiStackList) DeepCopy() *LokiStackList {
if in == nil {
return nil
}
out := new(LokiStackList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LokiStackList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LokiStackSpec) DeepCopyInto(out *LokiStackSpec) {
*out = *in
out.Storage = in.Storage
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = new(LimitsSpec)
(*in).DeepCopyInto(*out)
}
if in.Template != nil {
in, out := &in.Template, &out.Template
*out = new(LokiTemplateSpec)
(*in).DeepCopyInto(*out)
}
if in.Tenants != nil {
in, out := &in.Tenants, &out.Tenants
*out = new(TenantsSpec)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackSpec.
func (in *LokiStackSpec) DeepCopy() *LokiStackSpec {
if in == nil {
return nil
}
out := new(LokiStackSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LokiStackStatus) DeepCopyInto(out *LokiStackStatus) {
*out = *in
in.Components.DeepCopyInto(&out.Components)
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackStatus.
func (in *LokiStackStatus) DeepCopy() *LokiStackStatus {
if in == nil {
return nil
}
out := new(LokiStackStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LokiTemplateSpec) DeepCopyInto(out *LokiTemplateSpec) {
*out = *in
if in.Compactor != nil {
in, out := &in.Compactor, &out.Compactor
*out = new(LokiComponentSpec)
(*in).DeepCopyInto(*out)
}
if in.Distributor != nil {
in, out := &in.Distributor, &out.Distributor
*out = new(LokiComponentSpec)
(*in).DeepCopyInto(*out)
}
if in.Ingester != nil {
in, out := &in.Ingester, &out.Ingester
*out = new(LokiComponentSpec)
(*in).DeepCopyInto(*out)
}
if in.Querier != nil {
in, out := &in.Querier, &out.Querier
*out = new(LokiComponentSpec)
(*in).DeepCopyInto(*out)
}
if in.QueryFrontend != nil {
in, out := &in.QueryFrontend, &out.QueryFrontend
*out = new(LokiComponentSpec)
(*in).DeepCopyInto(*out)
}
if in.Gateway != nil {
in, out := &in.Gateway, &out.Gateway
*out = new(LokiComponentSpec)
(*in).DeepCopyInto(*out)
}
if in.IndexGateway != nil {
in, out := &in.IndexGateway, &out.IndexGateway
*out = new(LokiComponentSpec)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiTemplateSpec.
func (in *LokiTemplateSpec) DeepCopy() *LokiTemplateSpec {
if in == nil {
return nil
}
out := new(LokiTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OIDCSpec) DeepCopyInto(out *OIDCSpec) {
*out = *in
if in.Secret != nil {
in, out := &in.Secret, &out.Secret
*out = new(TenantSecretSpec)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCSpec.
func (in *OIDCSpec) DeepCopy() *OIDCSpec {
if in == nil {
return nil
}
out := new(OIDCSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OPASpec) DeepCopyInto(out *OPASpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OPASpec.
func (in *OPASpec) DeepCopy() *OPASpec {
if in == nil {
return nil
}
out := new(OPASpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectStorageSecretSpec) DeepCopyInto(out *ObjectStorageSecretSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSecretSpec.
func (in *ObjectStorageSecretSpec) DeepCopy() *ObjectStorageSecretSpec {
if in == nil {
return nil
}
out := new(ObjectStorageSecretSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) {
*out = *in
out.Secret = in.Secret
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec.
func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec {
if in == nil {
return nil
}
out := new(ObjectStorageSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in PodStatusMap) DeepCopyInto(out *PodStatusMap) {
{
in := &in
*out = make(PodStatusMap, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusMap.
func (in PodStatusMap) DeepCopy() PodStatusMap {
if in == nil {
return nil
}
out := new(PodStatusMap)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QueryLimitSpec) DeepCopyInto(out *QueryLimitSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryLimitSpec.
func (in *QueryLimitSpec) DeepCopy() *QueryLimitSpec {
if in == nil {
return nil
}
out := new(QueryLimitSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleBindingsSpec) DeepCopyInto(out *RoleBindingsSpec) {
*out = *in
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]Subject, len(*in))
copy(*out, *in)
}
if in.Roles != nil {
in, out := &in.Roles, &out.Roles
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingsSpec.
func (in *RoleBindingsSpec) DeepCopy() *RoleBindingsSpec {
if in == nil {
return nil
}
out := new(RoleBindingsSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleSpec) DeepCopyInto(out *RoleSpec) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Tenants != nil {
in, out := &in.Tenants, &out.Tenants
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Permissions != nil {
in, out := &in.Permissions, &out.Permissions
*out = make([]PermissionType, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleSpec.
func (in *RoleSpec) DeepCopy() *RoleSpec {
if in == nil {
return nil
}
out := new(RoleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Subject) DeepCopyInto(out *Subject) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
func (in *Subject) DeepCopy() *Subject {
if in == nil {
return nil
}
out := new(Subject)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TenantSecretSpec) DeepCopyInto(out *TenantSecretSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantSecretSpec.
func (in *TenantSecretSpec) DeepCopy() *TenantSecretSpec {
if in == nil {
return nil
}
out := new(TenantSecretSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TenantsSpec) DeepCopyInto(out *TenantsSpec) {
*out = *in
if in.Authentication != nil {
in, out := &in.Authentication, &out.Authentication
*out = make([]AuthenticationSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Authorization != nil {
in, out := &in.Authorization, &out.Authorization
*out = new(AuthorizationSpec)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantsSpec.
func (in *TenantsSpec) DeepCopy() *TenantsSpec {
if in == nil {
return nil
}
out := new(TenantsSpec)
in.DeepCopyInto(out)
return out
}
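A short usage sketch for the generated deep-copy helpers above, under the same assumed import path; the tenant name and byte values are illustrative. The point is that DeepCopy duplicates nested maps and pointers, so mutating the clone does not affect the original:

package main

import (
    "fmt"

    lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" // assumed import path
)

func main() {
    original := &lokiv1beta1.LimitsSpec{
        Tenants: map[string]lokiv1beta1.LimitsTemplateSpec{
            "application": {
                IngestionLimits: &lokiv1beta1.IngestionLimitSpec{MaxLineSize: 256000},
            },
        },
    }

    // LimitsSpec.DeepCopy allocates a new Tenants map and a new
    // IngestionLimitSpec for each entry, so the clone is fully independent.
    clone := original.DeepCopy()
    clone.Tenants["application"].IngestionLimits.MaxLineSize = 512000

    fmt.Println(original.Tenants["application"].IngestionLimits.MaxLineSize) // 256000
    fmt.Println(clone.Tenants["application"].IngestionLimits.MaxLineSize)    // 512000
}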

@@ -0,0 +1,20 @@
FROM scratch
# Core bundle labels.
LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1
LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/
LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/
LABEL operators.operatorframework.io.bundle.package.v1=loki-operator
LABEL operators.operatorframework.io.bundle.channels.v1=tech-preview
LABEL operators.operatorframework.io.metrics.builder=operator-sdk-unknown
LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
# Labels for testing.
LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1
LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/
# Copy files to locations specified by labels.
COPY bundle/manifests /manifests/
COPY bundle/metadata /metadata/
COPY bundle/tests/scorecard /tests/scorecard/

@@ -0,0 +1,28 @@
apiVersion: v1
kind: Service
metadata:
annotations:
service.beta.openshift.io/serving-cert-secret-name: loki-operator-metrics
creationTimestamp: null
labels:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/version: 0.0.1
name: loki-operator-controller-manager-metrics-service
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/version: 0.0.1
name: loki-operator-controller-manager
status:
loadBalancer: {}

@@ -0,0 +1,23 @@
apiVersion: v1
data:
controller_manager_config.yaml: |
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
kind: ControllerManagerConfig
health:
healthProbeBindAddress: :8081
metrics:
bindAddress: 127.0.0.1:8080
webhook:
port: 9443
leaderElection:
leaderElect: true
resourceName: e3716011.grafana.com
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/version: 0.0.1
name: loki-operator-manager-config

@@ -0,0 +1,25 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/version: 0.0.1
name: loki-operator
name: loki-operator-metrics-monitor
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
interval: 30s
path: /metrics
scheme: https
scrapeTimeout: 10s
targetPort: 8443
tlsConfig:
caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt
serverName: loki-operator-controller-manager-metrics-service.openshift-logging.svc
selector:
matchLabels:
app.kubernetes.io/name: loki-operator

@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/version: 0.0.1
name: loki-operator-metrics-reader
rules:
- nonResourceURLs:
- /metrics
verbs:
- get

@@ -0,0 +1,25 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
include.release.openshift.io/self-managed-high-availability: "true"
include.release.openshift.io/single-node-developer: "true"
creationTimestamp: null
labels:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/version: 0.0.1
name: loki-operator-prometheus
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch

@@ -0,0 +1,22 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations:
include.release.openshift.io/self-managed-high-availability: "true"
include.release.openshift.io/single-node-developer: "true"
creationTimestamp: null
labels:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/version: 0.0.1
name: loki-operator-prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: loki-operator-prometheus
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: openshift-monitoring

@@ -0,0 +1,701 @@
apiVersion: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
metadata:
annotations:
alm-examples: |-
[
{
"apiVersion": "loki.grafana.com/v1beta1",
"kind": "LokiStack",
"metadata": {
"name": "lokistack-sample"
},
"spec": {
"replicationFactor": 2,
"size": "1x.small",
"storage": {
"secret": {
"name": "test"
}
},
"storageClassName": "standard"
}
}
]
capabilities: Full Lifecycle
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:v0.0.1
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
### Loki Operator Namespace
The Loki Operator must be deployed to the global operator group namespace `openshift-logging`.
### Memory Considerations
Loki is a memory intensive application. The initial
set of OCP nodes may not be large enough to support the Loki stack. Additional OCP nodes must be added
to the OCP cluster if you desire to run with the recommended (or better) memory.
olm.skipRange: '>=4.6.0-0 <5.4.0'
operatorframework.io/cluster-monitoring: "true"
operatorframework.io/suggested-namespace: openshift-logging
operators.operatorframework.io/builder: operator-sdk-unknown
operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
support: AOS Cluster Logging
labels:
operatorframework.io/arch.amd64: supported
operatorframework.io/arch.ppc64le: supported
operatorframework.io/arch.s390x: supported
name: loki-operator.v0.0.1
namespace: placeholder
spec:
apiservicedefinitions: {}
customresourcedefinitions:
owned:
- description: LokiStack is the Schema for the lokistacks API
displayName: LokiStack
kind: LokiStack
name: lokistacks.loki.grafana.com
resources:
- kind: ConfigMap
name: ""
version: v1
- kind: Deployment
name: ""
version: v1
- kind: Ingress
name: ""
version: v1
- kind: PersistentVolumeClaims
name: ""
version: v1
- kind: Route
name: ""
version: v1
- kind: Service
name: ""
version: v1
- kind: ServiceAccount
name: ""
version: v1
- kind: ServiceMonitor
name: ""
version: v1
- kind: StatefulSet
name: ""
version: v1
specDescriptors:
- description: Limits defines the limits to be applied to log stream processing.
displayName: Rate Limiting
path: limits
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:advanced
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- description: IngestionBurstSize defines the local rate-limited sample size
per distributor replica. It should be set at least to the maximum logs size
expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: IngestionRate defines the sample size per second. Units MB.
displayName: Ingestion Rate (in MB)
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxGlobalStreamsPerTenant defines the maximum number of active
streams per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelNamesPerSeries defines the maximum number of label names
per series in each log stream.
displayName: Max Label Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelValueLength defines the maximum number of characters
allowed for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLineSize defines the maximum line size on ingestion path.
Units in Bytes.
displayName: Max Line Size
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxQuerySeries defines the maximum number of unique series
returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: Tenants defines the limits applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- description: IngestionBurstSize defines the local rate-limited sample size
per distributor replica. It should be set at least to the maximum logs size
expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: IngestionRate defines the sample size per second. Units MB.
displayName: Ingestion Rate (in MB)
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxGlobalStreamsPerTenant defines the maximum number of active
streams per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelNamesPerSeries defines the maximum number of label names
per series in each log stream.
displayName: Max Label Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelValueLength defines the maximum number of characters
allowed for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLineSize defines the maximum line size on ingestion path.
Units in Bytes.
displayName: Max Line Size
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxQuerySeries defines the maximum number of unique series
returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: ManagementState defines if the CR should be managed by the operator
or not. Default is managed.
displayName: Management State
path: managementState
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:select:Managed
- urn:alm:descriptor:com.tectonic.ui:select:Unmanaged
- description: ReplicationFactor defines the policy for log stream replication.
displayName: Replication Factor
path: replicationFactor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: Size defines one of the supported Loki deployment scale out sizes.
displayName: LokiStack Size
path: size
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small
- urn:alm:descriptor:com.tectonic.ui:select:1x.small
- urn:alm:descriptor:com.tectonic.ui:select:1x.medium
- description: Storage defines the spec for the object storage endpoint to store
logs.
displayName: Object Storage
path: storage
- description: Name of a secret in the namespace configured for object storage
secrets.
displayName: Object Storage Secret
path: storage.secret.name
x-descriptors:
- urn:alm:descriptor:io.kubernetes:Secret
- description: Storage class name defines the storage class for ingester/querier
PVCs.
displayName: Storage Class Name
path: storageClassName
x-descriptors:
- urn:alm:descriptor:io.kubernetes:StorageClass
- description: Template defines the resource/limits/tolerations/nodeselectors
per component
displayName: Node Placement
path: template
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:advanced
- description: Compactor defines the compaction component spec.
displayName: Compactor pods
path: template.compactor
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.compactor.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: Distributor defines the distributor component spec.
displayName: Distributor pods
path: template.distributor
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.distributor.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: Gateway defines the lokistack gateway component spec.
displayName: Gateway pods
path: template.gateway
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.gateway.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: IndexGateway defines the index gateway component spec.
displayName: Index Gateway pods
path: template.indexGateway
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.indexGateway.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: Ingester defines the ingester component spec.
displayName: Ingester pods
path: template.ingester
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.ingester.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: Querier defines the querier component spec.
displayName: Querier pods
path: template.querier
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.querier.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: QueryFrontend defines the query frontend component spec.
displayName: Query Frontend pods
path: template.queryFrontend
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.queryFrontend.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: Tenants defines the per-tenant authentication and authorization
spec for the lokistack-gateway component.
displayName: Tenants Configuration
path: tenants
- description: Authentication defines the lokistack-gateway component authentication
configuration spec per tenant.
displayName: Authentication
path: tenants.authentication
- description: OIDC defines the spec for the OIDC tenant's authentication.
displayName: OIDC Configuration
path: tenants.authentication[0].oidc
- description: IssuerURL defines the URL for issuer.
displayName: Issuer URL
path: tenants.authentication[0].oidc.issuerURL
- description: RedirectURL defines the URL for redirect.
displayName: Redirect URL
path: tenants.authentication[0].oidc.redirectURL
- description: Secret defines the spec for the clientID, clientSecret and issuerCAPath
for tenant's authentication.
displayName: Tenant Secret
path: tenants.authentication[0].oidc.secret
- description: Name of a secret in the namespace configured for tenant secrets.
displayName: Tenant Secret Name
path: tenants.authentication[0].oidc.secret.name
x-descriptors:
- urn:alm:descriptor:io.kubernetes:Secret
- description: TenantID defines the id of the tenant.
displayName: Tenant ID
path: tenants.authentication[0].tenantId
- description: TenantName defines the name of the tenant.
displayName: Tenant Name
path: tenants.authentication[0].tenantName
- description: Authorization defines the lokistack-gateway component authorization
configuration spec per tenant.
displayName: Authorization
path: tenants.authorization
- description: OPA defines the spec for the third-party endpoint for tenant's
authorization.
displayName: OPA Configuration
path: tenants.authorization.opa
- description: URL defines the third-party endpoint for authorization.
displayName: OpenPolicyAgent URL
path: tenants.authorization.opa.url
- description: RoleBindings defines configuration to bind a set of roles to
a set of subjects.
displayName: Static Role Bindings
path: tenants.authorization.roleBindings
- description: Roles defines a set of permissions to interact with a tenant.
displayName: Static Roles
path: tenants.authorization.roles
- description: Mode defines the mode in which lokistack-gateway component will
be configured.
displayName: Mode
path: tenants.mode
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:select:static
- urn:alm:descriptor:com.tectonic.ui:select:dynamic
- urn:alm:descriptor:com.tectonic.ui:select:openshift-logging
statusDescriptors:
- description: Distributor is a map to the per pod status of the distributor
deployment
displayName: Distributor
path: components.distributor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: Ingester is a map to the per pod status of the ingester statefulset
displayName: Ingester
path: components.ingester
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: Querier is a map to the per pod status of the querier deployment
displayName: Querier
path: components.querier
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: QueryFrontend is a map to the per pod status of the query frontend
deployment
displayName: Query Frontend
path: components.queryFrontend
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: Compactor is a map to the pod status of the compactor pod.
displayName: Compactor
path: components.compactor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: Gateway is a map to the per pod status of the lokistack gateway
deployment.
displayName: Gateway
path: components.gateway
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: IndexGateway is a map to the per pod status of the index gateway
statefulset
displayName: IndexGateway
path: components.indexGateway
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: Conditions of the Loki deployment health.
displayName: Conditions
path: conditions
x-descriptors:
- urn:alm:descriptor:io.kubernetes.conditions
version: v1beta1
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
### Loki Operator Namespace
The Loki Operator must be deployed to the global operator group namespace `openshift-logging`.
### Memory Considerations
Loki is a memory intensive application. The initial
set of OCP nodes may not be large enough to support the Loki cluster. Additional OCP nodes must be added
to the OCP cluster if you desire to run with the recommended (or better) memory.
displayName: Loki Operator
icon:
- base64data: PHN2ZyBpZD0iYWZiNDE1NDktYzU3MC00OWI3LTg1Y2QtNjU3NjAwZWRmMmUxIiBkYXRhLW5hbWU9IkxheWVyIDEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDcyMS4xNSA3MjEuMTUiPgogIDxkZWZzPgogICAgPHN0eWxlPgogICAgICAuYTQ0OGZkZWEtNGE0Yy00Njc4LTk3NmEtYzM3ODUzMDhhZTA2IHsKICAgICAgICBmaWxsOiAjZGIzOTI3OwogICAgICB9CgogICAgICAuZTEzMzA4YjgtNzQ4NS00Y2IwLTk3NjUtOGE1N2I5M2Y5MWE2IHsKICAgICAgICBmaWxsOiAjY2IzNzI4OwogICAgICB9CgogICAgICAuZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIHsKICAgICAgICBmaWxsOiAjZmZmOwogICAgICB9CgogICAgICAuYTA0MjBjYWMtZWJlNi00YzE4LWI5ODEtYWJiYTBiYTliMzY1IHsKICAgICAgICBmaWxsOiAjZTVlNWU0OwogICAgICB9CiAgICA8L3N0eWxlPgogIDwvZGVmcz4KICA8Y2lyY2xlIGNsYXNzPSJhNDQ4ZmRlYS00YTRjLTQ2NzgtOTc2YS1jMzc4NTMwOGFlMDYiIGN4PSIzNjAuNTgiIGN5PSIzNjAuNTgiIHI9IjM1OC4yOCIvPgogIDxwYXRoIGNsYXNzPSJlMTMzMDhiOC03NDg1LTRjYjAtOTc2NS04YTU3YjkzZjkxYTYiIGQ9Ik02MTMuNTQsMTA3LjMsMTA2Ljg4LDYxNGMxNDAsMTM4LjUxLDM2NS44MiwxMzguMDYsNTA1LjI2LTEuMzlTNzUyLDI0Ny4zMyw2MTMuNTQsMTA3LjNaIi8+CiAgPGc+CiAgICA8Y2lyY2xlIGNsYXNzPSJlNzcyODZmMS0yMmRhLTQ0ZDEtOGVmYi0xZDE0YjBjY2FlNjIiIGN4PSIyMzQuNyIgY3k9IjM1Ny4zIiByPSI0Ny43MiIvPgogICAgPGNpcmNsZSBjbGFzcz0iZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIiBjeD0iMjM0LjciIGN5PSIxODIuOTQiIHI9IjQ3LjcyIi8+CiAgICA8Y2lyY2xlIGNsYXNzPSJlNzcyODZmMS0yMmRhLTQ0ZDEtOGVmYi0xZDE0YjBjY2FlNjIiIGN4PSIyMzQuNyIgY3k9IjUzOC4yMSIgcj0iNDcuNzIiLz4KICA8L2c+CiAgPHBvbHlnb24gY2xhc3M9ImU3NzI4NmYxLTIyZGEtNDRkMS04ZWZiLTFkMTRiMGNjYWU2MiIgcG9pbnRzPSI0MzUuMTkgMzQ3LjMgMzkwLjU0IDM0Ny4zIDM5MC41NCAxNzIuOTQgMzE2LjE2IDE3Mi45NCAzMTYuMTYgMTkyLjk0IDM3MC41NCAxOTIuOTQgMzcwLjU0IDM0Ny4zIDMxNi4xNiAzNDcuMyAzMTYuMTYgMzY3LjMgMzcwLjU0IDM2Ny4zIDM3MC41NCA1MjEuNjcgMzE2LjE2IDUyMS42NyAzMTYuMTYgNTQxLjY3IDM5MC41NCA1NDEuNjcgMzkwLjU0IDM2Ny4zIDQzNS4xOSAzNjcuMyA0MzUuMTkgMzQ3LjMiLz4KICA8cG9seWdvbiBjbGFzcz0iZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIiBwb2ludHM9IjU5OS43NCAzMTcuMDMgNTU3Ljk3IDMxNy4wMyA1NTAuOTcgMzE3LjAzIDU1MC45NyAzMTAuMDMgNTUwLjk3IDI2OC4yNiA1NTAuOTcgMjY4LjI2IDQ2NC4zNiAyNjguMjYgNDY0LjM2IDQ0Ni4zNCA1OTkuNzQgNDQ2LjM0IDU5OS43NCAzMTcuMDMgNTk5Ljc0IDMxNy4wMyIvPgogIDxwb2x5Z29uIGNsYXNzPSJhMDQyMGNhYy1lYmU2LTRjMTgtYjk4MS1hYmJhMGJhOWIzNjUiIHBvaW50cz0iNTk5Ljc0IDMxMC4wMyA1NTcuOTcgMjY4LjI2IDU1Ny45NyAzMTAuMDMgNTk5Ljc0IDMxMC4wMyIvPgo8L3N2Zz4K
mediatype: image/svg+xml
install:
spec:
clusterPermissions:
- rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- serviceaccounts
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- deployments
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- config.openshift.io
resources:
- dnses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
- apiGroups:
- loki.grafana.com
resources:
- lokistacks
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- loki.grafana.com
resources:
- lokistacks/finalizers
verbs:
- update
- apiGroups:
- loki.grafana.com
resources:
- lokistacks/status
verbs:
- get
- patch
- update
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- route.openshift.io
resources:
- routes
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
serviceAccountName: default
deployments:
- name: loki-operator-controller-manager
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/version: 0.0.1
name: loki-operator-controller-manager
strategy: {}
template:
metadata:
labels:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/version: 0.0.1
name: loki-operator-controller-manager
spec:
containers:
- args:
- --with-lokistack-gateway
- --with-lokistack-gateway-route
- --with-cert-signing-service
- --with-service-monitors
- --with-tls-service-monitors
command:
- /manager
env:
- name: RELATED_IMAGE_LOKI
value: quay.io/openshift-logging/loki:v2.4.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
value: quay.io/observatorium/opa-openshift:latest
image: quay.io/openshift-logging/loki-operator:v0.0.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
ports:
- containerPort: 8080
name: metrics
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources: {}
securityContext:
allowPrivilegeEscalation: false
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --tls-cert-file=/var/run/secrets/serving-cert/tls.crt
- --tls-private-key-file=/var/run/secrets/serving-cert/tls.key
- --v=2
image: quay.io/openshift/origin-kube-rbac-proxy:latest
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
resources: {}
volumeMounts:
- mountPath: /var/run/secrets/serving-cert
name: loki-operator-metrics-cert
terminationGracePeriodSeconds: 10
volumes:
- name: loki-operator-metrics-cert
secret:
defaultMode: 420
optional: true
secretName: loki-operator-metrics
permissions:
- rules:
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
serviceAccountName: default
strategy: deployment
installModes:
- supported: true
type: OwnNamespace
- supported: false
type: SingleNamespace
- supported: false
type: MultiNamespace
- supported: true
type: AllNamespaces
keywords:
- logging
- loki
links:
- name: Loki Operator
url: https://github.com/grafana/loki
maintainers:
- email: loki-operator-team@googlegroups.com
name: Grafana Loki SIG Operator
maturity: alpha
provider:
name: Grafana.com
version: 0.0.1

@@ -0,0 +1,951 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
creationTimestamp: null
labels:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/version: 0.0.1
name: lokistacks.loki.grafana.com
spec:
group: loki.grafana.com
names:
categories:
- logging
kind: LokiStack
listKind: LokiStackList
plural: lokistacks
singular: lokistack
scope: Namespaced
versions:
- name: v1beta1
schema:
openAPIV3Schema:
description: LokiStack is the Schema for the lokistacks API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: LokiStackSpec defines the desired state of LokiStack
properties:
limits:
description: Limits defines the limits to be applied to log stream
processing.
properties:
global:
description: Global defines the limits applied globally across
the cluster.
properties:
ingestion:
description: IngestionLimits defines the limits applied on
ingested log streams.
properties:
ingestionBurstSize:
description: IngestionBurstSize defines the local rate-limited
sample size per distributor replica. It should be set at least
to the maximum logs size expected in a single push request.
format: int32
type: integer
ingestionRate:
description: IngestionRate defines the sample size per
second. Units MB.
format: int32
type: integer
maxGlobalStreamsPerTenant:
description: MaxGlobalStreamsPerTenant defines the maximum
number of active streams per tenant, across the cluster.
format: int32
type: integer
maxLabelNameLength:
description: MaxLabelNameLength defines the maximum number
of characters allowed for label keys in log streams.
format: int32
type: integer
maxLabelNamesPerSeries:
description: MaxLabelNamesPerSeries defines the maximum
number of label names per series in each log stream.
format: int32
type: integer
maxLabelValueLength:
description: MaxLabelValueLength defines the maximum number
of characters allowed for label values in log streams.
format: int32
type: integer
maxLineSize:
description: MaxLineSize defines the maximum line size
on ingestion path. Units in Bytes.
format: int32
type: integer
type: object
queries:
description: QueryLimits defines the limit applied on querying
log streams.
properties:
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number
of chunks that can be fetched by a single query.
format: int32
type: integer
maxEntriesLimitPerQuery:
description: MaxEntriesLimitsPerQuery defines the maximum
number of log entries that will be returned for a query.
format: int32
type: integer
maxQuerySeries:
description: MaxQuerySeries defines the maximum number of
unique series returned by a metric query.
format: int32
type: integer
type: object
type: object
tenants:
additionalProperties:
description: LimitsTemplateSpec defines the limits applied
at ingestion or query path.
properties:
ingestion:
description: IngestionLimits defines the limits applied
on ingested log streams.
properties:
ingestionBurstSize:
description: IngestionBurstSize defines the local rate-limited
sample size per distributor replica. It should be set at least
to the maximum log size expected in a single push request.
format: int32
type: integer
ingestionRate:
description: IngestionRate defines the sample size per
second. Units MB.
format: int32
type: integer
maxGlobalStreamsPerTenant:
description: MaxGlobalStreamsPerTenant defines the maximum
number of active streams per tenant, across the cluster.
format: int32
type: integer
maxLabelNameLength:
description: MaxLabelNameLength defines the maximum
number of characters allowed for label keys in log
streams.
format: int32
type: integer
maxLabelNamesPerSeries:
description: MaxLabelNamesPerSeries defines the maximum
number of label names per series in each log stream.
format: int32
type: integer
maxLabelValueLength:
description: MaxLabelValueLength defines the maximum
number of characters allowed for label values in log
streams.
format: int32
type: integer
maxLineSize:
description: MaxLineSize defines the maximum line size
on ingestion path. Units in Bytes.
format: int32
type: integer
type: object
queries:
description: QueryLimits defines the limit applied on querying
log streams.
properties:
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number
of chunks that can be fetched by a single query.
format: int32
type: integer
maxEntriesLimitPerQuery:
description: MaxEntriesLimitPerQuery defines the maximum
number of log entries that will be returned for a
query.
format: int32
type: integer
maxQuerySeries:
description: MaxQuerySeries defines the maximum number
of unique series that is returned by a metric query.
format: int32
type: integer
type: object
type: object
description: Tenants defines the limits applied per tenant.
type: object
type: object
managementState:
default: Managed
description: ManagementState defines if the CR should be managed by
the operator or not. Default is managed.
enum:
- Managed
- Unmanaged
type: string
replicationFactor:
description: ReplicationFactor defines the policy for log stream replication.
format: int32
minimum: 1
type: integer
size:
description: Size defines one of the supported Loki deployment
scale-out sizes.
enum:
- 1x.extra-small
- 1x.small
- 1x.medium
type: string
storage:
description: Storage defines the spec for the object storage endpoint
to store logs.
properties:
secret:
description: Secret for object storage authentication. Name of
a secret in the same namespace as the cluster logging operator.
properties:
name:
description: Name of a secret in the namespace configured
for object storage secrets.
type: string
required:
- name
type: object
required:
- secret
type: object
storageClassName:
description: Storage class name defines the storage class for ingester/querier
PVCs.
type: string
template:
description: Template defines the resource/limits/tolerations/nodeSelectors
per component.
properties:
compactor:
description: Compactor defines the compaction component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a
node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of
the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required
by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates
any taint that matches the triple <key,value,effect> using
the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match.
Empty means match all taint effects. When specified,
allowed values are NoSchedule, PreferNoSchedule and
NoExecute.
type: string
key:
description: Key is the taint key that the toleration
applies to. Empty means match all taint keys. If the
key is empty, operator must be Exists; this combination
means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship
to the value. Valid operators are Exists and Equal.
Defaults to Equal. Exists is equivalent to wildcard
for value, so that a pod can tolerate all taints of
a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period
of time the toleration (which must be of effect NoExecute,
otherwise this field is ignored) tolerates the taint.
By default, it is not set, which means tolerate the
taint forever (do not evict). Zero and negative values
will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration
matches to. If the operator is Exists, the value should
be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
distributor:
description: Distributor defines the distributor component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a
node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of
the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required
by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates
any taint that matches the triple <key,value,effect> using
the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match.
Empty means match all taint effects. When specified,
allowed values are NoSchedule, PreferNoSchedule and
NoExecute.
type: string
key:
description: Key is the taint key that the toleration
applies to. Empty means match all taint keys. If the
key is empty, operator must be Exists; this combination
means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship
to the value. Valid operators are Exists and Equal.
Defaults to Equal. Exists is equivalent to wildcard
for value, so that a pod can tolerate all taints of
a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period
of time the toleration (which must be of effect NoExecute,
otherwise this field is ignored) tolerates the taint.
By default, it is not set, which means tolerate the
taint forever (do not evict). Zero and negative values
will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration
matches to. If the operator is Exists, the value should
be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
gateway:
description: Gateway defines the lokistack gateway component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a
node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of
the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required
by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates
any taint that matches the triple <key,value,effect> using
the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match.
Empty means match all taint effects. When specified,
allowed values are NoSchedule, PreferNoSchedule and
NoExecute.
type: string
key:
description: Key is the taint key that the toleration
applies to. Empty means match all taint keys. If the
key is empty, operator must be Exists; this combination
means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship
to the value. Valid operators are Exists and Equal.
Defaults to Equal. Exists is equivalent to wildcard
for value, so that a pod can tolerate all taints of
a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period
of time the toleration (which must be of effect NoExecute,
otherwise this field is ignored) tolerates the taint.
By default, it is not set, which means tolerate the
taint forever (do not evict). Zero and negative values
will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration
matches to. If the operator is Exists, the value should
be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
indexGateway:
description: IndexGateway defines the index gateway component
spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a
node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of
the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required
by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates
any taint that matches the triple <key,value,effect> using
the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match.
Empty means match all taint effects. When specified,
allowed values are NoSchedule, PreferNoSchedule and
NoExecute.
type: string
key:
description: Key is the taint key that the toleration
applies to. Empty means match all taint keys. If the
key is empty, operator must be Exists; this combination
means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship
to the value. Valid operators are Exists and Equal.
Defaults to Equal. Exists is equivalent to wildcard
for value, so that a pod can tolerate all taints of
a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period
of time the toleration (which must be of effect NoExecute,
otherwise this field is ignored) tolerates the taint.
By default, it is not set, which means tolerate the
taint forever (do not evict). Zero and negative values
will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration
matches to. If the operator is Exists, the value should
be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
ingester:
description: Ingester defines the ingester component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a
node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of
the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required
by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates
any taint that matches the triple <key,value,effect> using
the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match.
Empty means match all taint effects. When specified,
allowed values are NoSchedule, PreferNoSchedule and
NoExecute.
type: string
key:
description: Key is the taint key that the toleration
applies to. Empty means match all taint keys. If the
key is empty, operator must be Exists; this combination
means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship
to the value. Valid operators are Exists and Equal.
Defaults to Equal. Exists is equivalent to wildcard
for value, so that a pod can tolerate all taints of
a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period
of time the toleration (which must be of effect NoExecute,
otherwise this field is ignored) tolerates the taint.
By default, it is not set, which means tolerate the
taint forever (do not evict). Zero and negative values
will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration
matches to. If the operator is Exists, the value should
be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
querier:
description: Querier defines the querier component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a
node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of
the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required
by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates
any taint that matches the triple <key,value,effect> using
the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match.
Empty means match all taint effects. When specified,
allowed values are NoSchedule, PreferNoSchedule and
NoExecute.
type: string
key:
description: Key is the taint key that the toleration
applies to. Empty means match all taint keys. If the
key is empty, operator must be Exists; this combination
means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship
to the value. Valid operators are Exists and Equal.
Defaults to Equal. Exists is equivalent to wildcard
for value, so that a pod can tolerate all taints of
a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period
of time the toleration (which must be of effect NoExecute,
otherwise this field is ignored) tolerates the taint.
By default, it is not set, which means tolerate the
taint forever (do not evict). Zero and negative values
will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration
matches to. If the operator is Exists, the value should
be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
queryFrontend:
description: QueryFrontend defines the query frontend component
spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a
node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of
the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required
by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates
any taint that matches the triple <key,value,effect> using
the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match.
Empty means match all taint effects. When specified,
allowed values are NoSchedule, PreferNoSchedule and
NoExecute.
type: string
key:
description: Key is the taint key that the toleration
applies to. Empty means match all taint keys. If the
key is empty, operator must be Exists; this combination
means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship
to the value. Valid operators are Exists and Equal.
Defaults to Equal. Exists is equivalent to wildcard
for value, so that a pod can tolerate all taints of
a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period
of time the toleration (which must be of effect NoExecute,
otherwise this field is ignored) tolerates the taint.
By default, it is not set, which means tolerate the
taint forever (do not evict). Zero and negative values
will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration
matches to. If the operator is Exists, the value should
be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
type: object
tenants:
description: Tenants defines the per-tenant authentication and authorization
spec for the lokistack-gateway component.
properties:
authentication:
description: Authentication defines the lokistack-gateway component
authentication configuration spec per tenant.
items:
description: AuthenticationSpec defines the oidc configuration
per tenant for lokiStack Gateway component.
properties:
oidc:
description: OIDC defines the spec for the OIDC tenant's
authentication.
properties:
groupClaim:
type: string
issuerURL:
description: IssuerURL defines the URL for issuer.
type: string
redirectURL:
description: RedirectURL defines the URL for redirect.
type: string
secret:
description: Secret defines the spec for the clientID,
clientSecret and issuerCAPath for tenant's authentication.
properties:
name:
description: Name of a secret in the namespace configured
for tenant secrets.
type: string
required:
- name
type: object
usernameClaim:
type: string
required:
- groupClaim
- issuerURL
- redirectURL
- secret
- usernameClaim
type: object
tenantId:
description: TenantID defines the id of the tenant.
type: string
tenantName:
description: TenantName defines the name of the tenant.
type: string
required:
- oidc
- tenantId
- tenantName
type: object
type: array
authorization:
description: Authorization defines the lokistack-gateway component
authorization configuration spec per tenant.
properties:
opa:
description: OPA defines the spec for the third-party endpoint
for tenant's authorization.
properties:
url:
description: URL defines the third-party endpoint for
authorization.
type: string
required:
- url
type: object
roleBindings:
description: RoleBindings defines configuration to bind a
set of roles to a set of subjects.
items:
description: RoleBindingsSpec binds a set of roles to a
set of subjects.
properties:
name:
type: string
roles:
items:
type: string
type: array
subjects:
items:
description: Subject represents a subject that has
been bound to a role.
properties:
kind:
description: SubjectKind is a kind of LokiStack
Gateway RBAC subject.
enum:
- user
- group
type: string
name:
type: string
required:
- kind
- name
type: object
type: array
required:
- name
- roles
- subjects
type: object
type: array
roles:
description: Roles defines a set of permissions to interact
with a tenant.
items:
description: RoleSpec describes a set of permissions to
interact with a tenant.
properties:
name:
type: string
permissions:
items:
description: PermissionType is a LokiStack Gateway
RBAC permission.
enum:
- read
- write
type: string
type: array
resources:
items:
type: string
type: array
tenants:
items:
type: string
type: array
required:
- name
- permissions
- resources
- tenants
type: object
type: array
type: object
mode:
default: openshift-logging
description: Mode defines the mode in which lokistack-gateway
component will be configured.
enum:
- static
- dynamic
- openshift-logging
type: string
required:
- mode
type: object
required:
- replicationFactor
- size
- storage
- storageClassName
type: object
status:
description: LokiStackStatus defines the observed state of LokiStack
properties:
components:
description: Components provides a summary of all Loki pod status grouped
per component.
properties:
compactor:
additionalProperties:
items:
type: string
type: array
description: Compactor is a map to the pod status of the compactor
pod.
type: object
distributor:
additionalProperties:
items:
type: string
type: array
description: Distributor is a map to the per pod status of the
distributor deployment
type: object
gateway:
additionalProperties:
items:
type: string
type: array
description: Gateway is a map to the per pod status of the lokistack
gateway deployment.
type: object
indexGateway:
additionalProperties:
items:
type: string
type: array
description: IndexGateway is a map to the per pod status of the
index gateway statefulset
type: object
ingester:
additionalProperties:
items:
type: string
type: array
description: Ingester is a map to the per pod status of the ingester
statefulset
type: object
querier:
additionalProperties:
items:
type: string
type: array
description: Querier is a map to the per pod status of the querier
deployment
type: object
queryFrontend:
additionalProperties:
items:
type: string
type: array
description: QueryFrontend is a map to the per pod status of the
query frontend deployment
type: object
type: object
conditions:
description: Conditions of the Loki deployment health.
items:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are:
\"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map // +listMapKey=type
\ Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
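The CRD above only declares the schema; for orientation, a minimal LokiStack custom resource that satisfies its required spec fields (replicationFactor, size, storage, storageClassName) could look like the sketch below. The object name, namespace, secret name, and storage class are illustrative placeholders, not values defined by this change.

apiVersion: loki.grafana.com/v1beta1
kind: LokiStack
metadata:
  name: lokistack-sample        # placeholder name
  namespace: loki-operator      # placeholder namespace
spec:
  size: 1x.small                # one of 1x.extra-small, 1x.small, 1x.medium
  replicationFactor: 1          # minimum allowed value is 1
  storageClassName: standard    # placeholder class for ingester/querier PVCs
  storage:
    secret:
      name: lokistack-s3        # placeholder secret with object storage credentials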

@ -0,0 +1,14 @@
annotations:
# Core bundle annotations.
operators.operatorframework.io.bundle.mediatype.v1: registry+v1
operators.operatorframework.io.bundle.manifests.v1: manifests/
operators.operatorframework.io.bundle.metadata.v1: metadata/
operators.operatorframework.io.bundle.package.v1: loki-operator
operators.operatorframework.io.bundle.channels.v1: tech-preview
operators.operatorframework.io.metrics.builder: operator-sdk-unknown
operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3
# Annotations for testing.
operators.operatorframework.io.test.mediatype.v1: scorecard+v1
operators.operatorframework.io.test.config.v1: tests/scorecard/

@ -0,0 +1,70 @@
apiVersion: scorecard.operatorframework.io/v1alpha3
kind: Configuration
metadata:
name: config
stages:
- parallel: true
tests:
- entrypoint:
- scorecard-test
- basic-check-spec
image: quay.io/operator-framework/scorecard-test:v1.4.0
labels:
suite: basic
test: basic-check-spec-test
storage:
spec:
mountPath: {}
- entrypoint:
- scorecard-test
- olm-bundle-validation
image: quay.io/operator-framework/scorecard-test:v1.4.0
labels:
suite: olm
test: olm-bundle-validation-test
storage:
spec:
mountPath: {}
- entrypoint:
- scorecard-test
- olm-crds-have-validation
image: quay.io/operator-framework/scorecard-test:v1.4.0
labels:
suite: olm
test: olm-crds-have-validation-test
storage:
spec:
mountPath: {}
- entrypoint:
- scorecard-test
- olm-crds-have-resources
image: quay.io/operator-framework/scorecard-test:v1.4.0
labels:
suite: olm
test: olm-crds-have-resources-test
storage:
spec:
mountPath: {}
- entrypoint:
- scorecard-test
- olm-spec-descriptors
image: quay.io/operator-framework/scorecard-test:v1.4.0
labels:
suite: olm
test: olm-spec-descriptors-test
storage:
spec:
mountPath: {}
- entrypoint:
- scorecard-test
- olm-status-descriptors
image: quay.io/operator-framework/scorecard-test:v1.4.0
labels:
suite: olm
test: olm-status-descriptors-test
storage:
spec:
mountPath: {}
storage:
spec:
mountPath: {}
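Assuming a standard operator-sdk installation, these suites can be exercised against the generated bundle with something like the commands below; the selector values correspond to the suite labels defined in this configuration.

operator-sdk scorecard ./bundle --selector=suite=basic
operator-sdk scorecard ./bundle --selector=suite=olm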

@ -0,0 +1,26 @@
# Build the calculator binary
FROM golang:1.16 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the go source
COPY cmd/size-calculator/main.go main.go
COPY internal/ internal/
# Build
RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -a -o size-calculator main.go
# Use distroless as minimal base image to package the size-calculator binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/size-calculator .
USER 65532:65532
ENTRYPOINT ["/size-calculator"]

@ -0,0 +1,152 @@
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"github.com/ViaQ/logerr/log"
"github.com/grafana/loki-operator/api/v1beta1"
"github.com/grafana/loki-operator/internal/manifests"
"sigs.k8s.io/yaml"
)
// Define the manifest options here as structured objects
type config struct {
Name string
Namespace string
Image string
featureFlags manifests.FeatureFlags
objectStorage manifests.ObjectStorage
crFilepath string
writeToDir string
}
func (c *config) registerFlags(f *flag.FlagSet) {
// LokiStack metadata options
f.StringVar(&c.Name, "name", "", "The name of the stack")
f.StringVar(&c.Namespace, "namespace", "", "Namespace to deploy to")
f.StringVar(&c.Image, "image", manifests.DefaultContainerImage, "The Loki image pull spec loation.")
// Feature flags
c.featureFlags = manifests.FeatureFlags{}
f.BoolVar(&c.featureFlags.EnableCertificateSigningService, "with-cert-signing-service", false, "Enable usage of cert-signing service for scraping prometheus metrics via TLS.")
f.BoolVar(&c.featureFlags.EnableServiceMonitors, "with-service-monitors", false, "Enable service monitors for all LokiStack components.")
f.BoolVar(&c.featureFlags.EnableTLSServiceMonitorConfig, "with-tls-service-monitors", false, "Enable TLS endpoint for service monitors.")
f.BoolVar(&c.featureFlags.EnableGateway, "with-lokistack-gateway", false, "Enables the manifest creation for the entire lokistack-gateway.")
// Object storage options
c.objectStorage = manifests.ObjectStorage{}
f.StringVar(&c.objectStorage.Endpoint, "object-storage.endpoint", "", "The S3 endpoint location.")
f.StringVar(&c.objectStorage.Buckets, "object-storage.buckets", "", "A comma-separated list of S3 buckets.")
f.StringVar(&c.objectStorage.Region, "object-storage.region", "", "An S3 region.")
f.StringVar(&c.objectStorage.AccessKeyID, "object-storage.access-key-id", "", "The access key id for S3.")
f.StringVar(&c.objectStorage.AccessKeySecret, "object-storage.access-key-secret", "", "The access key secret for S3.")
// Input and output file/dir options
f.StringVar(&c.crFilepath, "custom-resource.path", "", "Path to a custom resource YAML file.")
f.StringVar(&c.writeToDir, "output.write-dir", "", "write each file to the specified directory.")
}
func (c *config) validateFlags() {
if cfg.crFilepath == "" {
log.Info("-custom.resource.path flag is required")
os.Exit(1)
}
if cfg.Name == "" {
log.Info("-name flag is required")
os.Exit(1)
}
if cfg.Namespace == "" {
log.Info("-namespace flag is required")
os.Exit(1)
}
// Validate manifests.objectStorage
if cfg.objectStorage.Endpoint == "" {
log.Info("-object.storage.endpoint flag is required")
os.Exit(1)
}
if cfg.objectStorage.Buckets == "" {
log.Info("-object.storage.buckets flag is required")
os.Exit(1)
}
if cfg.objectStorage.AccessKeyID == "" {
log.Info("-object.storage.access.key.id flag is required")
os.Exit(1)
}
if cfg.objectStorage.AccessKeySecret == "" {
log.Info("-object.storage.access.key.secret flag is required")
os.Exit(1)
}
}
var cfg *config
func init() {
log.Init("loki-broker")
cfg = &config{}
}
func main() {
f := flag.NewFlagSet("", flag.ExitOnError)
cfg.registerFlags(f)
if err := f.Parse(os.Args[1:]); err != nil {
log.Error(err, "failed to parse flags")
}
cfg.validateFlags()
b, err := ioutil.ReadFile(cfg.crFilepath)
if err != nil {
log.Info("failed to read custom resource file", "path", cfg.crFilepath)
os.Exit(1)
}
ls := &v1beta1.LokiStack{}
if err = yaml.Unmarshal(b, ls); err != nil {
log.Error(err, "failed to unmarshal LokiStack CR", "path", cfg.crFilepath)
os.Exit(1)
}
// Convert config to manifest.Options
opts := manifests.Options{
Name: cfg.Name,
Namespace: cfg.Namespace,
Image: cfg.Image,
Stack: ls.Spec,
Flags: cfg.featureFlags,
ObjectStorage: cfg.objectStorage,
}
if optErr := manifests.ApplyDefaultSettings(&opts); optErr != nil {
log.Error(optErr, "failed to conform options to build settings")
os.Exit(1)
}
objects, err := manifests.BuildAll(opts)
if err != nil {
log.Error(err, "failed to build manifests")
os.Exit(1)
}
for _, o := range objects {
b, err := yaml.Marshal(o)
if err != nil {
log.Error(err, "failed to marshal manifest", "name", o.GetName(), "kind", o.GetObjectKind())
continue
}
if cfg.writeToDir != "" {
basename := fmt.Sprintf("%s-%s.yaml", o.GetObjectKind().GroupVersionKind().Kind, o.GetName())
fname := strings.ToLower(path.Join(cfg.writeToDir, basename))
if err := ioutil.WriteFile(fname, b, 0o644); err != nil {
log.Error(err, "failed to write file to directory", "path", fname)
os.Exit(1)
}
} else {
fmt.Fprintf(os.Stdout, "---\n%s", b)
}
}
}
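For reference, an invocation of loki-broker that exercises the flags registered above might look like the following; every value shown (names, endpoint, bucket, credentials, paths) is a placeholder, and the flags checked in validateFlags are all supplied.

go run ./cmd/loki-broker \
  -name=lokistack-dev \
  -namespace=default \
  -custom-resource.path=./lokistack.yaml \
  -object-storage.endpoint=http://minio.minio.svc:9000 \
  -object-storage.buckets=loki \
  -object-storage.access-key-id=EXAMPLEKEY \
  -object-storage.access-key-secret=EXAMPLESECRET \
  -output.write-dir=./out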

@ -0,0 +1,63 @@
package main
import (
"fmt"
"math"
"os"
"time"
"github.com/grafana/loki-operator/internal/sizes"
"github.com/prometheus/common/model"
"github.com/ViaQ/logerr/log"
)
const (
// defaultDuration is the time for which the metric needs to be predicted for.
// It is passed as second parameter to predict_linear.
defaultDuration string = "24h"
// range1xSmall defines the range (in GB)
// of t-shirt size 1x.small i.e., 0 <= 1x.small <= 500
range1xSmall int = 500
// sizeOneXSmall defines the size of a single Loki deployment
// with small resources/limits requirements. This size is dedicated for setup **without** the
// requirement for single replication factor and auto-compaction.
sizeOneXSmall string = "1x.small"
// sizeOneXMedium defines the size of a single Loki deployment
// with medium resources/limits requirements. This size is dedicated for setup **with** the
// requirement for single replication factor and auto-compaction.
sizeOneXMedium string = "1x.medium"
)
func init() {
log.Init("size-calculator")
}
func main() {
log.Info("starting storage size calculator...")
for {
duration, parseErr := model.ParseDuration(defaultDuration)
if parseErr != nil {
log.Error(parseErr, "failed to parse duration")
os.Exit(1)
}
logsCollected, err := sizes.PredictFor(duration)
if err != nil {
log.Error(err, "Failed to collect metrics data")
os.Exit(1)
}
logsCollectedInGB := int(math.Ceil(logsCollected / math.Pow(1024, 3)))
log.Info(fmt.Sprintf("Amount of logs expected in 24 hours is %f Bytes or %dGB", logsCollected, logsCollectedInGB))
if logsCollectedInGB <= range1xSmall {
log.Info(fmt.Sprintf("Recommended t-shirt size for %dGB is %s", logsCollectedInGB, sizeOneXSmall))
} else {
log.Info(fmt.Sprintf("Recommended t-shirt size for %dGB is %s", logsCollectedInGB, sizeOneXMedium))
}
time.Sleep(1 * time.Minute)
}
}

@ -0,0 +1,25 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More documentation can be found at https://docs.cert-manager.io
# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes.
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
namespace: system
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml
namespace: system
spec:
# $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
dnsNames:
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
issuerRef:
kind: Issuer
name: selfsigned-issuer
secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize

@ -0,0 +1,5 @@
resources:
- certificate.yaml
configurations:
- kustomizeconfig.yaml

@ -0,0 +1,16 @@
# This configuration is for teaching kustomize how to update name ref and var substitution
nameReference:
- kind: Issuer
group: cert-manager.io
fieldSpecs:
- kind: Certificate
group: cert-manager.io
path: spec/issuerRef/name
varReference:
- kind: Certificate
group: cert-manager.io
path: spec/commonName
- kind: Certificate
group: cert-manager.io
path: spec/dnsNames

@ -0,0 +1,695 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
creationTimestamp: null
name: lokistacks.loki.grafana.com
spec:
group: loki.grafana.com
names:
categories:
- logging
kind: LokiStack
listKind: LokiStackList
plural: lokistacks
singular: lokistack
scope: Namespaced
versions:
- name: v1beta1
schema:
openAPIV3Schema:
description: LokiStack is the Schema for the lokistacks API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: LokiStackSpec defines the desired state of LokiStack
properties:
limits:
description: Limits defines the limits to be applied to log stream processing.
properties:
global:
description: Global defines the limits applied globally across the cluster.
properties:
ingestion:
description: IngestionLimits defines the limits applied on ingested log streams.
properties:
ingestionBurstSize:
description: IngestionBurstSize defines the local rate-limited sample size per distributor replica. It should be set at least to the maximum log size expected in a single push request.
format: int32
type: integer
ingestionRate:
description: IngestionRate defines the sample size per second. Units MB.
format: int32
type: integer
maxGlobalStreamsPerTenant:
description: MaxGlobalStreamsPerTenant defines the maximum number of active streams per tenant, across the cluster.
format: int32
type: integer
maxLabelNameLength:
description: MaxLabelNameLength defines the maximum number of characters allowed for label keys in log streams.
format: int32
type: integer
maxLabelNamesPerSeries:
description: MaxLabelNamesPerSeries defines the maximum number of label names per series in each log stream.
format: int32
type: integer
maxLabelValueLength:
description: MaxLabelValueLength defines the maximum number of characters allowed for label values in log streams.
format: int32
type: integer
maxLineSize:
description: MaxLineSize defines the maximum line size on ingestion path. Units in Bytes.
format: int32
type: integer
type: object
queries:
description: QueryLimits defines the limit applied on querying log streams.
properties:
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number of chunks that can be fetched by a single query.
format: int32
type: integer
maxEntriesLimitPerQuery:
description: MaxEntriesLimitPerQuery defines the maximum number of log entries that will be returned for a query.
format: int32
type: integer
maxQuerySeries:
description: MaxQuerySeries defines the maximum number of unique series that is returned by a metric query.
format: int32
type: integer
type: object
type: object
tenants:
additionalProperties:
description: LimitsTemplateSpec defines the limits applied at ingestion or query path.
properties:
ingestion:
description: IngestionLimits defines the limits applied on ingested log streams.
properties:
ingestionBurstSize:
description: IngestionBurstSize defines the local rate-limited sample size per distributor replica. It should be set at least to the maximum log size expected in a single push request.
format: int32
type: integer
ingestionRate:
description: IngestionRate defines the sample size per second. Units MB.
format: int32
type: integer
maxGlobalStreamsPerTenant:
description: MaxGlobalStreamsPerTenant defines the maximum number of active streams per tenant, across the cluster.
format: int32
type: integer
maxLabelNameLength:
description: MaxLabelNameLength defines the maximum number of characters allowed for label keys in log streams.
format: int32
type: integer
maxLabelNamesPerSeries:
description: MaxLabelNamesPerSeries defines the maximum number of label names per series in each log stream.
format: int32
type: integer
maxLabelValueLength:
description: MaxLabelValueLength defines the maximum number of characters allowed for label values in log streams.
format: int32
type: integer
maxLineSize:
description: MaxLineSize defines the maximum line size on ingestion path. Units in Bytes.
format: int32
type: integer
type: object
queries:
description: QueryLimits defines the limit applied on querying log streams.
properties:
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number of chunks that can be fetched by a single query.
format: int32
type: integer
maxEntriesLimitPerQuery:
description: MaxEntriesLimitPerQuery defines the maximum number of log entries that will be returned for a query.
format: int32
type: integer
maxQuerySeries:
description: MaxQuerySeries defines the maximum number of unique series that is returned by a metric query.
format: int32
type: integer
type: object
type: object
description: Tenants defines the limits applied per tenant.
type: object
type: object
managementState:
default: Managed
description: ManagementState defines if the CR should be managed by the operator or not. Default is managed.
enum:
- Managed
- Unmanaged
type: string
replicationFactor:
description: ReplicationFactor defines the policy for log stream replication.
format: int32
minimum: 1
type: integer
size:
description: Size defines one of the supported Loki deployment scale-out sizes.
enum:
- 1x.extra-small
- 1x.small
- 1x.medium
type: string
storage:
description: Storage defines the spec for the object storage endpoint to store logs.
properties:
secret:
description: Secret for object storage authentication. Name of a secret in the same namespace as the cluster logging operator.
properties:
name:
description: Name of a secret in the namespace configured for object storage secrets.
type: string
required:
- name
type: object
required:
- secret
type: object
storageClassName:
description: Storage class name defines the storage class for ingester/querier PVCs.
type: string
template:
description: Template defines the resource/limits/tolerations/nodeSelectors per component.
properties:
compactor:
description: Compactor defines the compaction component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
distributor:
description: Distributor defines the distributor component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
gateway:
description: Gateway defines the lokistack gateway component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
indexGateway:
description: IndexGateway defines the index gateway component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
ingester:
description: Ingester defines the ingester component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
querier:
description: Querier defines the querier component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
queryFrontend:
description: QueryFrontend defines the query frontend component spec.
properties:
nodeSelector:
additionalProperties:
type: string
description: NodeSelector defines the labels required by a node to schedule the component onto it.
type: object
replicas:
description: Replicas defines the number of replica pods of the component.
format: int32
type: integer
tolerations:
description: Tolerations defines the tolerations required by a node to schedule the component onto it.
items:
description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
type: object
tenants:
description: Tenants defines the per-tenant authentication and authorization spec for the lokistack-gateway component.
properties:
authentication:
description: Authentication defines the lokistack-gateway component authentication configuration spec per tenant.
items:
description: AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component.
properties:
oidc:
description: OIDC defines the spec for the OIDC tenant's authentication.
properties:
groupClaim:
type: string
issuerURL:
description: IssuerURL defines the URL for issuer.
type: string
redirectURL:
description: RedirectURL defines the URL for redirect.
type: string
secret:
description: Secret defines the spec for the clientID, clientSecret and issuerCAPath for tenant's authentication.
properties:
name:
description: Name of a secret in the namespace configured for tenant secrets.
type: string
required:
- name
type: object
usernameClaim:
type: string
required:
- groupClaim
- issuerURL
- redirectURL
- secret
- usernameClaim
type: object
tenantId:
description: TenantID defines the id of the tenant.
type: string
tenantName:
description: TenantName defines the name of the tenant.
type: string
required:
- oidc
- tenantId
- tenantName
type: object
type: array
authorization:
description: Authorization defines the lokistack-gateway component authorization configuration spec per tenant.
properties:
opa:
description: OPA defines the spec for the third-party endpoint for tenant's authorization.
properties:
url:
description: URL defines the third-party endpoint for authorization.
type: string
required:
- url
type: object
roleBindings:
description: RoleBindings defines configuration to bind a set of roles to a set of subjects.
items:
description: RoleBindingsSpec binds a set of roles to a set of subjects.
properties:
name:
type: string
roles:
items:
type: string
type: array
subjects:
items:
description: Subject represents a subject that has been bound to a role.
properties:
kind:
description: SubjectKind is a kind of LokiStack Gateway RBAC subject.
enum:
- user
- group
type: string
name:
type: string
required:
- kind
- name
type: object
type: array
required:
- name
- roles
- subjects
type: object
type: array
roles:
description: Roles defines a set of permissions to interact with a tenant.
items:
description: RoleSpec describes a set of permissions to interact with a tenant.
properties:
name:
type: string
permissions:
items:
description: PermissionType is a LokiStack Gateway RBAC permission.
enum:
- read
- write
type: string
type: array
resources:
items:
type: string
type: array
tenants:
items:
type: string
type: array
required:
- name
- permissions
- resources
- tenants
type: object
type: array
type: object
mode:
default: openshift-logging
description: Mode defines the mode in which lokistack-gateway component will be configured.
enum:
- static
- dynamic
- openshift-logging
type: string
required:
- mode
type: object
required:
- replicationFactor
- size
- storage
- storageClassName
type: object
status:
description: LokiStackStatus defines the observed state of LokiStack
properties:
components:
description: Components provides summary of all Loki pod status grouped per component.
properties:
compactor:
additionalProperties:
items:
type: string
type: array
description: Compactor is a map to the pod status of the compactor pod.
type: object
distributor:
additionalProperties:
items:
type: string
type: array
description: Distributor is a map to the per pod status of the distributor deployment
type: object
gateway:
additionalProperties:
items:
type: string
type: array
description: Gateway is a map to the per pod status of the lokistack gateway deployment.
type: object
indexGateway:
additionalProperties:
items:
type: string
type: array
description: IndexGateway is a map to the per pod status of the index gateway statefulset
type: object
ingester:
additionalProperties:
items:
type: string
type: array
description: Ingester is a map to the per pod status of the ingester statefulset
type: object
querier:
additionalProperties:
items:
type: string
type: array
description: Querier is a map to the per pod status of the querier deployment
type: object
queryFrontend:
additionalProperties:
items:
type: string
type: array
description: QueryFrontend is a map to the per pod status of the query frontend deployment
type: object
type: object
conditions:
description: Conditions of the Loki deployment health.
items:
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
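
# --- Example (not part of this diff): a minimal, hypothetical LokiStack
# custom resource exercising the tenants fields defined by the CRD above.
# All concrete values (names, URLs, tenant ids, storage class) are
# placeholders, not values taken from this pull request.
apiVersion: loki.grafana.com/v1beta1
kind: LokiStack
metadata:
  name: lokistack-dev
  namespace: openshift-logging
spec:
  size: 1x.small
  replicationFactor: 1
  storageClassName: gp2
  storage:
    secret:
      name: loki-objectstore      # name of an object storage secret (placeholder)
  tenants:
    mode: static
    authentication:
    - tenantName: application
      tenantId: application
      oidc:
        issuerURL: https://issuer.example.com/realms/loki
        redirectURL: https://gateway.example.com/callback
        usernameClaim: preferred_username
        groupClaim: groups
        secret:
          name: application-oidc  # tenant secret with clientID/clientSecret (placeholder)
    authorization:
      roles:
      - name: read-write
        permissions:
        - read
        - write
        resources:
        - logs
        tenants:
        - application
      roleBindings:
      - name: application-logs
        roles:
        - read-write
        subjects:
        - kind: user
          name: developer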

@ -0,0 +1,21 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/loki.grafana.com_lokistacks.yaml
# +kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_lokistacks.yaml
# +kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_lokistacks.yaml
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml

@ -0,0 +1,19 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
version: v1beta1
group: apiextensions.k8s.io
path: spec/conversion/webhookClientConfig/service/name
namespace:
- kind: CustomResourceDefinition
version: v1beta1
group: apiextensions.k8s.io
path: spec/conversion/webhookClientConfig/service/namespace
create: false
varReference:
- path: metadata/annotations

@ -0,0 +1,8 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: lokistacks.loki.grafana.com

@ -0,0 +1,14 @@
# The following patch enables a conversion webhook for the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: lokistacks.loki.grafana.com
spec:
conversion:
strategy: Webhook
webhookClientConfig:
service:
namespace: system
name: webhook-service
path: /convert

@ -0,0 +1,11 @@
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
kind: ControllerManagerConfig
health:
healthProbeBindAddress: :8081
metrics:
bindAddress: 127.0.0.1:8080
webhook:
port: 9443
leaderElection:
leaderElect: true
resourceName: e3716011.grafana.com

@ -0,0 +1,16 @@
resources:
- manager.yaml
generatorOptions:
disableNameSuffixHash: true
configMapGenerator:
- files:
- controller_manager_config.yaml
name: manager-config
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: quay.io/openshift-logging/loki-operator
newTag: v0.0.1
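
# --- For illustration only (not part of this diff): because
# disableNameSuffixHash is set, the configMapGenerator above renders roughly
# the following ConfigMap (overlays that include this base add their own
# namePrefix and namespace):
apiVersion: v1
kind: ConfigMap
metadata:
  name: manager-config
data:
  controller_manager_config.yaml: |
    apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
    kind: ControllerManagerConfig
    health:
      healthProbeBindAddress: :8081
    metrics:
      bindAddress: 127.0.0.1:8080
    webhook:
      port: 9443
    leaderElection:
      leaderElect: true
      resourceName: e3716011.grafana.com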

@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
spec:
selector:
matchLabels:
name: loki-operator-controller-manager
replicas: 1
template:
metadata:
labels:
name: loki-operator-controller-manager
spec:
containers:
- command:
- /manager
image: controller:latest
imagePullPolicy: IfNotPresent
name: manager
ports:
- containerPort: 8080
name: metrics
securityContext:
allowPrivilegeEscalation: false
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
terminationGracePeriodSeconds: 10

@ -0,0 +1,436 @@
apiVersion: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
metadata:
annotations:
alm-examples: '[]'
capabilities: Full Lifecycle
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:v0.0.1
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
### Loki Operator Namespace
The Loki Operator must be deployed to the global operator group namespace `openshift-logging`.
### Memory Considerations
Loki is a memory intensive application. The initial
set of OCP nodes may not be large enough to support the Loki stack. Additional OCP nodes must be added
to the OCP cluster if you desire to run with the recommended (or better) memory.
olm.skipRange: '>=4.6.0-0 <5.4.0'
operatorframework.io/cluster-monitoring: "true"
operatorframework.io/suggested-namespace: openshift-logging
support: AOS Cluster Logging
labels:
operatorframework.io/arch.amd64: supported
operatorframework.io/arch.ppc64le: supported
operatorframework.io/arch.s390x: supported
name: loki-operator.v0.0.0
namespace: placeholder
spec:
apiservicedefinitions: {}
customresourcedefinitions:
owned:
- description: LokiStack is the Schema for the lokistacks API
displayName: LokiStack
kind: LokiStack
name: lokistacks.loki.grafana.com
resources:
- kind: ConfigMap
name: ""
version: v1
- kind: Deployment
name: ""
version: v1
- kind: Ingress
name: ""
version: v1
- kind: PersistentVolumeClaims
name: ""
version: v1
- kind: Route
name: ""
version: v1
- kind: Service
name: ""
version: v1
- kind: ServiceAccount
name: ""
version: v1
- kind: ServiceMonitor
name: ""
version: v1
- kind: StatefulSet
name: ""
version: v1
specDescriptors:
- description: Limits defines the limits to be applied to log stream processing.
displayName: Rate Limiting
path: limits
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:advanced
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- description: IngestionBurstSize defines the local rate-limited sample size
per distributor replica. It should be set to at least the maximum log size
expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: IngestionRate defines the sample size per second. Units MB.
displayName: Ingestion Rate (in MB)
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxGlobalStreamsPerTenant defines the maximum number of active
streams per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelNamesPerSeries defines the maximum number of label names
per series in each log stream.
displayName: Max Label Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelValueLength defines the maximum number of characters
allowed for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLineSize defines the maximum line size on the ingestion path.
Units in Bytes.
displayName: Max Line Size
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunks per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxQuerySeries defines the maximum number of unique series
returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: Tenants defines the limits applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- description: IngestionBurstSize defines the local rate-limited sample size
per distributor replica. It should be set to at least the maximum log size
expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: IngestionRate defines the sample size per second. Units MB.
displayName: Ingestion Rate (in MB)
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxGlobalStreamsPerTenant defines the maximum number of active
streams per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelNamesPerSeries defines the maximum number of label names
per series in each log stream.
displayName: Max Label Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLabelValueLength defines the maximum number of characters
allowed for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxLineSize defines the maximum line size on the ingestion path.
Units in Bytes.
displayName: Max Line Size
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunks per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: MaxQuerySeries defines the maximum number of unique series
returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: ManagementState defines if the CR should be managed by the operator
or not. Default is managed.
displayName: Management State
path: managementState
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:select:Managed
- urn:alm:descriptor:com.tectonic.ui:select:Unmanaged
- description: ReplicationFactor defines the policy for log stream replication.
displayName: Replication Factor
path: replicationFactor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- description: Size defines one of the supported Loki deployment scale-out sizes.
displayName: LokiStack Size
path: size
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small
- urn:alm:descriptor:com.tectonic.ui:select:1x.small
- urn:alm:descriptor:com.tectonic.ui:select:1x.medium
- description: Storage defines the spec for the object storage endpoint to store
logs.
displayName: Object Storage
path: storage
- description: Name of a secret in the namespace configured for object storage
secrets.
displayName: Object Storage Secret
path: storage.secret.name
x-descriptors:
- urn:alm:descriptor:io.kubernetes:Secret
- description: Storage class name defines the storage class for ingester/querier
PVCs.
displayName: Storage Class Name
path: storageClassName
x-descriptors:
- urn:alm:descriptor:io.kubernetes:StorageClass
- description: Template defines the resource/limits/tolerations/nodeselectors
per component
displayName: Node Placement
path: template
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:advanced
- description: Compactor defines the compaction component spec.
displayName: Compactor pods
path: template.compactor
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.compactor.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: Distributor defines the distributor component spec.
displayName: Distributor pods
path: template.distributor
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.distributor.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: Gateway defines the lokistack gateway component spec.
displayName: Gateway pods
path: template.gateway
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.gateway.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: IndexGateway defines the index gateway component spec.
displayName: Index Gateway pods
path: template.indexGateway
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.indexGateway.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: Ingester defines the ingester component spec.
displayName: Ingester pods
path: template.ingester
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.ingester.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: Querier defines the querier component spec.
displayName: Querier pods
path: template.querier
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.querier.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: QueryFrontend defines the query frontend component spec.
displayName: Query Frontend pods
path: template.queryFrontend
- description: Replicas defines the number of replica pods of the component.
displayName: Replicas
path: template.queryFrontend.replicas
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:hidden
- description: Tenants defines the per-tenant authentication and authorization
spec for the lokistack-gateway component.
displayName: Tenants Configuration
path: tenants
- description: Authentication defines the lokistack-gateway component authentication
configuration spec per tenant.
displayName: Authentication
path: tenants.authentication
- description: OIDC defines the spec for the OIDC tenant's authentication.
displayName: OIDC Configuration
path: tenants.authentication[0].oidc
- description: IssuerURL defines the URL for issuer.
displayName: Issuer URL
path: tenants.authentication[0].oidc.issuerURL
- description: RedirectURL defines the URL for redirect.
displayName: Redirect URL
path: tenants.authentication[0].oidc.redirectURL
- description: Secret defines the spec for the clientID, clientSecret and issuerCAPath
for tenant's authentication.
displayName: Tenant Secret
path: tenants.authentication[0].oidc.secret
- description: Name of a secret in the namespace configured for tenant secrets.
displayName: Tenant Secret Name
path: tenants.authentication[0].oidc.secret.name
x-descriptors:
- urn:alm:descriptor:io.kubernetes:Secret
- description: TenantID defines the id of the tenant.
displayName: Tenant ID
path: tenants.authentication[0].tenantId
- description: TenantName defines the name of the tenant.
displayName: Tenant Name
path: tenants.authentication[0].tenantName
- description: Authorization defines the lokistack-gateway component authorization
configuration spec per tenant.
displayName: Authorization
path: tenants.authorization
- description: OPA defines the spec for the third-party endpoint for tenant's
authorization.
displayName: OPA Configuration
path: tenants.authorization.opa
- description: URL defines the third-party endpoint for authorization.
displayName: OpenPolicyAgent URL
path: tenants.authorization.opa.url
- description: RoleBindings defines configuration to bind a set of roles to
a set of subjects.
displayName: Static Role Bindings
path: tenants.authorization.roleBindings
- description: Roles defines a set of permissions to interact with a tenant.
displayName: Static Roles
path: tenants.authorization.roles
- description: Mode defines the mode in which lokistack-gateway component will
be configured.
displayName: Mode
path: tenants.mode
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:select:static
- urn:alm:descriptor:com.tectonic.ui:select:dynamic
- urn:alm:descriptor:com.tectonic.ui:select:openshift-logging
statusDescriptors:
- description: Distributor is a map to the per pod status of the distributor
deployment
displayName: Distributor
path: components.distributor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: Ingester is a map to the per pod status of the ingester statefulset
displayName: Ingester
path: components.ingester
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: Querier is a map to the per pod status of the querier deployment
displayName: Querier
path: components.querier
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: QueryFrontend is a map to the per pod status of the query frontend
deployment
displayName: Query Frontend
path: components.queryFrontend
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: Compactor is a map to the pod status of the compactor pod.
displayName: Compactor
path: components.compactor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: Gateway is a map to the per pod status of the lokistack gateway
deployment.
displayName: Gateway
path: components.gateway
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: IndexGateway is a map to the per pod status of the index gateway
statefulset
displayName: IndexGateway
path: components.indexGateway
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:podStatuses
- description: Conditions of the Loki deployment health.
displayName: Conditions
path: conditions
x-descriptors:
- urn:alm:descriptor:io.kubernetes.conditions
version: v1beta1
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
### Loki Operator Namespace
The Loki Operator must be deployed to the global operator group namespace `openshift-logging`.
### Memory Considerations
Loki is a memory intensive application. The initial
set of OCP nodes may not be large enough to support the Loki cluster. Additional OCP nodes must be added
to the OCP cluster if you desire to run with the recommended (or better) memory.
displayName: Loki Operator
icon:
- base64data: PHN2ZyBpZD0iYWZiNDE1NDktYzU3MC00OWI3LTg1Y2QtNjU3NjAwZWRmMmUxIiBkYXRhLW5hbWU9IkxheWVyIDEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDcyMS4xNSA3MjEuMTUiPgogIDxkZWZzPgogICAgPHN0eWxlPgogICAgICAuYTQ0OGZkZWEtNGE0Yy00Njc4LTk3NmEtYzM3ODUzMDhhZTA2IHsKICAgICAgICBmaWxsOiAjZGIzOTI3OwogICAgICB9CgogICAgICAuZTEzMzA4YjgtNzQ4NS00Y2IwLTk3NjUtOGE1N2I5M2Y5MWE2IHsKICAgICAgICBmaWxsOiAjY2IzNzI4OwogICAgICB9CgogICAgICAuZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIHsKICAgICAgICBmaWxsOiAjZmZmOwogICAgICB9CgogICAgICAuYTA0MjBjYWMtZWJlNi00YzE4LWI5ODEtYWJiYTBiYTliMzY1IHsKICAgICAgICBmaWxsOiAjZTVlNWU0OwogICAgICB9CiAgICA8L3N0eWxlPgogIDwvZGVmcz4KICA8Y2lyY2xlIGNsYXNzPSJhNDQ4ZmRlYS00YTRjLTQ2NzgtOTc2YS1jMzc4NTMwOGFlMDYiIGN4PSIzNjAuNTgiIGN5PSIzNjAuNTgiIHI9IjM1OC4yOCIvPgogIDxwYXRoIGNsYXNzPSJlMTMzMDhiOC03NDg1LTRjYjAtOTc2NS04YTU3YjkzZjkxYTYiIGQ9Ik02MTMuNTQsMTA3LjMsMTA2Ljg4LDYxNGMxNDAsMTM4LjUxLDM2NS44MiwxMzguMDYsNTA1LjI2LTEuMzlTNzUyLDI0Ny4zMyw2MTMuNTQsMTA3LjNaIi8+CiAgPGc+CiAgICA8Y2lyY2xlIGNsYXNzPSJlNzcyODZmMS0yMmRhLTQ0ZDEtOGVmYi0xZDE0YjBjY2FlNjIiIGN4PSIyMzQuNyIgY3k9IjM1Ny4zIiByPSI0Ny43MiIvPgogICAgPGNpcmNsZSBjbGFzcz0iZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIiBjeD0iMjM0LjciIGN5PSIxODIuOTQiIHI9IjQ3LjcyIi8+CiAgICA8Y2lyY2xlIGNsYXNzPSJlNzcyODZmMS0yMmRhLTQ0ZDEtOGVmYi0xZDE0YjBjY2FlNjIiIGN4PSIyMzQuNyIgY3k9IjUzOC4yMSIgcj0iNDcuNzIiLz4KICA8L2c+CiAgPHBvbHlnb24gY2xhc3M9ImU3NzI4NmYxLTIyZGEtNDRkMS04ZWZiLTFkMTRiMGNjYWU2MiIgcG9pbnRzPSI0MzUuMTkgMzQ3LjMgMzkwLjU0IDM0Ny4zIDM5MC41NCAxNzIuOTQgMzE2LjE2IDE3Mi45NCAzMTYuMTYgMTkyLjk0IDM3MC41NCAxOTIuOTQgMzcwLjU0IDM0Ny4zIDMxNi4xNiAzNDcuMyAzMTYuMTYgMzY3LjMgMzcwLjU0IDM2Ny4zIDM3MC41NCA1MjEuNjcgMzE2LjE2IDUyMS42NyAzMTYuMTYgNTQxLjY3IDM5MC41NCA1NDEuNjcgMzkwLjU0IDM2Ny4zIDQzNS4xOSAzNjcuMyA0MzUuMTkgMzQ3LjMiLz4KICA8cG9seWdvbiBjbGFzcz0iZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIiBwb2ludHM9IjU5OS43NCAzMTcuMDMgNTU3Ljk3IDMxNy4wMyA1NTAuOTcgMzE3LjAzIDU1MC45NyAzMTAuMDMgNTUwLjk3IDI2OC4yNiA1NTAuOTcgMjY4LjI2IDQ2NC4zNiAyNjguMjYgNDY0LjM2IDQ0Ni4zNCA1OTkuNzQgNDQ2LjM0IDU5OS43NCAzMTcuMDMgNTk5Ljc0IDMxNy4wMyIvPgogIDxwb2x5Z29uIGNsYXNzPSJhMDQyMGNhYy1lYmU2LTRjMTgtYjk4MS1hYmJhMGJhOWIzNjUiIHBvaW50cz0iNTk5Ljc0IDMxMC4wMyA1NTcuOTcgMjY4LjI2IDU1Ny45NyAzMTAuMDMgNTk5Ljc0IDMxMC4wMyIvPgo8L3N2Zz4K
mediatype: image/svg+xml
install:
spec:
deployments: null
strategy: ""
installModes:
- supported: true
type: OwnNamespace
- supported: false
type: SingleNamespace
- supported: false
type: MultiNamespace
- supported: true
type: AllNamespaces
keywords:
- logging
- loki
links:
- name: Loki Operator
url: https://github.com/grafana/loki
maintainers:
- email: loki-operator-team@googlegroups.com
name: Grafana Loki SIG Operator
maturity: alpha
provider:
name: Grafana.com
version: 0.0.0
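
# --- For illustration only (not part of this diff): OLM consoles read sample
# custom resources from the alm-examples annotation, which this base CSV
# leaves empty ('[]'). A hypothetical populated fragment of the CSV metadata
# could look like the following; all values are placeholders:
metadata:
  annotations:
    alm-examples: |-
      [
        {
          "apiVersion": "loki.grafana.com/v1beta1",
          "kind": "LokiStack",
          "metadata": {
            "name": "lokistack-sample"
          },
          "spec": {
            "size": "1x.small",
            "replicationFactor": 1,
            "storageClassName": "gp2",
            "storage": {
              "secret": {
                "name": "test"
              }
            }
          }
        }
      ]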

@ -0,0 +1,4 @@
resources:
- ../overlays/openshift
- ../samples
- ../scorecard

@ -0,0 +1,22 @@
resources:
- ../../crd
- ../../rbac
- ../../manager
- ./minio
# Adds namespace to all resources.
namespace: default
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
commonLabels:
app.kubernetes.io/name: loki-operator
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/version: "0.0.1"
app.kubernetes.io/part-of: loki-operator
app.kubernetes.io/managed-by: operator-lifecycle-manager
patchesStrategicMerge:
- manager_related_image_patch.yaml
- manager_image_pull_policy_patch.yaml

@ -0,0 +1,10 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
spec:
template:
spec:
containers:
- name: manager
imagePullPolicy: Always

@ -0,0 +1,14 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
spec:
template:
spec:
containers:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
value: docker.io/grafana/loki:2.4.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest

@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: minio
spec:
selector:
matchLabels:
app.kubernetes.io/name: minio
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/name: minio
spec:
containers:
- command:
- /bin/sh
- -c
- |
mkdir -p /storage/loki && \
minio server /storage
env:
- name: MINIO_ACCESS_KEY
value: minio
- name: MINIO_SECRET_KEY
value: minio123
image: minio/minio
name: minio
ports:
- containerPort: 9000
volumeMounts:
- mountPath: /storage
name: storage
volumes:
- name: storage
persistentVolumeClaim:
claimName: minio

@ -0,0 +1,5 @@
resources:
- pvc.yaml
- service.yaml
- secret.yaml
- deployment.yaml

@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
app.kubernetes.io/name: minio
name: minio
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi

@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: test
stringData:
endpoint: http://minio.default.svc.cluster.local.:9000
bucketnames: loki
access_key_id: minio
access_key_secret: minio123
type: Opaque
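
# --- For illustration only (not part of this diff): in this development
# overlay a LokiStack can point its object storage at the "test" secret
# defined above. The storage class name is a placeholder for whatever the
# development cluster provides.
apiVersion: loki.grafana.com/v1beta1
kind: LokiStack
metadata:
  name: lokistack-dev
  namespace: default
spec:
  size: 1x.extra-small
  replicationFactor: 1
  storageClassName: standard
  storage:
    secret:
      name: test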

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: minio
spec:
ports:
- port: 9000
protocol: TCP
targetPort: 9000
selector:
app.kubernetes.io/name: minio
type: ClusterIP

@ -0,0 +1,90 @@
resources:
- ../../crd
- ../../rbac
- ../../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
- ../../prometheus
# Adds namespace to all resources.
namespace: openshift-logging
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: loki-operator-
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
commonLabels:
app.kubernetes.io/name: loki-operator
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/version: "0.0.1"
app.kubernetes.io/part-of: cluster-logging
app.kubernetes.io/managed-by: operator-lifecycle-manager
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
- manager_related_image_patch.yaml
- manager_run_flags_patch.yaml
- prometheus_service_monitor_patch.yaml
# apiVersion: kustomize.config.k8s.io/v1beta1
# kind: Kustomization
images:
- name: controller
newName: quay.io/openshift-logging/loki-operator
newTag: v0.0.1
# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
#- manager_config_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service

@ -0,0 +1,29 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
image: quay.io/openshift/origin-kube-rbac-proxy:latest
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt"
- "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key"
- "--v=2"
ports:
- containerPort: 8443
name: https
volumeMounts:
- mountPath: /var/run/secrets/serving-cert
name: loki-operator-metrics-cert
volumes:
- name: loki-operator-metrics-cert
secret:
defaultMode: 420
optional: true
secretName: loki-operator-metrics

@ -0,0 +1,16 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
spec:
template:
spec:
containers:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
value: quay.io/openshift-logging/loki:v2.4.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
value: quay.io/observatorium/opa-openshift:latest

@ -0,0 +1,15 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
spec:
template:
spec:
containers:
- name: manager
args:
- "--with-lokistack-gateway"
- "--with-lokistack-gateway-route"
- "--with-cert-signing-service"
- "--with-service-monitors"
- "--with-tls-service-monitors"

@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
name: loki-operator
name: metrics-monitor
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
path: /metrics
targetPort: 8443
scheme: https
interval: 30s
scrapeTimeout: 10s
tlsConfig:
caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt
serverName: loki-operator-controller-manager-metrics-service.openshift-logging.svc

@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cluster-monitoring-config
namespace: openshift-monitoring
data:
config.yaml: |
enableUserWorkload: true

@ -0,0 +1,23 @@
resources:
- logfile_metric_daemonset.yaml
- logfile_metric_role.yaml
- logfile_metric_role_binding.yaml
- logfile_metric_scc.yaml
- logfile_metric_service.yaml
- logfile_metric_service_account.yaml
- logfile_metric_service_monitor.yaml
- storage_size_calculator_config.yaml
- storage_size_calculator.yaml
# Adds namespace to all resources.
namespace: openshift-logging
# Labels to add to all resources and selectors.
# commonLabels:
# someName: someValue
commonLabels:
app.kubernetes.io/name: storage-size-calculator
app.kubernetes.io/instance: storage-size-calculator-v0.0.1
app.kubernetes.io/version: "0.0.1"
app.kubernetes.io/part-of: loki-operator
app.kubernetes.io/managed-by: kubectl-apply

@ -0,0 +1,55 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: log-file-metric-exporter
labels:
name: log-file-metric-exporter
spec:
selector:
matchLabels:
name: log-file-metric-exporter
template:
metadata:
labels:
name: log-file-metric-exporter
spec:
nodeSelector:
kubernetes.io/os: linux
containers:
- name: log-file-metric-exporter
image: quay.io/openshift-logging/log-file-metric-exporter:latest
imagePullPolicy: IfNotPresent
command:
- /usr/local/bin/log-file-metric-exporter
- -verbosity=2
- -dir=/var/log/containers
- -http=:2112
- -keyFile=/var/run/secrets/serving-cert/tls.key
- -crtFile=/var/run/secrets/serving-cert/tls.crt
ports:
- containerPort: 2112
name: logfile-metrics
protocol: TCP
volumeMounts:
- mountPath: /var/run/secrets/serving-cert
name: log-file-metric-exporter-metrics
- mountPath: /var/log
name: logfile-varlog
securityContext:
seLinuxOptions:
type: spc_t
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
serviceAccount: log-file-metric-exporter
volumes:
- name: log-file-metric-exporter-metrics
secret:
defaultMode: 420
optional: true
secretName: log-file-metric-exporter-metrics
- name: logfile-varlog
hostPath:
path: /var/log
- name: storage-size-calculator-ca-bundle
configMap:
name: storage-size-calculator-ca-bundle

@ -0,0 +1,13 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: log-file-metric-exporter-privileged
rules:
- verbs:
- use
apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
resourceNames:
- log-file-metric-exporter-scc

@ -0,0 +1,11 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: log-file-metric-exporter-privileged-binding
subjects:
- kind: ServiceAccount
name: log-file-metric-exporter
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: log-file-metric-exporter-privileged

@ -0,0 +1,43 @@
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: log-file-metric-exporter-scc
allowPrivilegedContainer: true
requiredDropCapabilities:
- MKNOD
- CHOWN
- DAC_OVERRIDE
- FSETID
- FOWNER
- SETGID
- SETUID
- SETPCAP
- NET_BIND_SERVICE
- KILL
allowHostDirVolumePlugin: true
allowHostPorts: false
runAsUser:
type: RunAsAny
users: []
allowHostIPC: false
seLinuxContext:
type: RunAsAny
readOnlyRootFilesystem: false
fsGroup:
type: RunAsAny
groups:
- 'system:cluster-admins'
defaultAddCapabilities: null
supplementalGroups:
type: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- persistentVolumeClaim
- projected
- secret
allowHostPID: false
allowHostNetwork: false
allowPrivilegeEscalation: true
allowedCapabilities: null

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: log-file-metric-exporter-metrics
labels:
name: log-file-metric-exporter
annotations:
service.beta.openshift.io/serving-cert-secret-name: log-file-metric-exporter-metrics
spec:
ports:
- name: logfile-metrics
port: 2112
protocol: TCP
targetPort: logfile-metrics
selector:
name: log-file-metric-exporter

@ -0,0 +1,9 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: log-file-metric-exporter
secrets:
- name: logfile-metric-dockercfg
- name: logfile-metric-token
imagePullSecrets:
- name: logfile-metric-dockercfg

@ -0,0 +1,20 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: monitor-log-file-metric-exporter
labels:
name: log-file-metric-exporter
spec:
selector:
matchLabels:
name: log-file-metric-exporter
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
path: /metrics
port: logfile-metrics
scheme: https
interval: 30s
scrapeTimeout: 10s
tlsConfig:
caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt
serverName: log-file-metric-exporter-metrics.openshift-logging.svc

@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: storage-size-calculator
spec:
selector:
matchLabels:
name: log-file-metric-exporter
replicas: 1
template:
metadata:
labels:
name: log-file-metric-exporter
spec:
containers:
- command:
- /size-calculator
image: quay.io/openshift-logging/storage-size-calculator:latest
imagePullPolicy: Always
name: size-calculator
ports:
- containerPort: 2112
name: logfile-metrics
securityContext:
allowPrivilegeEscalation: false
env:
- name: PROMETHEUS_URL
valueFrom:
secretKeyRef:
name: promsecret
key: prometheus_url
- name: PROMETHEUS_TOKEN
valueFrom:
secretKeyRef:
name: promsecret
key: prometheus_token
terminationGracePeriodSeconds: 10
serviceAccount: log-file-metric-exporter
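
# --- For illustration only (not part of this diff): the deployment above
# reads PROMETHEUS_URL and PROMETHEUS_TOKEN from a Secret named "promsecret",
# which is not included in this kustomization. A hypothetical sketch with
# placeholder values:
apiVersion: v1
kind: Secret
metadata:
  name: promsecret
  namespace: openshift-logging
type: Opaque
stringData:
  prometheus_url: https://thanos-querier.openshift-monitoring.svc:9091
  prometheus_token: <bearer-token-of-a-service-account-with-monitoring-access>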

@ -0,0 +1,6 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: storage-size-calculator-ca-bundle
annotations:
"service.beta.openshift.io/inject-cabundle": "true"

@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: user-workload-monitoring-config
namespace: openshift-user-workload-monitoring
data:
config.yaml: |
prometheus:
retention: 1h

@ -0,0 +1,88 @@
resources:
- ../../crd
- ../../rbac
- ../../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
- ../../prometheus
# Adds namespace to all resources.
namespace: loki-operator
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: loki-operator-
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
commonLabels:
app.kubernetes.io/name: loki-operator
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/version: "0.0.1"
app.kubernetes.io/part-of: loki-operator
app.kubernetes.io/managed-by: operator-lifecycle-manager
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
- manager_related_image_patch.yaml
- manager_run_flags_patch.yaml
- prometheus_service_monitor_patch.yaml
images:
- name: controller
newName: quay.io/viaq/loki-operator
newTag: v0.0.1
# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
#- manager_config_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service

@ -0,0 +1,31 @@
# This patch injects a sidecar container which is an HTTP proxy for the
# controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt"
- "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key"
- "--v=2"
ports:
- containerPort: 8443
name: https
volumeMounts:
- mountPath: /var/run/secrets/serving-cert
name: loki-operator-metrics-cert
volumes:
- name: loki-operator-metrics-cert
secret:
defaultMode: 420
optional: true
secretName: loki-operator-metrics

@ -0,0 +1,14 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
spec:
template:
spec:
containers:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
value: docker.io/grafana/loki:2.4.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest

@ -0,0 +1,11 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
spec:
template:
spec:
containers:
- name: manager
args:
- "--with-lokistack-gateway"

@ -0,0 +1,18 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
name: loki-operator
name: metrics-monitor
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
path: /metrics
targetPort: 8443
scheme: https
interval: 30s
scrapeTimeout: 10s
tlsConfig:
caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt
serverName: loki-operator-controller-manager-metrics-service.loki-operator.svc

@ -0,0 +1,2 @@
resources:
- monitor.yaml

@ -0,0 +1,12 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
name: loki-operator
name: metrics-monitor
spec:
selector:
matchLabels:
app.kubernetes.io/name: loki-operator

@ -0,0 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-reader
rules:
- nonResourceURLs: ["/metrics"]
verbs: ["get"]

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxy-role
rules:
- apiGroups: ["authentication.k8s.io"]
resources:
- tokenreviews
verbs: ["create"]
- apiGroups: ["authorization.k8s.io"]
resources:
- subjectaccessreviews
verbs: ["create"]

@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxy-role
subjects:
- kind: ServiceAccount
name: default
namespace: system

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
annotations:
service.beta.openshift.io/serving-cert-secret-name: loki-operator-metrics
labels:
name: controller-manager-metrics-service
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
selector:
name: loki-operator-controller-manager

@ -0,0 +1,11 @@
resources:
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
- auth_proxy_service.yaml
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml
- prometheus_role.yaml
- prometheus_role_binding.yaml

@ -0,0 +1,27 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: leader-election-role
rules:
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch

@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: leader-election-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: default
namespace: system

@ -0,0 +1,24 @@
# permissions for end users to edit lokistacks.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: lokistack-editor-role
rules:
- apiGroups:
- loki.grafana.com
resources:
- lokistacks
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- loki.grafana.com
resources:
- lokistacks/status
verbs:
- get

@ -0,0 +1,20 @@
# permissions for end users to view lokistacks.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: lokistack-viewer-role
rules:
- apiGroups:
- loki.grafana.com
resources:
- lokistacks
verbs:
- get
- list
- watch
- apiGroups:
- loki.grafana.com
resources:
- lokistacks/status
verbs:
- get
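
# --- For illustration only (not part of this diff): a hypothetical binding
# that gives a group of users read-only access to LokiStack resources via the
# viewer role above:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: lokistack-viewer-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: lokistack-viewer-role
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: log-viewers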

@ -0,0 +1,18 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
include.release.openshift.io/self-managed-high-availability: "true"
include.release.openshift.io/single-node-developer: "true"
name: prometheus
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch

@ -0,0 +1,16 @@
# Grant cluster-monitoring access to openshift-operators-redhat metrics
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: prometheus
annotations:
include.release.openshift.io/self-managed-high-availability: "true"
include.release.openshift.io/single-node-developer: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: openshift-monitoring

@ -0,0 +1,131 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- serviceaccounts
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- deployments
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- config.openshift.io
resources:
- dnses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
- apiGroups:
- loki.grafana.com
resources:
- lokistacks
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- loki.grafana.com
resources:
- lokistacks/finalizers
verbs:
- update
- apiGroups:
- loki.grafana.com
resources:
- lokistacks/status
verbs:
- get
- patch
- update
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- route.openshift.io
resources:
- routes
verbs:
- create
- get
- list
- update
- watch

@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: manager-role
subjects:
- kind: ServiceAccount
name: default
namespace: system

@ -0,0 +1,4 @@
## Append samples you want in your CSV to this file as resources ##
resources:
- loki_v1beta1_lokistack.yaml
# +kubebuilder:scaffold:manifestskustomizesamples

Some files were not shown because too many files have changed in this diff.
