diff --git a/.github/workflows/operator-bundle.yaml b/.github/workflows/operator-bundle.yaml new file mode 100644 index 0000000000..f3a83cfcdf --- /dev/null +++ b/.github/workflows/operator-bundle.yaml @@ -0,0 +1,32 @@ +name: operator bundle + +on: + push: + paths: + - 'operator/**' + branches: [ master ] + pull_request: + paths: + - 'operator/**' + branches: [ master ] + +jobs: + build: + name: build + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + go: ['1.16'] + steps: + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + id: go + - uses: actions/checkout@v2 + - name: Install make + run: sudo apt-get install make + - name: make bundle + run: make bundle + working-directory: ./operator diff --git a/.github/workflows/operator-images.yaml b/.github/workflows/operator-images.yaml new file mode 100644 index 0000000000..0ba5217beb --- /dev/null +++ b/.github/workflows/operator-images.yaml @@ -0,0 +1,132 @@ +name: operator images + +on: + push: + paths: + - 'operator/**' + branches: + - master + +env: + IMAGE_REGISTRY: quay.io + IMAGE_ORGANIZATION: openshift-logging + IMAGE_OPERATOR_NAME: loki-operator + IMAGE_BUNDLE_NAME: loki-operator-bundle + IMAGE_CALCULATOR_NAME: storage-size-calculator + +jobs: + publish-manager: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Login to Quay.io + uses: docker/login-action@v1 + with: + registry: quay.io + logout: true + username: ${{ secrets.OPENSHIFT_LOGGING_USER }} + password: ${{ secrets.OPENSHIFT_LOGGING_PASS }} + + - name: Get image tags + id: image_tags + run: | + echo -n ::set-output name=IMAGE_TAGS:: + PULLSPEC="$IMAGE_REGISTRY/$IMAGE_ORGANIZATION/$IMAGE_OPERATOR_NAME" + TAGS=("$PULLSPEC:latest" "$PULLSPEC:v0.0.1") + BUILD_DATE="$(date -u +'%Y-%m-%d')" + VCS_BRANCH="$(git rev-parse --abbrev-ref HEAD)" + 
VCS_REF="$(git rev-parse --short HEAD)" + TAGS+=("$PULLSPEC:$VCS_BRANCH-$BUILD_DATE-$VCS_REF") + ( IFS=$','; echo "${TAGS[*]}" ) + + - name: Build and publish image on quay.io + uses: docker/build-push-action@v2 + with: + context: ./operator + push: true + tags: "${{ steps.image_tags.outputs.IMAGE_TAGS }}" + + publish-bundle: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Login to Quay.io + uses: docker/login-action@v1 + with: + registry: quay.io + logout: true + username: ${{ secrets.OPENSHIFT_LOGGING_USER }} + password: ${{ secrets.OPENSHIFT_LOGGING_PASS }} + + - name: Get image tags + id: image_tags + run: | + echo -n ::set-output name=IMAGE_TAGS:: + PULLSPEC="$IMAGE_REGISTRY/$IMAGE_ORGANIZATION/$IMAGE_BUNDLE_NAME" + TAGS=("$PULLSPEC:latest" "$PULLSPEC:v0.0.1") + BUILD_DATE="$(date -u +'%Y-%m-%d')" + VCS_BRANCH="$(git rev-parse --abbrev-ref HEAD)" + VCS_REF="$(git rev-parse --short HEAD)" + TAGS+=("$PULLSPEC:$VCS_BRANCH-$BUILD_DATE-$VCS_REF") + ( IFS=$','; echo "${TAGS[*]}" ) + + - name: Build and publish image on quay.io + uses: docker/build-push-action@v2 + with: + context: ./operator + file: bundle.Dockerfile + push: true + tags: "${{ steps.image_tags.outputs.IMAGE_TAGS }}" + + publish-size-calculator: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Login to Quay.io + uses: docker/login-action@v1 + with: + registry: quay.io + logout: true + username: ${{ secrets.OPENSHIFT_LOGGING_USER }} + password: ${{ secrets.OPENSHIFT_LOGGING_PASS }} + + - name: Get image tags + id: image_tags + run: | + echo -n ::set-output name=IMAGE_TAGS:: + PULLSPEC="$IMAGE_REGISTRY/$IMAGE_ORGANIZATION/$IMAGE_CALCULATOR_NAME" + TAGS=("$PULLSPEC:latest" 
"$PULLSPEC:v0.0.1") + BUILD_DATE="$(date -u +'%Y-%m-%d')" + VCS_BRANCH="$(git rev-parse --abbrev-ref HEAD)" + VCS_REF="$(git rev-parse --short HEAD)" + TAGS+=("$PULLSPEC:$VCS_BRANCH-$BUILD_DATE-$VCS_REF") + ( IFS=$','; echo "${TAGS[*]}" ) + + - name: Build and publish image on quay.io + uses: docker/build-push-action@v2 + with: + context: ./operator + file: calculator.Dockerfile + push: true + tags: "${{ steps.image_tags.outputs.IMAGE_TAGS }}" diff --git a/.github/workflows/operator-scorecard.yaml b/.github/workflows/operator-scorecard.yaml new file mode 100644 index 0000000000..595d3ac5bd --- /dev/null +++ b/.github/workflows/operator-scorecard.yaml @@ -0,0 +1,35 @@ +name: operator scorecard + +on: + push: + paths: + - 'operator/**' + branches: [ master ] + pull_request: + paths: + - 'operator/**' + branches: [ master ] + +jobs: + build: + name: scorecard + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + go: ['1.16'] + steps: + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + id: go + - uses: engineerd/setup-kind@v0.5.0 + with: + version: "v0.11.1" + - uses: actions/checkout@v2 + - name: Install make + run: sudo apt-get install make + - name: Run scorecard + run: make scorecard + working-directory: ./operator diff --git a/.github/workflows/operator.yaml b/.github/workflows/operator.yaml new file mode 100644 index 0000000000..86de11fe58 --- /dev/null +++ b/.github/workflows/operator.yaml @@ -0,0 +1,102 @@ +name: operator build + +on: + push: + paths: + - 'operator/**' + branches: [ master ] + pull_request: + paths: + - 'operator/**' + branches: [ master ] + +jobs: + lint: + name: lint + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + go: ['1.16'] + steps: + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + id: go + - uses: actions/checkout@v2 + - name: Lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.38 + skip-go-installation: 
true + only-new-issues: true + args: --timeout=2m + working-directory: ./operator + + build-manager: + name: Build Manager + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + go: ['1.16'] + steps: + - name: Install make + run: sudo apt-get install make + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + id: go + - uses: actions/checkout@v2 + - name: Build Manager + working-directory: ./operator + run: |- + make manager && git diff --exit-code + + build-broker: + name: Build Broker + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + go: ['1.16'] + steps: + - name: Install make + run: sudo apt-get install make + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + id: go + - uses: actions/checkout@v2 + - name: Build Broker + working-directory: ./operator + run: |- + make bin/loki-broker && git diff --exit-code + + test: + name: test + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + go: ['1.16'] + steps: + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + id: go + - uses: actions/checkout@v2 + - name: Run tests + working-directory: ./operator + run: go test -coverprofile=profile.cov ./... + - name: Send coverage + uses: shogo82148/actions-goveralls@v1 + working-directory: ./operator + with: + path-to-profile: profile.cov + flag-name: Go-${{ matrix.go }} + shallow: true diff --git a/operator/.bingo/.gitignore b/operator/.bingo/.gitignore new file mode 100644 index 0000000000..4f2055b6e4 --- /dev/null +++ b/operator/.bingo/.gitignore @@ -0,0 +1,12 @@ + +# Ignore everything +* + +# But not these files: +!.gitignore +!*.mod +!README.md +!Variables.mk +!variables.env + +*tmp.mod diff --git a/operator/.bingo/README.md b/operator/.bingo/README.md new file mode 100644 index 0000000000..7a5c2d4f6d --- /dev/null +++ b/operator/.bingo/README.md @@ -0,0 +1,14 @@ +# Project Development Dependencies. 
+ +This is directory which stores Go modules with pinned buildable package that is used within this repository, managed by https://github.com/bwplotka/bingo. + +* Run `bingo get` to install all tools having each own module file in this directory. +* Run `bingo get ` to install that have own module file in this directory. +* For Makefile: Make sure to put `include .bingo/Variables.mk` in your Makefile, then use $() variable where is the .bingo/.mod. +* For shell: Run `source .bingo/variables.env` to source all environment variable for each tool. +* For go: Import `.bingo/variables.go` to for variable names. +* See https://github.com/bwplotka/bingo or -h on how to add, remove or change binaries dependencies. + +## Requirements + +* Go 1.14+ diff --git a/operator/.bingo/Variables.mk b/operator/.bingo/Variables.mk new file mode 100644 index 0000000000..48f751e022 --- /dev/null +++ b/operator/.bingo/Variables.mk @@ -0,0 +1,55 @@ +# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.5.1. DO NOT EDIT. +# All tools are designed to be build inside $GOBIN. +BINGO_DIR := $(dir $(lastword $(MAKEFILE_LIST))) +GOPATH ?= $(shell go env GOPATH) +GOBIN ?= $(firstword $(subst :, ,${GOPATH}))/bin +GO ?= $(shell which go) + +# Below generated variables ensure that every time a tool under each variable is invoked, the correct version +# will be used; reinstalling only if needed. +# For example for bingo variable: +# +# In your main Makefile (for non array binaries): +# +#include .bingo/Variables.mk # Assuming -dir was set to .bingo . +# +#command: $(BINGO) +# @echo "Running bingo" +# @$(BINGO) +# +BINGO := $(GOBIN)/bingo-v0.4.0 +$(BINGO): $(BINGO_DIR)/bingo.mod + @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. 
+ @echo "(re)installing $(GOBIN)/bingo-v0.4.0" + @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.4.0 "github.com/bwplotka/bingo" + +CONTROLLER_GEN := $(GOBIN)/controller-gen-v0.5.0 +$(CONTROLLER_GEN): $(BINGO_DIR)/controller-gen.mod + @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. + @echo "(re)installing $(GOBIN)/controller-gen-v0.5.0" + @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=controller-gen.mod -o=$(GOBIN)/controller-gen-v0.5.0 "sigs.k8s.io/controller-tools/cmd/controller-gen" + +GOFUMPT := $(GOBIN)/gofumpt-v0.1.1 +$(GOFUMPT): $(BINGO_DIR)/gofumpt.mod + @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. + @echo "(re)installing $(GOBIN)/gofumpt-v0.1.1" + @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=gofumpt.mod -o=$(GOBIN)/gofumpt-v0.1.1 "mvdan.cc/gofumpt" + +GOLANGCI_LINT := $(GOBIN)/golangci-lint-v1.38.0 +$(GOLANGCI_LINT): $(BINGO_DIR)/golangci-lint.mod + @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. + @echo "(re)installing $(GOBIN)/golangci-lint-v1.38.0" + @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v1.38.0 "github.com/golangci/golangci-lint/cmd/golangci-lint" + +KUSTOMIZE := $(GOBIN)/kustomize-v3.8.7 +$(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod + @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. 
+ @echo "(re)installing $(GOBIN)/kustomize-v3.8.7" + @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=kustomize.mod -o=$(GOBIN)/kustomize-v3.8.7 "sigs.k8s.io/kustomize/kustomize/v3" + +OPERATOR_SDK := $(GOBIN)/operator-sdk-v1.11.0 +$(OPERATOR_SDK): $(BINGO_DIR)/operator-sdk.mod + @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. + @echo "(re)installing $(GOBIN)/operator-sdk-v1.11.0" + @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=operator-sdk.mod -o=$(GOBIN)/operator-sdk-v1.11.0 "github.com/operator-framework/operator-sdk/cmd/operator-sdk" + diff --git a/operator/.bingo/bingo.mod b/operator/.bingo/bingo.mod new file mode 100644 index 0000000000..0e08d26de0 --- /dev/null +++ b/operator/.bingo/bingo.mod @@ -0,0 +1,5 @@ +module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT + +go 1.16 + +require github.com/bwplotka/bingo v0.4.0 diff --git a/operator/.bingo/controller-gen.mod b/operator/.bingo/controller-gen.mod new file mode 100644 index 0000000000..d9323d1e76 --- /dev/null +++ b/operator/.bingo/controller-gen.mod @@ -0,0 +1,5 @@ +module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT + +go 1.16 + +require sigs.k8s.io/controller-tools v0.5.0 // cmd/controller-gen diff --git a/operator/.bingo/go.mod b/operator/.bingo/go.mod new file mode 100644 index 0000000000..610249af0b --- /dev/null +++ b/operator/.bingo/go.mod @@ -0,0 +1 @@ +module _ // Fake go.mod auto-created by 'bingo' for go -moddir compatibility with non-Go projects. Commit this file, together with other .mod files. \ No newline at end of file diff --git a/operator/.bingo/gofumpt.mod b/operator/.bingo/gofumpt.mod new file mode 100644 index 0000000000..d23f49c39a --- /dev/null +++ b/operator/.bingo/gofumpt.mod @@ -0,0 +1,5 @@ +module _ // Auto generated by https://github.com/bwplotka/bingo. 
DO NOT EDIT + +go 1.16 + +require mvdan.cc/gofumpt v0.1.1 diff --git a/operator/.bingo/golangci-lint.mod b/operator/.bingo/golangci-lint.mod new file mode 100644 index 0000000000..4ae745cd31 --- /dev/null +++ b/operator/.bingo/golangci-lint.mod @@ -0,0 +1,5 @@ +module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT + +go 1.16 + +require github.com/golangci/golangci-lint v1.38.0 // cmd/golangci-lint diff --git a/operator/.bingo/kustomize.mod b/operator/.bingo/kustomize.mod new file mode 100644 index 0000000000..32bec0649e --- /dev/null +++ b/operator/.bingo/kustomize.mod @@ -0,0 +1,5 @@ +module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT + +go 1.16 + +require sigs.k8s.io/kustomize/kustomize/v3 v3.8.7 diff --git a/operator/.bingo/operator-sdk.mod b/operator/.bingo/operator-sdk.mod new file mode 100644 index 0000000000..68b8ddbc77 --- /dev/null +++ b/operator/.bingo/operator-sdk.mod @@ -0,0 +1,13 @@ +module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT + +go 1.16 + +replace github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible + +replace github.com/containerd/containerd => github.com/containerd/containerd v1.4.3 + +replace github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.10.0 + +replace golang.org/x/text => golang.org/x/text v0.3.3 + +require github.com/operator-framework/operator-sdk v1.11.0 // cmd/operator-sdk diff --git a/operator/.bingo/variables.env b/operator/.bingo/variables.env new file mode 100644 index 0000000000..7bb3fe942f --- /dev/null +++ b/operator/.bingo/variables.env @@ -0,0 +1,22 @@ +# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.5.1. DO NOT EDIT. +# All tools are designed to be build inside $GOBIN. +# Those variables will work only until 'bingo get' was invoked, or if tools were installed via Makefile's Variables.mk. 
+GOBIN=${GOBIN:=$(go env GOBIN)} + +if [ -z "$GOBIN" ]; then + GOBIN="$(go env GOPATH)/bin" +fi + + +BINGO="${GOBIN}/bingo-v0.4.0" + +CONTROLLER_GEN="${GOBIN}/controller-gen-v0.5.0" + +GOFUMPT="${GOBIN}/gofumpt-v0.1.1" + +GOLANGCI_LINT="${GOBIN}/golangci-lint-v1.38.0" + +KUSTOMIZE="${GOBIN}/kustomize-v3.8.7" + +OPERATOR_SDK="${GOBIN}/operator-sdk-v1.11.0" + diff --git a/operator/.dockerignore b/operator/.dockerignore new file mode 100644 index 0000000000..243f81a508 --- /dev/null +++ b/operator/.dockerignore @@ -0,0 +1,5 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore all files which are not go type +!**/*.go +!**/*.mod +!**/*.sum diff --git a/operator/.gitignore b/operator/.gitignore new file mode 100644 index 0000000000..c0a7a54cac --- /dev/null +++ b/operator/.gitignore @@ -0,0 +1,25 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin +testbin/* + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ diff --git a/operator/.golangci.yaml b/operator/.golangci.yaml new file mode 100644 index 0000000000..36f87d1941 --- /dev/null +++ b/operator/.golangci.yaml @@ -0,0 +1,43 @@ +--- +run: + tests: false + skip-files: + - "example_.+_test.go$" + +# golangci.com configuration +# https://github.com/golangci/golangci/wiki/Configuration +linters-settings: + govet: + check-shadowing: true + maligned: + suggest-new: true + misspell: + locale: US + +linters: + enable-all: false + enable: + - deadcode # Finds unused code + - errcheck # Errcheck is a program for checking for unchecked errors in go programs. 
These unchecked errors can be critical bugs in some cases + - goerr113 # checks that errors are wrapped according to go 1.13 error wrapping tools + - gofumpt # checks that gofumpt was run on all source code + - goimports # checks that goimports was run on all source code + - golint + - gosimple # Linter for Go source code that specializes in simplifying a code + - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ineffassign # Detects when assignments to existing variables are not used + - misspell # spell checker + - rowserrcheck # checks whether Err of rows is checked successfully + - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - structcheck # Finds unused struct fields + - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + - unused # Checks Go code for unused constants, variables, functions and types + - varcheck # Finds unused global variables and constants + +issues: + exclude-use-default: false + exclude-rules: + # - text: "could be of size" + # path: api/v1beta1/lokistack_types.go + # linters: + # - maligned diff --git a/operator/CONTRIBUTING.md b/operator/CONTRIBUTING.md new file mode 100644 index 0000000000..aa39cb9225 --- /dev/null +++ b/operator/CONTRIBUTING.md @@ -0,0 +1,20 @@ +Contributing to Loki Operator + +## Ideology + +OpenShift has proven to be a powerful and successful platform for running containers in production. Our primary goal is to bring Loki Operator to our customers. That being said, it is a very large platform intended for large-scale production use. It is not intended to be ephemeral. + +The tools required to run and test an OCP cluster are complex and cumbersome. The current processes to build an OCP cluster include the slack cluster-bot, openshift-install script, and CRC. The fastest route to create a working OCP cluster is 45 minutes. 
CRC *may* be faster, but it requires over [half of your local machine's resources](https://coreos.slack.com/archives/GGUR75P60/p1591803889037800) and doesn’t handle sleeping/suspending very well. Using openshift-install comes with its own [headaches](https://coreos.slack.com/archives/GGUR75P60/p1615458361119300). These blockers cause a significant amount of [wasted time](https://coreos.slack.com/archives/GGUR75P60/p1599242159479000?thread_ts=1599241354.478700&cid=GGUR75P60) that could be spent on more valuable things. + +Nevertheless, I argue that none of this is necessary. The problems are caused when we bastardize a large, complex, production platform for testing and tooling. OpenShift is a superset of Kubernetes. Operators are now Kubernetes native. Given this reality, we have called the Loki Operator a Kubernetes operator rather than an OpenShift operator. This may seem like a trivial delineation, but it isn’t. The operator has been designed from the beginning using Kubernetes tools and APIs. This has allowed us to build, test, and deploy in very little time with very little effort. It is not uncommon to create a pull request and have it [reviewed and merged](https://github.com/grafana/loki/pulls?q=is%3Apr+is%3Aclosed) within 15 minutes. + +There are certainly OCP exclusives that we want to program into the Loki Operator, but this shouldn’t block or break the primary objectives. In other words, the Loki Operator should be Kubernetes first and OpenShift second. The Loki Operator should be open to using the OpenShift APIs without requiring them. All tools, automation, scripts, make targets, etc, should work naturally with Kubernetes and Kubernetes compatible APIs. OCP exclusives should be opt-in. It might be natural for you to think this causes obstruction for deploying to OCP, but that is far from true. Packaging for OCP should be a scripted process that, once opted in, should build all of the necessary components. So far, it has proven to be successful. 
+ +## Tooling + +We use [KinD](https://github.com/kubernetes-sigs/kind) to deploy and test the Loki Operator. We have had no compatibility issues, no wasted time on a learning curve, no failed clusters, no token expirations, no cluster expirations, no spinning laptop fans from gluttonous virtual machines, etc. It takes approximately 20 seconds to create a local KinD cluster and your machine won’t even notice it’s running. The cluster is fully compatible with all Kubernetes APIs and the operator runs on KinD perfectly. After your KinD cluster is created your kubeconfig is updated and the Makefile will work. The Makefiles and scripts are written to work with kubectl. This abstraction prevents any unnecessary complications caused by magic processes like deploying images to internal clusters, etc. + + +## Testing + +Tests should be succinct and without dependencies. This means that unit tests are the de-facto form of testing the Loki Operator. Unit tests are written with the standard Go library using [testify](https://github.com/stretchr/testify) for assertions. [Counterfeiter](https://github.com/maxbrunsfeld/counterfeiter) is included for generating test fakes and stubs for all dependencies. This library provides an API for generating fake implementations of interfaces for injecting them into testable units of code. Unit tests should implement or stub *only the parts required to test*. Large, all-inclusive structs should be avoided in favor of concise, single responsibility functions. This encourages small tests with minimal assertions to keep them hyper-focused, making tests easy to create *and* maintain. 
diff --git a/operator/Dockerfile b/operator/Dockerfile new file mode 100644 index 0000000000..d48c34d04c --- /dev/null +++ b/operator/Dockerfile @@ -0,0 +1,28 @@ +# Build the manager binary +FROM golang:1.16 as builder + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY main.go main.go +COPY api/ api/ +COPY controllers/ controllers/ +COPY internal/ internal/ + +# Build +RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -a -o manager main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/operator/Makefile b/operator/Makefile new file mode 100644 index 0000000000..b6d7187e66 --- /dev/null +++ b/operator/Makefile @@ -0,0 +1,220 @@ +# include the bingo binary variables. This enables the bingo versions to be +# referenced here as make variables. For example: $(GOLANGCI_LINT) +include .bingo/Variables.mk + +# set the default target here, because the include above will automatically set +# it to the first defined target +.DEFAULT_GOAL := default +default: all + +# CLUSTER_LOGGING_VERSION +# defines the version of the OpenShift Cluster Logging product. +# Updates this value when a new version of the product should include this operator and its bundle. +CLUSTER_LOGGING_VERSION ?= 5.1.preview.1 + +# CLUSTER_LOGGING_NS +# defines the default namespace of the OpenShift Cluster Logging product. +CLUSTER_LOGGING_NS ?= openshift-logging + +# VERSION +# defines the project version for the bundle. +# Update this value when you upgrade the version of your project. 
+# To re-generate a bundle for another specific version without changing the standard setup, you can: +# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) +# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) +VERSION ?= v0.0.1 +CHANNELS ?= "tech-preview" +DEFAULT_CHANNELS ?= "tech-preview" + +# CHANNELS define the bundle channels used in the bundle. +# Add a new line here if you would like to change its default config. (E.g CHANNELS = "preview,fast,stable") +# To re-generate a bundle for other specific channels without changing the standard setup, you can: +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=preview,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="preview,fast,stable") +ifneq ($(origin CHANNELS), undefined) +BUNDLE_CHANNELS := --channels=$(CHANNELS) +endif + +# DEFAULT_CHANNEL defines the default channel used in the bundle. +# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") +# To re-generate a bundle for any other default channel without changing the default setup, you can: +# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) +# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") +ifneq ($(origin DEFAULT_CHANNEL), undefined) +BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) +endif +BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) + +REGISTRY_ORG ?= openshift-logging + +# BUNDLE_IMG defines the image:tag used for the bundle. +# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) +BUNDLE_IMG ?= quay.io/$(REGISTRY_ORG)/loki-operator-bundle:$(VERSION) + +CALCULATOR_IMG ?= quay.io/$(REGISTRY_ORG)/storage-size-calculator:latest + +GO_FILES := $(shell find . 
-type f -name '*.go') + +# Image URL to use all building/pushing image targets +IMG ?= quay.io/$(REGISTRY_ORG)/loki-operator:$(VERSION) + +# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) +CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +all: generate lint manager bin/loki-broker + +OCI_RUNTIME ?= $(shell which podman || which docker) + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +help: ## Display this help. 
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: deps +deps: go.mod go.sum + go mod tidy + go mod download + go mod verify + +cli: deps bin/loki-broker ## Build loki-broker CLI binary +bin/loki-broker: $(GO_FILES) | generate + go build -o $@ ./cmd/loki-broker/ + +manager: deps generate ## Build manager binary + go build -o bin/manager main.go + +size-calculator: deps generate ## Build size-calculator binary + go build -o bin/size-calculator main.go + +go-generate: ## Run go generate + go generate ./... + +generate: $(CONTROLLER_GEN) ## Generate controller and crd code + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +test: deps generate go-generate lint manifests ## Run tests +test: $(GO_FILES) + go test ./... -coverprofile cover.out + +scorecard: generate go-generate bundle ## Run scorecard test + $(OPERATOR_SDK) scorecard bundle + +lint: $(GOLANGCI_LINT) | generate ## Run golangci-lint on source code. + $(GOLANGCI_LINT) run ./... + +fmt: $(GOFUMPT) ## Run gofumpt on source code. + find . -type f -name '*.go' -not -path '**/fake_*.go' -exec $(GOFUMPT) -s -w {} \; + +oci-build: ## Build the image + $(OCI_RUNTIME) build -t ${IMG} . + +oci-push: ## Push the image + $(OCI_RUNTIME) push ${IMG} + +.PHONY: bundle ## Generate bundle manifests and metadata, then validate generated files. 
+bundle: manifests $(KUSTOMIZE) $(OPERATOR_SDK) + $(OPERATOR_SDK) generate kustomize manifests -q + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle -q --overwrite --version $(subst v,,$(VERSION)) $(BUNDLE_METADATA_OPTS) + $(OPERATOR_SDK) bundle validate ./bundle + +.PHONY: bundle-build +bundle-build: ## Build the bundle image. + $(OCI_RUNTIME) build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + +##@ Deployment + +run: generate manifests ## Run against the configured Kubernetes cluster in ~/.kube/config + go run ./main.go + +install: manifests $(KUSTOMIZE) ## Install CRDs into a cluster + $(KUSTOMIZE) build config/crd | kubectl apply -f - + +uninstall: manifests $(KUSTOMIZE) ## Uninstall CRDs from a cluster + $(KUSTOMIZE) build config/crd | kubectl delete -f - + +deploy: manifests $(KUSTOMIZE) ## Deploy controller in the configured Kubernetes cluster in ~/.kube/config + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/overlays/development | kubectl apply -f - + +undeploy: ## Undeploy controller from the configured Kubernetes cluster in ~/.kube/config + $(KUSTOMIZE) build config/overlays/development | kubectl delete -f - + +# Build and push the bundle image to a container registry. +.PHONY: olm-deploy-bundle +olm-deploy-bundle: bundle bundle-build + $(MAKE) oci-push IMG=$(BUNDLE_IMG) + +# Build and push the operator image to a container registry. +.PHONY: olm-deploy-operator +olm-deploy-operator: oci-build oci-push + +.PHONY: olm-deploy +ifeq ($(or $(findstring openshift-logging,$(IMG)),$(findstring openshift-logging,$(BUNDLE_IMG))),openshift-logging) +olm-deploy: ## Deploy the operator bundle and the operator via OLM into an Kubernetes cluster selected via KUBECONFIG. 
+ $(error Set variable REGISTRY_ORG to use a custom container registry org account for local development) +else +olm-deploy: olm-deploy-bundle olm-deploy-operator $(OPERATOR_SDK) + kubectl create ns $(CLUSTER_LOGGING_NS) + kubectl label ns/$(CLUSTER_LOGGING_NS) openshift.io/cluster-monitoring=true --overwrite + $(OPERATOR_SDK) run bundle -n $(CLUSTER_LOGGING_NS) --install-mode OwnNamespace $(BUNDLE_IMG) +endif + +# Build and push the secret for the S3 storage +.PHONY: olm-deploy-example-storage-secret +olm-deploy-example-storage-secret: + hack/deploy-example-secret.sh $(CLUSTER_LOGGING_NS) + +.PHONY: olm-deploy-example +olm-deploy-example: olm-deploy olm-deploy-example-storage-secret ## Deploy example LokiStack custom resource + kubectl -n $(CLUSTER_LOGGING_NS) create -f hack/lokistack_dev.yaml + +.PHONY: olm-undeploy +olm-undeploy: $(OPERATOR_SDK) ## Cleanup deployments of the operator bundle and the operator via OLM on an OpenShift cluster selected via KUBECONFIG. + $(OPERATOR_SDK) cleanup loki-operator + kubectl delete ns $(CLUSTER_LOGGING_NS) + +.PHONY: deploy-size-calculator +ifeq ($(findstring openshift-logging,$(CALCULATOR_IMG)),openshift-logging) +deploy-size-calculator: ## Deploy storage size calculator (OpenShift only!) + $(error Set variable REGISTRY_ORG to use a custom container registry org account for local development) +else +deploy-size-calculator: $(KUSTOMIZE) ## Deploy storage size calculator (OpenShift only!) 
+ kubectl apply -f config/overlays/openshift/size-calculator/cluster_monitoring_config.yaml + kubectl apply -f config/overlays/openshift/size-calculator/user_workload_monitoring_config.yaml + ./hack/deploy-prometheus-secret.sh + $(KUSTOMIZE) build config/overlays/openshift/size-calculator | kubectl apply -f - +endif + +.PHONY: undeploy-size-calculator +undeploy-size-calculator: ## Undeploy storage size calculator + $(KUSTOMIZE) build config/overlays/openshift/size-calculator | kubectl delete -f - + +oci-build-calculator: ## Build the calculator image + $(OCI_RUNTIME) build -f calculator.Dockerfile -t $(CALCULATOR_IMG) . + +oci-push-calculator: ## Push the calculator image + $(OCI_RUNTIME) push $(CALCULATOR_IMG) diff --git a/operator/PROJECT b/operator/PROJECT new file mode 100644 index 0000000000..29cbcc3a9d --- /dev/null +++ b/operator/PROJECT @@ -0,0 +1,19 @@ +domain: grafana.com +layout: +- go.kubebuilder.io/v3 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: loki-operator +repo: github.com/grafana/loki +resources: +- api: + crdVersion: v1beta1 + namespaced: true + controller: true + domain: grafana.com + group: loki + kind: LokiStack + path: github.com/grafana/loki/operator/api/v1beta1 + version: v1beta1 +version: "3" diff --git a/operator/README.md b/operator/README.md new file mode 100644 index 0000000000..f9818f8bfd --- /dev/null +++ b/operator/README.md @@ -0,0 +1,29 @@ +![](img/loki-operator.png) + +# Loki Operator + +This is the Kubernetes Operator for [Loki](https://grafana.com/docs/loki/latest/) +provided by the Grafana Loki SIG operator. **This is currently a work in +progress and is subject to large scale changes that will break any dependencies. +Do not use this in any production environment.** + +## Development + +Requirements: + + 1. Running Kubernetes cluster. Our team uses + [KinD](https://kind.sigs.k8s.io/docs/user/quick-start/) or + [K3s](https://k3s.io/) for simplicity. + 1. 
A container registry that you and your Kubernetes cluster can reach. We + recommend [quay.io](https://quay.io/signin/). + +Build and push the container image and then deploy the operator with `make +oci-build oci-push deploy IMG=quay.io/my-team/loki-operator:latest`. This will +deploy to your active Kubernetes/OpenShift cluster defined by your local +[kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/). + +For detailed step-by-step guide on how to start development and testing on Kind and OpenShift, +check our [documentation](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md) + +Also, there is a [document](https://github.com/grafana/loki/blob/master/operator/docs/hack_operator_make_run.md) which +demonstrates how to use Loki Operator for development and testing locally without deploying the operator each time on Kind and OpenShift. diff --git a/operator/_config.yml b/operator/_config.yml new file mode 100644 index 0000000000..c741881743 --- /dev/null +++ b/operator/_config.yml @@ -0,0 +1 @@ +theme: jekyll-theme-slate \ No newline at end of file diff --git a/operator/api/v1beta1/groupversion_info.go b/operator/api/v1beta1/groupversion_info.go new file mode 100644 index 0000000000..7669f7372c --- /dev/null +++ b/operator/api/v1beta1/groupversion_info.go @@ -0,0 +1,20 @@ +// Package v1beta1 contains API Schema definitions for the loki v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=loki.grafana.com +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "loki.grafana.com", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the 
given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/operator/api/v1beta1/lokistack_types.go b/operator/api/v1beta1/lokistack_types.go new file mode 100644 index 0000000000..681017ec03 --- /dev/null +++ b/operator/api/v1beta1/lokistack_types.go @@ -0,0 +1,660 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// ManagementStateType defines the type for CR management states. +// +// +kubebuilder:validation:Enum=Managed;Unmanaged +type ManagementStateType string + +const ( + // ManagementStateManaged when the LokiStack custom resource should be + // reconciled by the operator. + ManagementStateManaged ManagementStateType = "Managed" + + // ManagementStateUnmanaged when the LokiStack custom resource should not be + // reconciled by the operator. + ManagementStateUnmanaged ManagementStateType = "Unmanaged" +) + +// LokiStackSizeType declares the type for loki cluster scale outs. +// +// +kubebuilder:validation:Enum="1x.extra-small";"1x.small";"1x.medium" +type LokiStackSizeType string + +const ( + // SizeOneXExtraSmall defines the size of a single Loki deployment + // with extra small resources/limits requirements and without HA support. + // This size is ultimately dedicated for development and demo purposes. + // DO NOT USE THIS IN PRODUCTION! + // + // FIXME: Add clear description of ingestion/query performance expectations. + SizeOneXExtraSmall LokiStackSizeType = "1x.extra-small" + + // SizeOneXSmall defines the size of a single Loki deployment + // with small resources/limits requirements and HA support for all + // Loki components. This size is dedicated for setup **without** the + // requirement for single replication factor and auto-compaction. 
+ // + // FIXME: Add clear description of ingestion/query performance expectations. + SizeOneXSmall LokiStackSizeType = "1x.small" + + // SizeOneXMedium defines the size of a single Loki deployment + // with small resources/limits requirements and HA support for all + // Loki components. This size is dedicated for setup **with** the + // requirement for single replication factor and auto-compaction. + // + // FIXME: Add clear description of ingestion/query performance expectations. + SizeOneXMedium LokiStackSizeType = "1x.medium" +) + +// SubjectKind is a kind of LokiStack Gateway RBAC subject. +// +// +kubebuilder:validation:Enum=user;group +type SubjectKind string + +const ( + // User represents a subject that is a user. + User SubjectKind = "user" + // Group represents a subject that is a group. + Group SubjectKind = "group" +) + +// Subject represents a subject that has been bound to a role. +type Subject struct { + Name string `json:"name"` + Kind SubjectKind `json:"kind"` +} + +// RoleBindingsSpec binds a set of roles to a set of subjects. +type RoleBindingsSpec struct { + Name string `json:"name"` + Subjects []Subject `json:"subjects"` + Roles []string `json:"roles"` +} + +// PermissionType is a LokiStack Gateway RBAC permission. +// +// +kubebuilder:validation:Enum=read;write +type PermissionType string + +const ( + // Write gives access to write data to a tenant. + Write PermissionType = "write" + // Read gives access to read data from a tenant. + Read PermissionType = "read" +) + +// RoleSpec describes a set of permissions to interact with a tenant. +type RoleSpec struct { + Name string `json:"name"` + Resources []string `json:"resources"` + Tenants []string `json:"tenants"` + Permissions []PermissionType `json:"permissions"` +} + +// OPASpec defines the opa configuration spec for lokiStack Gateway component. +type OPASpec struct { + // URL defines the third-party endpoint for authorization. 
+ // + // +required + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OpenPolicyAgent URL" + URL string `json:"url"` +} + +// AuthorizationSpec defines the opa, role bindings and roles +// configuration per tenant for lokiStack Gateway component. +type AuthorizationSpec struct { + // OPA defines the spec for the third-party endpoint for tenant's authorization. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OPA Configuration" + OPA *OPASpec `json:"opa"` + // Roles defines a set of permissions to interact with a tenant. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Static Roles" + Roles []RoleSpec `json:"roles"` + // RoleBindings defines configuration to bind a set of roles to a set of subjects. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Static Role Bindings" + RoleBindings []RoleBindingsSpec `json:"roleBindings"` +} + +// TenantSecretSpec is a secret reference containing name only +// for a secret living in the same namespace as the LokiStack custom resource. +type TenantSecretSpec struct { + // Name of a secret in the namespace configured for tenant secrets. + // + // +required + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:Secret",displayName="Tenant Secret Name" + Name string `json:"name"` +} + +// OIDCSpec defines the oidc configuration spec for lokiStack Gateway component. +type OIDCSpec struct { + // Secret defines the spec for the clientID, clientSecret and issuerCAPath for tenant's authentication. 
+ // + // +required + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant Secret" + Secret *TenantSecretSpec `json:"secret"` + // IssuerURL defines the URL for issuer. + // + // +required + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Issuer URL" + IssuerURL string `json:"issuerURL"` + // RedirectURL defines the URL for redirect. + // + // +required + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Redirect URL" + RedirectURL string `json:"redirectURL"` + GroupClaim string `json:"groupClaim"` + UsernameClaim string `json:"usernameClaim"` +} + +// AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component. +type AuthenticationSpec struct { + // TenantName defines the name of the tenant. + // + // +required + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant Name" + TenantName string `json:"tenantName"` + // TenantID defines the id of the tenant. + // + // +required + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant ID" + TenantID string `json:"tenantId"` + // OIDC defines the spec for the OIDC tenant's authentication. + // + // +required + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OIDC Configuration" + OIDC *OIDCSpec `json:"oidc"` +} + +// ModeType is the authentication/authorization mode in which LokiStack Gateway will be configured. +// +// +kubebuilder:validation:Enum=static;dynamic;openshift-logging +type ModeType string + +const ( + // Static mode asserts the Authorization Spec's Roles and RoleBindings + // using an in-process OpenPolicyAgent Rego authorizer. 
+ Static ModeType = "static" + // Dynamic mode delegates the authorization to a third-party OPA-compatible endpoint. + Dynamic ModeType = "dynamic" + // OpenshiftLogging mode provides fully automatic OpenShift in-cluster authentication and authorization support. + OpenshiftLogging ModeType = "openshift-logging" +) + +// TenantsSpec defines the mode, authentication and authorization +// configuration of the lokiStack gateway component. +type TenantsSpec struct { + // Mode defines the mode in which lokistack-gateway component will be configured. + // + // +required + // +kubebuilder:validation:Required + // +kubebuilder:default:=openshift-logging + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:static","urn:alm:descriptor:com.tectonic.ui:select:dynamic","urn:alm:descriptor:com.tectonic.ui:select:openshift-logging"},displayName="Mode" + Mode ModeType `json:"mode"` + // Authentication defines the lokistack-gateway component authentication configuration spec per tenant. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Authentication" + Authentication []AuthenticationSpec `json:"authentication,omitempty"` + // Authorization defines the lokistack-gateway component authorization configuration spec per tenant. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Authorization" + Authorization *AuthorizationSpec `json:"authorization,omitempty"` +} + +// LokiComponentSpec defines the requirements to configure scheduling +// of each loki component individually. +type LokiComponentSpec struct { + // Replicas defines the number of replica pods of the component. 
+ // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:hidden" + Replicas int32 `json:"replicas,omitempty"` + + // NodeSelector defines the labels required by a node to schedule + // the component onto it. + // + // +optional + // +kubebuilder:validation:Optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations defines the tolerations required by a node to schedule + // the component onto it. + // + // +optional + // +kubebuilder:validation:Optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// LokiTemplateSpec defines the template of all requirements to configure +// scheduling of all Loki components to be deployed. +type LokiTemplateSpec struct { + + // Compactor defines the compaction component spec. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Compactor pods" + Compactor *LokiComponentSpec `json:"compactor,omitempty"` + + // Distributor defines the distributor component spec. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Distributor pods" + Distributor *LokiComponentSpec `json:"distributor,omitempty"` + + // Ingester defines the ingester component spec. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Ingester pods" + Ingester *LokiComponentSpec `json:"ingester,omitempty"` + + // Querier defines the querier component spec. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Querier pods" + Querier *LokiComponentSpec `json:"querier,omitempty"` + + // QueryFrontend defines the query frontend component spec. 
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Query Frontend pods"
+	QueryFrontend *LokiComponentSpec `json:"queryFrontend,omitempty"`
+
+	// Gateway defines the lokistack gateway component spec.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Gateway pods"
+	Gateway *LokiComponentSpec `json:"gateway,omitempty"`
+
+	// IndexGateway defines the index gateway component spec.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Index Gateway pods"
+	IndexGateway *LokiComponentSpec `json:"indexGateway,omitempty"`
+}
+
+// ObjectStorageSecretSpec is a secret reference containing name only, no namespace.
+type ObjectStorageSecretSpec struct {
+	// Name of a secret in the namespace configured for object storage secrets.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:Secret",displayName="Object Storage Secret"
+	Name string `json:"name"`
+}
+
+// ObjectStorageSpec defines the requirements to access the object
+// storage bucket to persist logs by the ingester component.
+type ObjectStorageSpec struct {
+	// Secret for object storage authentication.
+	// Name of a secret in the same namespace as the cluster logging operator.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	Secret ObjectStorageSecretSpec `json:"secret"`
+}
+
+// QueryLimitSpec defines the limits applied at the query path.
+type QueryLimitSpec struct {
+
+	// MaxEntriesLimitPerQuery defines the maximum number of log entries
+	// that will be returned for a query.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Entries Limit per Query"
+	MaxEntriesLimitPerQuery int32 `json:"maxEntriesLimitPerQuery,omitempty"`
+
+	// MaxChunksPerQuery defines the maximum number of chunks
+	// that can be fetched by a single query.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Chunk per Query"
+	MaxChunksPerQuery int32 `json:"maxChunksPerQuery,omitempty"`
+
+	// MaxQuerySeries defines the maximum number of unique series
+	// that is returned by a metric query.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Query Series"
+	MaxQuerySeries int32 `json:"maxQuerySeries,omitempty"`
+}
+
+// IngestionLimitSpec defines the limits applied at the ingestion path.
+type IngestionLimitSpec struct {
+
+	// IngestionRate defines the sample size per second. Units MB.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Ingestion Rate (in MB)"
+	IngestionRate int32 `json:"ingestionRate,omitempty"`
+
+	// IngestionBurstSize defines the local rate-limited sample size per
+	// distributor replica. It should be set at least to the
+	// maximum logs size expected in a single push request.
+ // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Ingestion Burst Size (in MB)" + IngestionBurstSize int32 `json:"ingestionBurstSize,omitempty"` + + // MaxLabelNameLength defines the maximum number of characters allowed + // for label keys in log streams. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Label Name Length" + MaxLabelNameLength int32 `json:"maxLabelNameLength,omitempty"` + + // MaxLabelValueLength defines the maximum number of characters allowed + // for label values in log streams. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Label Value Length" + MaxLabelValueLength int32 `json:"maxLabelValueLength,omitempty"` + + // MaxLabelNamesPerSeries defines the maximum number of label names per series + // in each log stream. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Labels Names per Series" + MaxLabelNamesPerSeries int32 `json:"maxLabelNamesPerSeries,omitempty"` + + // MaxGlobalStreamsPerTenant defines the maximum number of active streams + // per tenant, across the cluster. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Global Streams per Tenant" + MaxGlobalStreamsPerTenant int32 `json:"maxGlobalStreamsPerTenant,omitempty"` + + // MaxLineSize defines the maximum line size on ingestion path. Units in Bytes. 
+ // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Line Size" + MaxLineSize int32 `json:"maxLineSize,omitempty"` +} + +// LimitsTemplateSpec defines the limits applied at ingestion or query path. +type LimitsTemplateSpec struct { + // IngestionLimits defines the limits applied on ingested log streams. + // + // +optional + // +kubebuilder:validation:Optional + IngestionLimits *IngestionLimitSpec `json:"ingestion,omitempty"` + + // QueryLimits defines the limit applied on querying log streams. + // + // +optional + // +kubebuilder:validation:Optional + QueryLimits *QueryLimitSpec `json:"queries,omitempty"` +} + +// LimitsSpec defines the spec for limits applied at ingestion or query +// path across the cluster or per tenant. +type LimitsSpec struct { + + // Global defines the limits applied globally across the cluster. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Global Limits" + Global *LimitsTemplateSpec `json:"global,omitempty"` + + // Tenants defines the limits applied per tenant. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Limits per Tenant" + Tenants map[string]LimitsTemplateSpec `json:"tenants,omitempty"` +} + +// LokiStackSpec defines the desired state of LokiStack +type LokiStackSpec struct { + + // ManagementState defines if the CR should be managed by the operator or not. + // Default is managed. 
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default:=Managed
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:Managed","urn:alm:descriptor:com.tectonic.ui:select:Unmanaged"},displayName="Management State"
+	ManagementState ManagementStateType `json:"managementState,omitempty"`
+
+	// Size defines one of the supported Loki deployment scale out sizes.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small","urn:alm:descriptor:com.tectonic.ui:select:1x.small","urn:alm:descriptor:com.tectonic.ui:select:1x.medium"},displayName="LokiStack Size"
+	Size LokiStackSizeType `json:"size"`
+
+	// Storage defines the spec for the object storage endpoint to store logs.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Object Storage"
+	Storage ObjectStorageSpec `json:"storage"`
+
+	// Storage class name defines the storage class for ingester/querier PVCs.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:StorageClass",displayName="Storage Class Name"
+	StorageClassName string `json:"storageClassName"`
+
+	// ReplicationFactor defines the policy for log stream replication.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Minimum:=1
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Replication Factor"
+	ReplicationFactor int32 `json:"replicationFactor"`
+
+	// Limits defines the limits to be applied to log stream processing.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Rate Limiting"
+	Limits *LimitsSpec `json:"limits,omitempty"`
+
+	// Template defines the resource/limits/tolerations/nodeselectors per component
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Node Placement"
+	Template *LokiTemplateSpec `json:"template,omitempty"`
+
+	// Tenants defines the per-tenant authentication and authorization spec for the lokistack-gateway component.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenants Configuration"
+	Tenants *TenantsSpec `json:"tenants,omitempty"`
+}
+
+// LokiStackConditionType defines the type of condition types of a Loki deployment.
+type LokiStackConditionType string
+
+const (
+	// ConditionReady defines the condition that all components in the Loki deployment are ready.
+	ConditionReady LokiStackConditionType = "Ready"
+
+	// ConditionPending defines the condition that some or all components are in pending state.
+	ConditionPending LokiStackConditionType = "Pending"
+
+	// ConditionFailed defines the condition that components in the Loki deployment failed to roll out.
+	ConditionFailed LokiStackConditionType = "Failed"
+
+	// ConditionDegraded defines the condition that some or all components in the Loki deployment
+	// are degraded or the cluster cannot connect to object storage.
+	ConditionDegraded LokiStackConditionType = "Degraded"
+)
+
+// LokiStackConditionReason defines the type for valid reasons of a Loki deployment conditions.
+type LokiStackConditionReason string
+
+const (
+	// ReasonFailedComponents when all/some LokiStack components fail to roll out.
+	ReasonFailedComponents LokiStackConditionReason = "FailedComponents"
+	// ReasonPendingComponents when all/some LokiStack components are pending on dependencies
+	ReasonPendingComponents LokiStackConditionReason = "PendingComponents"
+	// ReasonReadyComponents when all LokiStack components are ready to serve traffic.
+	ReasonReadyComponents LokiStackConditionReason = "ReadyComponents"
+	// ReasonMissingObjectStorageSecret when the required secret to store logs to object
+	// storage is missing.
+	ReasonMissingObjectStorageSecret LokiStackConditionReason = "MissingObjectStorageSecret"
+	// ReasonInvalidObjectStorageSecret when the format of the secret is invalid.
+	ReasonInvalidObjectStorageSecret LokiStackConditionReason = "InvalidObjectStorageSecret"
+	// ReasonInvalidReplicationConfiguration when the configured replication factor is not valid
+	// with the selected cluster size.
+	ReasonInvalidReplicationConfiguration LokiStackConditionReason = "InvalidReplicationConfiguration"
+	// ReasonMissingGatewayTenantSecret when the required tenant secret
+	// for authentication is missing.
+	ReasonMissingGatewayTenantSecret LokiStackConditionReason = "MissingGatewayTenantSecret"
+	// ReasonInvalidGatewayTenantSecret when the format of the secret is invalid.
+	ReasonInvalidGatewayTenantSecret LokiStackConditionReason = "InvalidGatewayTenantSecret"
+	// ReasonInvalidTenantsConfiguration when the tenant configuration provided is invalid.
+	ReasonInvalidTenantsConfiguration LokiStackConditionReason = "InvalidTenantsConfiguration"
+	// ReasonMissingGatewayOpenShiftBaseDomain when the reconciler cannot lookup the OpenShift DNS base domain.
+	ReasonMissingGatewayOpenShiftBaseDomain LokiStackConditionReason = "MissingGatewayOpenShiftBaseDomain"
+)
+
+// PodStatusMap defines the type for mapping pod status to pod name.
+type PodStatusMap map[corev1.PodPhase][]string
+
+// LokiStackComponentStatus defines the map of per pod status per LokiStack component.
+// Each component is represented by a separate map of v1.Phase to a list of pods. +type LokiStackComponentStatus struct { + // Compactor is a map to the pod status of the compactor pod. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Compactor",order=5 + Compactor PodStatusMap `json:"compactor,omitempty"` + + // Distributor is a map to the per pod status of the distributor deployment + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Distributor",order=1 + Distributor PodStatusMap `json:"distributor,omitempty"` + + // IndexGateway is a map to the per pod status of the index gateway statefulset + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="IndexGateway",order=6 + IndexGateway PodStatusMap `json:"indexGateway,omitempty"` + + // Ingester is a map to the per pod status of the ingester statefulset + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Ingester",order=2 + Ingester PodStatusMap `json:"ingester,omitempty"` + + // Querier is a map to the per pod status of the querier deployment + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Querier",order=3 + Querier PodStatusMap `json:"querier,omitempty"` + + // QueryFrontend is a map to the per pod status of the query frontend deployment + // + // +optional + // +kubebuilder:validation:Optional + // 
+operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Query Frontend",order=4 + QueryFrontend PodStatusMap `json:"queryFrontend,omitempty"` + + // Gateway is a map to the per pod status of the lokistack gateway deployment. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Gateway",order=5 + Gateway PodStatusMap `json:"gateway,omitempty"` +} + +// LokiStackStatus defines the observed state of LokiStack +type LokiStackStatus struct { + // Components provides summary of all Loki pod status grouped + // per component. + // + // +optional + // +kubebuilder:validation:Optional + Components LokiStackComponentStatus `json:"components,omitempty"` + + // Conditions of the Loki deployment health. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:io.kubernetes.conditions" + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories=logging + +// LokiStack is the Schema for the lokistacks API +// +// +operator-sdk:csv:customresourcedefinitions:displayName="LokiStack",resources={{Deployment,v1},{StatefulSet,v1},{ConfigMap,v1},{Ingress,v1},{Service,v1},{ServiceAccount,v1},{PersistentVolumeClaims,v1},{Route,v1},{ServiceMonitor,v1}} +type LokiStack struct { + Spec LokiStackSpec `json:"spec,omitempty"` + Status LokiStackStatus `json:"status,omitempty"` + metav1.ObjectMeta `json:"metadata,omitempty"` + metav1.TypeMeta `json:",inline"` +} + +// +kubebuilder:object:root=true + +// LokiStackList contains a list of LokiStack +type LokiStackList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LokiStack 
`json:"items"` +} + +func init() { + SchemeBuilder.Register(&LokiStack{}, &LokiStackList{}) +} diff --git a/operator/api/v1beta1/zz_generated.deepcopy.go b/operator/api/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..81108bddc7 --- /dev/null +++ b/operator/api/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,666 @@ +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + if in.OIDC != nil { + in, out := &in.OIDC, &out.OIDC + *out = new(OIDCSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationSpec) DeepCopyInto(out *AuthorizationSpec) { + *out = *in + if in.OPA != nil { + in, out := &in.OPA, &out.OPA + *out = new(OPASpec) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]RoleSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoleBindings != nil { + in, out := &in.RoleBindings, &out.RoleBindings + *out = make([]RoleBindingsSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationSpec. 
+func (in *AuthorizationSpec) DeepCopy() *AuthorizationSpec { + if in == nil { + return nil + } + out := new(AuthorizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngestionLimitSpec) DeepCopyInto(out *IngestionLimitSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestionLimitSpec. +func (in *IngestionLimitSpec) DeepCopy() *IngestionLimitSpec { + if in == nil { + return nil + } + out := new(IngestionLimitSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitsSpec) DeepCopyInto(out *LimitsSpec) { + *out = *in + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(LimitsTemplateSpec) + (*in).DeepCopyInto(*out) + } + if in.Tenants != nil { + in, out := &in.Tenants, &out.Tenants + *out = make(map[string]LimitsTemplateSpec, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsSpec. +func (in *LimitsSpec) DeepCopy() *LimitsSpec { + if in == nil { + return nil + } + out := new(LimitsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitsTemplateSpec) DeepCopyInto(out *LimitsTemplateSpec) { + *out = *in + if in.IngestionLimits != nil { + in, out := &in.IngestionLimits, &out.IngestionLimits + *out = new(IngestionLimitSpec) + **out = **in + } + if in.QueryLimits != nil { + in, out := &in.QueryLimits, &out.QueryLimits + *out = new(QueryLimitSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsTemplateSpec. 
+func (in *LimitsTemplateSpec) DeepCopy() *LimitsTemplateSpec { + if in == nil { + return nil + } + out := new(LimitsTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LokiComponentSpec) DeepCopyInto(out *LokiComponentSpec) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiComponentSpec. +func (in *LokiComponentSpec) DeepCopy() *LokiComponentSpec { + if in == nil { + return nil + } + out := new(LokiComponentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LokiStack) DeepCopyInto(out *LokiStack) { + *out = *in + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.TypeMeta = in.TypeMeta +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStack. +func (in *LokiStack) DeepCopy() *LokiStack { + if in == nil { + return nil + } + out := new(LokiStack) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LokiStack) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LokiStackComponentStatus) DeepCopyInto(out *LokiStackComponentStatus) { + *out = *in + if in.Compactor != nil { + in, out := &in.Compactor, &out.Compactor + *out = make(PodStatusMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Distributor != nil { + in, out := &in.Distributor, &out.Distributor + *out = make(PodStatusMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.IndexGateway != nil { + in, out := &in.IndexGateway, &out.IndexGateway + *out = make(PodStatusMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Ingester != nil { + in, out := &in.Ingester, &out.Ingester + *out = make(PodStatusMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Querier != nil { + in, out := &in.Querier, &out.Querier + *out = make(PodStatusMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.QueryFrontend != nil { + in, out := &in.QueryFrontend, &out.QueryFrontend + *out = make(PodStatusMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) 
+ copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = make(PodStatusMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackComponentStatus. +func (in *LokiStackComponentStatus) DeepCopy() *LokiStackComponentStatus { + if in == nil { + return nil + } + out := new(LokiStackComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LokiStackList) DeepCopyInto(out *LokiStackList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LokiStack, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackList. +func (in *LokiStackList) DeepCopy() *LokiStackList { + if in == nil { + return nil + } + out := new(LokiStackList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LokiStackList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LokiStackSpec) DeepCopyInto(out *LokiStackSpec) { + *out = *in + out.Storage = in.Storage + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(LimitsSpec) + (*in).DeepCopyInto(*out) + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(LokiTemplateSpec) + (*in).DeepCopyInto(*out) + } + if in.Tenants != nil { + in, out := &in.Tenants, &out.Tenants + *out = new(TenantsSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackSpec. +func (in *LokiStackSpec) DeepCopy() *LokiStackSpec { + if in == nil { + return nil + } + out := new(LokiStackSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LokiStackStatus) DeepCopyInto(out *LokiStackStatus) { + *out = *in + in.Components.DeepCopyInto(&out.Components) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackStatus. +func (in *LokiStackStatus) DeepCopy() *LokiStackStatus { + if in == nil { + return nil + } + out := new(LokiStackStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LokiTemplateSpec) DeepCopyInto(out *LokiTemplateSpec) { + *out = *in + if in.Compactor != nil { + in, out := &in.Compactor, &out.Compactor + *out = new(LokiComponentSpec) + (*in).DeepCopyInto(*out) + } + if in.Distributor != nil { + in, out := &in.Distributor, &out.Distributor + *out = new(LokiComponentSpec) + (*in).DeepCopyInto(*out) + } + if in.Ingester != nil { + in, out := &in.Ingester, &out.Ingester + *out = new(LokiComponentSpec) + (*in).DeepCopyInto(*out) + } + if in.Querier != nil { + in, out := &in.Querier, &out.Querier + *out = new(LokiComponentSpec) + (*in).DeepCopyInto(*out) + } + if in.QueryFrontend != nil { + in, out := &in.QueryFrontend, &out.QueryFrontend + *out = new(LokiComponentSpec) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(LokiComponentSpec) + (*in).DeepCopyInto(*out) + } + if in.IndexGateway != nil { + in, out := &in.IndexGateway, &out.IndexGateway + *out = new(LokiComponentSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiTemplateSpec. +func (in *LokiTemplateSpec) DeepCopy() *LokiTemplateSpec { + if in == nil { + return nil + } + out := new(LokiTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCSpec) DeepCopyInto(out *OIDCSpec) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(TenantSecretSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCSpec. +func (in *OIDCSpec) DeepCopy() *OIDCSpec { + if in == nil { + return nil + } + out := new(OIDCSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OPASpec) DeepCopyInto(out *OPASpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OPASpec. +func (in *OPASpec) DeepCopy() *OPASpec { + if in == nil { + return nil + } + out := new(OPASpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageSecretSpec) DeepCopyInto(out *ObjectStorageSecretSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSecretSpec. +func (in *ObjectStorageSecretSpec) DeepCopy() *ObjectStorageSecretSpec { + if in == nil { + return nil + } + out := new(ObjectStorageSecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec. +func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec { + if in == nil { + return nil + } + out := new(ObjectStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PodStatusMap) DeepCopyInto(out *PodStatusMap) { + { + in := &in + *out = make(PodStatusMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusMap. 
+func (in PodStatusMap) DeepCopy() PodStatusMap { + if in == nil { + return nil + } + out := new(PodStatusMap) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryLimitSpec) DeepCopyInto(out *QueryLimitSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryLimitSpec. +func (in *QueryLimitSpec) DeepCopy() *QueryLimitSpec { + if in == nil { + return nil + } + out := new(QueryLimitSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingsSpec) DeepCopyInto(out *RoleBindingsSpec) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingsSpec. +func (in *RoleBindingsSpec) DeepCopy() *RoleBindingsSpec { + if in == nil { + return nil + } + out := new(RoleBindingsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleSpec) DeepCopyInto(out *RoleSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Tenants != nil { + in, out := &in.Tenants, &out.Tenants + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]PermissionType, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleSpec. 
+func (in *RoleSpec) DeepCopy() *RoleSpec { + if in == nil { + return nil + } + out := new(RoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantSecretSpec) DeepCopyInto(out *TenantSecretSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantSecretSpec. +func (in *TenantSecretSpec) DeepCopy() *TenantSecretSpec { + if in == nil { + return nil + } + out := new(TenantSecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantsSpec) DeepCopyInto(out *TenantsSpec) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]AuthenticationSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(AuthorizationSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantsSpec. 
+func (in *TenantsSpec) DeepCopy() *TenantsSpec { + if in == nil { + return nil + } + out := new(TenantsSpec) + in.DeepCopyInto(out) + return out +} diff --git a/operator/bundle.Dockerfile b/operator/bundle.Dockerfile new file mode 100644 index 0000000000..cf0e7e463f --- /dev/null +++ b/operator/bundle.Dockerfile @@ -0,0 +1,20 @@ +FROM scratch + +# Core bundle labels. +LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 +LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ +LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ +LABEL operators.operatorframework.io.bundle.package.v1=loki-operator +LABEL operators.operatorframework.io.bundle.channels.v1=tech-preview +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-unknown +LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 +LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3 + +# Labels for testing. +LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 +LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ + +# Copy files to locations specified by labels. 
+COPY bundle/manifests /manifests/ +COPY bundle/metadata /metadata/ +COPY bundle/tests/scorecard /tests/scorecard/ diff --git a/operator/bundle/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml b/operator/bundle/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml new file mode 100644 index 0000000000..98171122b1 --- /dev/null +++ b/operator/bundle/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.openshift.io/serving-cert-secret-name: loki-operator-metrics + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/version: 0.0.1 + name: loki-operator-controller-manager-metrics-service +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/version: 0.0.1 + name: loki-operator-controller-manager +status: + loadBalancer: {} diff --git a/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml new file mode 100644 index 0000000000..07051e4beb --- /dev/null +++ b/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +data: + controller_manager_config.yaml: | + apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + kind: ControllerManagerConfig + health: + healthProbeBindAddress: :8081 + metrics: + bindAddress: 127.0.0.1:8080 + webhook: + port: 9443 + leaderElection: + leaderElect: true + resourceName: e3716011.grafana.com +kind: 
ConfigMap +metadata: + labels: + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/version: 0.0.1 + name: loki-operator-manager-config diff --git a/operator/bundle/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml b/operator/bundle/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml new file mode 100644 index 0000000000..0fc402c1c9 --- /dev/null +++ b/operator/bundle/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml @@ -0,0 +1,25 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/version: 0.0.1 + name: loki-operator + name: loki-operator-metrics-monitor +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + interval: 30s + path: /metrics + scheme: https + scrapeTimeout: 10s + targetPort: 8443 + tlsConfig: + caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + serverName: loki-operator-controller-manager-metrics-service.openshift-logging.svc + selector: + matchLabels: + app.kubernetes.io/name: loki-operator diff --git a/operator/bundle/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operator/bundle/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 0000000000..75f344d3f5 --- /dev/null +++ b/operator/bundle/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null 
+ labels: + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/version: 0.0.1 + name: loki-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git a/operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml b/operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml new file mode 100644 index 0000000000..5bda0c5e3c --- /dev/null +++ b/operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml @@ -0,0 +1,25 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/version: 0.0.1 + name: loki-operator-prometheus +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch diff --git a/operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml b/operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml new file mode 100644 index 0000000000..0321b9c7ea --- /dev/null +++ b/operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml @@ -0,0 +1,22 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: null + labels: + 
app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/version: 0.0.1 + name: loki-operator-prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: loki-operator-prometheus +subjects: +- kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml new file mode 100644 index 0000000000..97b67e2b66 --- /dev/null +++ b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml @@ -0,0 +1,701 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "loki.grafana.com/v1beta1", + "kind": "LokiStack", + "metadata": { + "name": "lokistack-sample" + }, + "spec": { + "replicationFactor": 2, + "size": "1x.small", + "storage": { + "secret": { + "name": "test" + } + }, + "storageClassName": "standard" + } + } + ] + capabilities: Full Lifecycle + categories: OpenShift Optional, Logging & Tracing + certified: "false" + containerImage: quay.io/openshift-logging/loki-operator:v0.0.1 + description: | + The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. + ## Prerequisites and Requirements + ### Loki Operator Namespace + The Loki Operator must be deployed to the global operator group namespace `openshift-logging`. + ### Memory Considerations + Loki is a memory intensive application. The initial + set of OCP nodes may not be large enough to support the Loki stack. Additional OCP nodes must be added + to the OCP cluster if you desire to run with the recommended (or better) memory. 
+ olm.skipRange: '>=4.6.0-0 <5.4.0' + operatorframework.io/cluster-monitoring: "true" + operatorframework.io/suggested-namespace: openshift-logging + operators.operatorframework.io/builder: operator-sdk-unknown + operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 + support: AOS Cluster Logging + labels: + operatorframework.io/arch.amd64: supported + operatorframework.io/arch.ppc64le: supported + operatorframework.io/arch.s390x: supported + name: loki-operator.v0.0.1 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: LokiStack is the Schema for the lokistacks API + displayName: LokiStack + kind: LokiStack + name: lokistacks.loki.grafana.com + resources: + - kind: ConfigMap + name: "" + version: v1 + - kind: Deployment + name: "" + version: v1 + - kind: Ingress + name: "" + version: v1 + - kind: PersistentVolumeClaims + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Service + name: "" + version: v1 + - kind: ServiceAccount + name: "" + version: v1 + - kind: ServiceMonitor + name: "" + version: v1 + - kind: StatefulSet + name: "" + version: v1 + specDescriptors: + - description: Limits defines the limits to be applied to log stream processing. + displayName: Rate Limiting + path: limits + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Global defines the limits applied globally across the cluster. + displayName: Global Limits + path: limits.global + - description: IngestionBurstSize defines the local rate-limited sample size + per distributor replica. It should be set to the set at least to the maximum + logs size expected in a single push request. + displayName: Ingestion Burst Size (in MB) + path: limits.global.ingestion.ingestionBurstSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: IngestionRate defines the sample size per second. Units MB. 
+ displayName: Ingestion Rate (in MB) + path: limits.global.ingestion.ingestionRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxGlobalStreamsPerTenant defines the maximum number of active + streams per tenant, across the cluster. + displayName: Max Global Streams per Tenant + path: limits.global.ingestion.maxGlobalStreamsPerTenant + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNameLength defines the maximum number of characters allowed + for label keys in log streams. + displayName: Max Label Name Length + path: limits.global.ingestion.maxLabelNameLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNamesPerSeries defines the maximum number of label names + per series in each log stream. + displayName: Max Labels Names per Series + path: limits.global.ingestion.maxLabelNamesPerSeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelValueLength defines the maximum number of characters + allowed for label values in log streams. + displayName: Max Label Value Length + path: limits.global.ingestion.maxLabelValueLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLineSize defines the maximum line size on ingestion path. + Units in Bytes. + displayName: Max Line Size + path: limits.global.ingestion.maxLineSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxChunksPerQuery defines the maximum number of chunks that can + be fetched by a single query. + displayName: Max Chunk per Query + path: limits.global.queries.maxChunksPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries + that will be returned for a query. 
+ displayName: Max Entries Limit per Query + path: limits.global.queries.maxEntriesLimitPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxQuerySeries defines the the maximum of unique series that + is returned by a metric query. + displayName: Max Query Series + path: limits.global.queries.maxQuerySeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Tenants defines the limits applied per tenant. + displayName: Limits per Tenant + path: limits.tenants + - description: IngestionBurstSize defines the local rate-limited sample size + per distributor replica. It should be set to the set at least to the maximum + logs size expected in a single push request. + displayName: Ingestion Burst Size (in MB) + path: limits.tenants.ingestion.ingestionBurstSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: IngestionRate defines the sample size per second. Units MB. + displayName: Ingestion Rate (in MB) + path: limits.tenants.ingestion.ingestionRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxGlobalStreamsPerTenant defines the maximum number of active + streams per tenant, across the cluster. + displayName: Max Global Streams per Tenant + path: limits.tenants.ingestion.maxGlobalStreamsPerTenant + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNameLength defines the maximum number of characters allowed + for label keys in log streams. + displayName: Max Label Name Length + path: limits.tenants.ingestion.maxLabelNameLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNamesPerSeries defines the maximum number of label names + per series in each log stream. 
+ displayName: Max Labels Names per Series + path: limits.tenants.ingestion.maxLabelNamesPerSeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelValueLength defines the maximum number of characters + allowed for label values in log streams. + displayName: Max Label Value Length + path: limits.tenants.ingestion.maxLabelValueLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLineSize defines the maximum line size on ingestion path. + Units in Bytes. + displayName: Max Line Size + path: limits.tenants.ingestion.maxLineSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxChunksPerQuery defines the maximum number of chunks that can + be fetched by a single query. + displayName: Max Chunk per Query + path: limits.tenants.queries.maxChunksPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries + that will be returned for a query. + displayName: Max Entries Limit per Query + path: limits.tenants.queries.maxEntriesLimitPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxQuerySeries defines the the maximum of unique series that + is returned by a metric query. + displayName: Max Query Series + path: limits.tenants.queries.maxQuerySeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: ManagementState defines if the CR should be managed by the operator + or not. Default is managed. + displayName: Management State + path: managementState + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:Managed + - urn:alm:descriptor:com.tectonic.ui:select:Unmanaged + - description: ReplicationFactor defines the policy for log stream replication. 
+ displayName: Replication Factor + path: replicationFactor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Size defines one of the support Loki deployment scale out sizes. + displayName: LokiStack Size + path: size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small + - urn:alm:descriptor:com.tectonic.ui:select:1x.small + - urn:alm:descriptor:com.tectonic.ui:select:1x.medium + - description: Storage defines the spec for the object storage endpoint to store + logs. + displayName: Object Storage + path: storage + - description: Name of a secret in the namespace configured for object storage + secrets. + displayName: Object Storage Secret + path: storage.secret.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: Storage class name defines the storage class for ingester/querier + PVCs. + displayName: Storage Class Name + path: storageClassName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:StorageClass + - description: Template defines the resource/limits/tolerations/nodeselectors + per component + displayName: Node Placement + path: template + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Compactor defines the compaction component spec. + displayName: Compactor pods + path: template.compactor + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.compactor.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Distributor defines the distributor component spec. + displayName: Distributor pods + path: template.distributor + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.distributor.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Gateway defines the lokistack gateway component spec. 
+ displayName: Gateway pods + path: template.gateway + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.gateway.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: IndexGateway defines the index gateway component spec. + displayName: Index Gateway pods + path: template.indexGateway + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.indexGateway.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Ingester defines the ingester component spec. + displayName: Ingester pods + path: template.ingester + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.ingester.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Querier defines the querier component spec. + displayName: Querier pods + path: template.querier + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.querier.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: QueryFrontend defines the query frontend component spec. + displayName: Query Frontend pods + path: template.queryFrontend + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.queryFrontend.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Tenants defines the per-tenant authentication and authorization + spec for the lokistack-gateway component. + displayName: Tenants Configuration + path: tenants + - description: Authentication defines the lokistack-gateway component authentication + configuration spec per tenant. 
+ displayName: Authentication + path: tenants.authentication + - description: OIDC defines the spec for the OIDC tenant's authentication. + displayName: OIDC Configuration + path: tenants.authentication[0].oidc + - description: IssuerURL defines the URL for issuer. + displayName: Issuer URL + path: tenants.authentication[0].oidc.issuerURL + - description: RedirectURL defines the URL for redirect. + displayName: Redirect URL + path: tenants.authentication[0].oidc.redirectURL + - description: Secret defines the spec for the clientID, clientSecret and issuerCAPath + for tenant's authentication. + displayName: Tenant Secret + path: tenants.authentication[0].oidc.secret + - description: Name of a secret in the namespace configured for tenant secrets. + displayName: Tenant Secret Name + path: tenants.authentication[0].oidc.secret.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: TenantID defines the id of the tenant. + displayName: Tenant ID + path: tenants.authentication[0].tenantId + - description: TenantName defines the name of the tenant. + displayName: Tenant Name + path: tenants.authentication[0].tenantName + - description: Authorization defines the lokistack-gateway component authorization + configuration spec per tenant. + displayName: Authorization + path: tenants.authorization + - description: OPA defines the spec for the third-party endpoint for tenant's + authorization. + displayName: OPA Configuration + path: tenants.authorization.opa + - description: URL defines the third-party endpoint for authorization. + displayName: OpenPolicyAgent URL + path: tenants.authorization.opa.url + - description: RoleBindings defines configuration to bind a set of roles to + a set of subjects. + displayName: Static Role Bindings + path: tenants.authorization.roleBindings + - description: Roles defines a set of permissions to interact with a tenant. 
+ displayName: Static Roles + path: tenants.authorization.roles + - description: Mode defines the mode in which lokistack-gateway component will + be configured. + displayName: Mode + path: tenants.mode + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:static + - urn:alm:descriptor:com.tectonic.ui:select:dynamic + - urn:alm:descriptor:com.tectonic.ui:select:openshift-logging + statusDescriptors: + - description: Distributor is a map to the per pod status of the distributor + deployment + displayName: Distributor + path: components.distributor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Ingester is a map to the per pod status of the ingester statefulset + displayName: Ingester + path: components.ingester + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Querier is a map to the per pod status of the querier deployment + displayName: Querier + path: components.querier + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: QueryFrontend is a map to the per pod status of the query frontend + deployment + displayName: Query Frontend + path: components.queryFrontend + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Compactor is a map to the pod status of the compactor pod. + displayName: Compactor + path: components.compactor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Gateway is a map to the per pod status of the lokistack gateway + deployment. + displayName: Gateway + path: components.gateway + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: IndexGateway is a map to the per pod status of the index gateway + statefulset + displayName: IndexGateway + path: components.indexGateway + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Conditions of the Loki deployment health. 
+ displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1beta1 + description: | + The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. + ## Prerequisites and Requirements + ### Loki Operator Namespace + The Loki Operator must be deployed to the global operator group namespace `openshift-logging`. + ### Memory Considerations + Loki is a memory intensive application. The initial + set of OCP nodes may not be large enough to support the Loki cluster. Additional OCP nodes must be added + to the OCP cluster if you desire to run with the recommended (or better) memory. + displayName: Loki Operator + icon: + - base64data: PHN2ZyBpZD0iYWZiNDE1NDktYzU3MC00OWI3LTg1Y2QtNjU3NjAwZWRmMmUxIiBkYXRhLW5hbWU9IkxheWVyIDEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDcyMS4xNSA3MjEuMTUiPgogIDxkZWZzPgogICAgPHN0eWxlPgogICAgICAuYTQ0OGZkZWEtNGE0Yy00Njc4LTk3NmEtYzM3ODUzMDhhZTA2IHsKICAgICAgICBmaWxsOiAjZGIzOTI3OwogICAgICB9CgogICAgICAuZTEzMzA4YjgtNzQ4NS00Y2IwLTk3NjUtOGE1N2I5M2Y5MWE2IHsKICAgICAgICBmaWxsOiAjY2IzNzI4OwogICAgICB9CgogICAgICAuZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIHsKICAgICAgICBmaWxsOiAjZmZmOwogICAgICB9CgogICAgICAuYTA0MjBjYWMtZWJlNi00YzE4LWI5ODEtYWJiYTBiYTliMzY1IHsKICAgICAgICBmaWxsOiAjZTVlNWU0OwogICAgICB9CiAgICA8L3N0eWxlPgogIDwvZGVmcz4KICA8Y2lyY2xlIGNsYXNzPSJhNDQ4ZmRlYS00YTRjLTQ2NzgtOTc2YS1jMzc4NTMwOGFlMDYiIGN4PSIzNjAuNTgiIGN5PSIzNjAuNTgiIHI9IjM1OC4yOCIvPgogIDxwYXRoIGNsYXNzPSJlMTMzMDhiOC03NDg1LTRjYjAtOTc2NS04YTU3YjkzZjkxYTYiIGQ9Ik02MTMuNTQsMTA3LjMsMTA2Ljg4LDYxNGMxNDAsMTM4LjUxLDM2NS44MiwxMzguMDYsNTA1LjI2LTEuMzlTNzUyLDI0Ny4zMyw2MTMuNTQsMTA3LjNaIi8+CiAgPGc+CiAgICA8Y2lyY2xlIGNsYXNzPSJlNzcyODZmMS0yMmRhLTQ0ZDEtOGVmYi0xZDE0YjBjY2FlNjIiIGN4PSIyMzQuNyIgY3k9IjM1Ny4zIiByPSI0Ny43MiIvPgogICAgPGNpcmNsZSBjbGFzcz0iZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIiBjeD0iMjM0LjciIGN5PSIxODIuOTQiIHI9IjQ3LjcyIi8+CiAgICA8Y2lyY2xlIGNsYXNzPSJlNzcyODZmMS0yMmRhLT
Q0ZDEtOGVmYi0xZDE0YjBjY2FlNjIiIGN4PSIyMzQuNyIgY3k9IjUzOC4yMSIgcj0iNDcuNzIiLz4KICA8L2c+CiAgPHBvbHlnb24gY2xhc3M9ImU3NzI4NmYxLTIyZGEtNDRkMS04ZWZiLTFkMTRiMGNjYWU2MiIgcG9pbnRzPSI0MzUuMTkgMzQ3LjMgMzkwLjU0IDM0Ny4zIDM5MC41NCAxNzIuOTQgMzE2LjE2IDE3Mi45NCAzMTYuMTYgMTkyLjk0IDM3MC41NCAxOTIuOTQgMzcwLjU0IDM0Ny4zIDMxNi4xNiAzNDcuMyAzMTYuMTYgMzY3LjMgMzcwLjU0IDM2Ny4zIDM3MC41NCA1MjEuNjcgMzE2LjE2IDUyMS42NyAzMTYuMTYgNTQxLjY3IDM5MC41NCA1NDEuNjcgMzkwLjU0IDM2Ny4zIDQzNS4xOSAzNjcuMyA0MzUuMTkgMzQ3LjMiLz4KICA8cG9seWdvbiBjbGFzcz0iZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIiBwb2ludHM9IjU5OS43NCAzMTcuMDMgNTU3Ljk3IDMxNy4wMyA1NTAuOTcgMzE3LjAzIDU1MC45NyAzMTAuMDMgNTUwLjk3IDI2OC4yNiA1NTAuOTcgMjY4LjI2IDQ2NC4zNiAyNjguMjYgNDY0LjM2IDQ0Ni4zNCA1OTkuNzQgNDQ2LjM0IDU5OS43NCAzMTcuMDMgNTk5Ljc0IDMxNy4wMyIvPgogIDxwb2x5Z29uIGNsYXNzPSJhMDQyMGNhYy1lYmU2LTRjMTgtYjk4MS1hYmJhMGJhOWIzNjUiIHBvaW50cz0iNTk5Ljc0IDMxMC4wMyA1NTcuOTcgMjY4LjI2IDU1Ny45NyAzMTAuMDMgNTk5Ljc0IDMxMC4wMyIvPgo8L3N2Zz4K + mediatype: image/svg+xml + install: + spec: + clusterPermissions: + - rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - config.openshift.io + resources: + - dnses + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - apiGroups: + - loki.grafana.com + resources: + - lokistacks + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - loki.grafana.com + resources: + - lokistacks/finalizers + verbs: + - update + - apiGroups: + - loki.grafana.com + resources: + - lokistacks/status + verbs: + - get + - patch + - 
update + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - get + - list + - update + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - get + - list + - update + - watch + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - create + - get + - list + - update + - watch + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + serviceAccountName: default + deployments: + - name: loki-operator-controller-manager + spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/version: 0.0.1 + name: loki-operator-controller-manager + strategy: {} + template: + metadata: + labels: + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/version: 0.0.1 + name: loki-operator-controller-manager + spec: + containers: + - args: + - --with-lokistack-gateway + - --with-lokistack-gateway-route + - --with-cert-signing-service + - --with-service-monitors + - --with-tls-service-monitors + command: + - /manager + env: + - name: RELATED_IMAGE_LOKI + value: quay.io/openshift-logging/loki:v2.4.1 + - name: RELATED_IMAGE_GATEWAY + value: quay.io/observatorium/api:latest + - name: RELATED_IMAGE_OPA + value: quay.io/observatorium/opa-openshift:latest + image: quay.io/openshift-logging/loki-operator:v0.0.1 + imagePullPolicy: 
IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + ports: + - containerPort: 8080 + name: metrics + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: {} + securityContext: + allowPrivilegeEscalation: false + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --tls-cert-file=/var/run/secrets/serving-cert/tls.crt + - --tls-private-key-file=/var/run/secrets/serving-cert/tls.key + - --v=2 + image: quay.io/openshift/origin-kube-rbac-proxy:latest + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + resources: {} + volumeMounts: + - mountPath: /var/run/secrets/serving-cert + name: loki-operator-metrics-cert + terminationGracePeriodSeconds: 10 + volumes: + - name: loki-operator-metrics-cert + secret: + defaultMode: 420 + optional: true + secretName: loki-operator-metrics + permissions: + - rules: + - apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: default + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - logging + - loki + links: + - name: Loki Operator + url: https://github.com/grafana/loki + maintainers: + - email: loki-operator-team@googlegroups.com + name: Grafana Loki SIG Operator + maturity: alpha + provider: + name: Grafana.com + version: 0.0.1 diff --git a/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml new file mode 100644 index 0000000000..1bb04ccee8 --- /dev/null +++ 
b/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml @@ -0,0 +1,951 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/version: 0.0.1 + name: lokistacks.loki.grafana.com +spec: + group: loki.grafana.com + names: + categories: + - logging + kind: LokiStack + listKind: LokiStackList + plural: lokistacks + singular: lokistack + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: LokiStack is the Schema for the lokistacks API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LokiStackSpec defines the desired state of LokiStack + properties: + limits: + description: Limits defines the limits to be applied to log stream + processing. + properties: + global: + description: Global defines the limits applied globally across + the cluster. + properties: + ingestion: + description: IngestionLimits defines the limits applied on + ingested log streams. 
+ properties: + ingestionBurstSize: + description: IngestionBurstSize defines the local rate-limited + sample size per distributor replica. It should be set + to the set at least to the maximum logs size expected + in a single push request. + format: int32 + type: integer + ingestionRate: + description: IngestionRate defines the sample size per + second. Units MB. + format: int32 + type: integer + maxGlobalStreamsPerTenant: + description: MaxGlobalStreamsPerTenant defines the maximum + number of active streams per tenant, across the cluster. + format: int32 + type: integer + maxLabelNameLength: + description: MaxLabelNameLength defines the maximum number + of characters allowed for label keys in log streams. + format: int32 + type: integer + maxLabelNamesPerSeries: + description: MaxLabelNamesPerSeries defines the maximum + number of label names per series in each log stream. + format: int32 + type: integer + maxLabelValueLength: + description: MaxLabelValueLength defines the maximum number + of characters allowed for label values in log streams. + format: int32 + type: integer + maxLineSize: + description: MaxLineSize defines the maximum line size + on ingestion path. Units in Bytes. + format: int32 + type: integer + type: object + queries: + description: QueryLimits defines the limit applied on querying + log streams. + properties: + maxChunksPerQuery: + description: MaxChunksPerQuery defines the maximum number + of chunks that can be fetched by a single query. + format: int32 + type: integer + maxEntriesLimitPerQuery: + description: MaxEntriesLimitsPerQuery defines the maximum + number of log entries that will be returned for a query. + format: int32 + type: integer + maxQuerySeries: + description: MaxQuerySeries defines the the maximum of + unique series that is returned by a metric query. 
+ format: int32 + type: integer + type: object + type: object + tenants: + additionalProperties: + description: LimitsTemplateSpec defines the limits applied + at ingestion or query path. + properties: + ingestion: + description: IngestionLimits defines the limits applied + on ingested log streams. + properties: + ingestionBurstSize: + description: IngestionBurstSize defines the local rate-limited + sample size per distributor replica. It should be + set to the set at least to the maximum logs size expected + in a single push request. + format: int32 + type: integer + ingestionRate: + description: IngestionRate defines the sample size per + second. Units MB. + format: int32 + type: integer + maxGlobalStreamsPerTenant: + description: MaxGlobalStreamsPerTenant defines the maximum + number of active streams per tenant, across the cluster. + format: int32 + type: integer + maxLabelNameLength: + description: MaxLabelNameLength defines the maximum + number of characters allowed for label keys in log + streams. + format: int32 + type: integer + maxLabelNamesPerSeries: + description: MaxLabelNamesPerSeries defines the maximum + number of label names per series in each log stream. + format: int32 + type: integer + maxLabelValueLength: + description: MaxLabelValueLength defines the maximum + number of characters allowed for label values in log + streams. + format: int32 + type: integer + maxLineSize: + description: MaxLineSize defines the maximum line size + on ingestion path. Units in Bytes. + format: int32 + type: integer + type: object + queries: + description: QueryLimits defines the limit applied on querying + log streams. + properties: + maxChunksPerQuery: + description: MaxChunksPerQuery defines the maximum number + of chunks that can be fetched by a single query. + format: int32 + type: integer + maxEntriesLimitPerQuery: + description: MaxEntriesLimitsPerQuery defines the maximum + number of log entries that will be returned for a + query. 
+ format: int32 + type: integer + maxQuerySeries: + description: MaxQuerySeries defines the the maximum + of unique series that is returned by a metric query. + format: int32 + type: integer + type: object + type: object + description: Tenants defines the limits applied per tenant. + type: object + type: object + managementState: + default: Managed + description: ManagementState defines if the CR should be managed by + the operator or not. Default is managed. + enum: + - Managed + - Unmanaged + type: string + replicationFactor: + description: ReplicationFactor defines the policy for log stream replication. + format: int32 + minimum: 1 + type: integer + size: + description: Size defines one of the support Loki deployment scale + out sizes. + enum: + - 1x.extra-small + - 1x.small + - 1x.medium + type: string + storage: + description: Storage defines the spec for the object storage endpoint + to store logs. + properties: + secret: + description: Secret for object storage authentication. Name of + a secret in the same namespace as the cluster logging operator. + properties: + name: + description: Name of a secret in the namespace configured + for object storage secrets. + type: string + required: + - name + type: object + required: + - secret + type: object + storageClassName: + description: Storage class name defines the storage class for ingester/querier + PVCs. + type: string + template: + description: Template defines the resource/limits/tolerations/nodeselectors + per component + properties: + compactor: + description: Compactor defines the compaction component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. 
+ format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + distributor: + description: Distributor defines the distributor component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. 
+ format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + gateway: + description: Gateway defines the lokistack gateway component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. 
+ format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + indexGateway: + description: IndexGateway defines the index gateway component + spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. 
+ type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + ingester: + description: Ingester defines the ingester component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. 
+ type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + querier: + description: Querier defines the querier component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. 
+ type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + queryFrontend: + description: QueryFrontend defines the query frontend component + spec. 
+ properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + type: object + type: object + tenants: + description: Tenants defines the per-tenant authentication and authorization + spec for the lokistack-gateway component. + properties: + authentication: + description: Authentication defines the lokistack-gateway component + authentication configuration spec per tenant. + items: + description: AuthenticationSpec defines the oidc configuration + per tenant for lokiStack Gateway component. + properties: + oidc: + description: OIDC defines the spec for the OIDC tenant's + authentication. + properties: + groupClaim: + type: string + issuerURL: + description: IssuerURL defines the URL for issuer. + type: string + redirectURL: + description: RedirectURL defines the URL for redirect. + type: string + secret: + description: Secret defines the spec for the clientID, + clientSecret and issuerCAPath for tenant's authentication. + properties: + name: + description: Name of a secret in the namespace configured + for tenant secrets. + type: string + required: + - name + type: object + usernameClaim: + type: string + required: + - groupClaim + - issuerURL + - redirectURL + - secret + - usernameClaim + type: object + tenantId: + description: TenantID defines the id of the tenant. + type: string + tenantName: + description: TenantName defines the name of the tenant. + type: string + required: + - oidc + - tenantId + - tenantName + type: object + type: array + authorization: + description: Authorization defines the lokistack-gateway component + authorization configuration spec per tenant. + properties: + opa: + description: OPA defines the spec for the third-party endpoint + for tenant's authorization. + properties: + url: + description: URL defines the third-party endpoint for + authorization. + type: string + required: + - url + type: object + roleBindings: + description: RoleBindings defines configuration to bind a + set of roles to a set of subjects. 
+ items: + description: RoleBindingsSpec binds a set of roles to a + set of subjects. + properties: + name: + type: string + roles: + items: + type: string + type: array + subjects: + items: + description: Subject represents a subject that has + been bound to a role. + properties: + kind: + description: SubjectKind is a kind of LokiStack + Gateway RBAC subject. + enum: + - user + - group + type: string + name: + type: string + required: + - kind + - name + type: object + type: array + required: + - name + - roles + - subjects + type: object + type: array + roles: + description: Roles defines a set of permissions to interact + with a tenant. + items: + description: RoleSpec describes a set of permissions to + interact with a tenant. + properties: + name: + type: string + permissions: + items: + description: PermissionType is a LokiStack Gateway + RBAC permission. + enum: + - read + - write + type: string + type: array + resources: + items: + type: string + type: array + tenants: + items: + type: string + type: array + required: + - name + - permissions + - resources + - tenants + type: object + type: array + type: object + mode: + default: openshift-logging + description: Mode defines the mode in which lokistack-gateway + component will be configured. + enum: + - static + - dynamic + - openshift-logging + type: string + required: + - mode + type: object + required: + - replicationFactor + - size + - storage + - storageClassName + type: object + status: + description: LokiStackStatus defines the observed state of LokiStack + properties: + components: + description: Components provides summary of all Loki pod status grouped + per component. + properties: + compactor: + additionalProperties: + items: + type: string + type: array + description: Compactor is a map to the pod status of the compactor + pod. 
+ type: object + distributor: + additionalProperties: + items: + type: string + type: array + description: Distributor is a map to the per pod status of the + distributor deployment + type: object + gateway: + additionalProperties: + items: + type: string + type: array + description: Gateway is a map to the per pod status of the lokistack + gateway deployment. + type: object + indexGateway: + additionalProperties: + items: + type: string + type: array + description: IndexGateway is a map to the per pod status of the + index gateway statefulset + type: object + ingester: + additionalProperties: + items: + type: string + type: array + description: Ingester is a map to the per pod status of the ingester + statefulset + type: object + querier: + additionalProperties: + items: + type: string + type: array + description: Querier is a map to the per pod status of the querier + deployment + type: object + queryFrontend: + additionalProperties: + items: + type: string + type: array + description: QueryFrontend is a map to the per pod status of the + query frontend deployment + type: object + type: object + conditions: + description: Conditions of the Loki deployment health. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. 
This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operator/bundle/metadata/annotations.yaml b/operator/bundle/metadata/annotations.yaml new file mode 100644 index 0000000000..60f85ac976 --- /dev/null +++ b/operator/bundle/metadata/annotations.yaml @@ -0,0 +1,14 @@ +annotations: + # Core bundle annotations. + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: loki-operator + operators.operatorframework.io.bundle.channels.v1: tech-preview + operators.operatorframework.io.metrics.builder: operator-sdk-unknown + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + + # Annotations for testing. 
+ operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ diff --git a/operator/bundle/tests/scorecard/config.yaml b/operator/bundle/tests/scorecard/config.yaml new file mode 100644 index 0000000000..fde2af8b26 --- /dev/null +++ b/operator/bundle/tests/scorecard/config.yaml @@ -0,0 +1,70 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: + - entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: basic + test: basic-check-spec-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-bundle-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-crds-have-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-crds-have-resources-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-spec-descriptors-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-status-descriptors-test + storage: + spec: + mountPath: {} +storage: + spec: + mountPath: {} diff --git a/operator/calculator.Dockerfile b/operator/calculator.Dockerfile new file mode 100644 index 0000000000..2f41b1babd --- /dev/null +++ b/operator/calculator.Dockerfile @@ -0,0 
+1,26 @@ +# Build the calculator binary +FROM golang:1.16 as builder + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY cmd/size-calculator/main.go main.go +COPY internal/ internal/ + +# Build +RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -a -o size-calculator main.go + +# Use distroless as minimal base image to package the size-calculator binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/size-calculator . +USER 65532:65532 + +ENTRYPOINT ["/size-calculator"] diff --git a/operator/cmd/loki-broker/main.go b/operator/cmd/loki-broker/main.go new file mode 100644 index 0000000000..572de10cac --- /dev/null +++ b/operator/cmd/loki-broker/main.go @@ -0,0 +1,152 @@ +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/ViaQ/logerr/log" + "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests" + "sigs.k8s.io/yaml" +) + +// Define the manifest options here as structured objects +type config struct { + Name string + Namespace string + Image string + + featureFlags manifests.FeatureFlags + objectStorage manifests.ObjectStorage + + crFilepath string + writeToDir string +} + +func (c *config) registerFlags(f *flag.FlagSet) { + // LokiStack metadata options + f.StringVar(&c.Name, "name", "", "The name of the stack") + f.StringVar(&c.Namespace, "namespace", "", "Namespace to deploy to") + f.StringVar(&c.Image, "image", manifests.DefaultContainerImage, "The Loki image pull spec location.") + // Feature flags + c.featureFlags = manifests.FeatureFlags{} + f.BoolVar(&c.featureFlags.EnableCertificateSigningService,
"with-cert-signing-service", false, "Enable usage of cert-signing service for scraping prometheus metrics via TLS.") + f.BoolVar(&c.featureFlags.EnableServiceMonitors, "with-service-monitors", false, "Enable service monitors for all LokiStack components.") + f.BoolVar(&c.featureFlags.EnableTLSServiceMonitorConfig, "with-tls-service-monitors", false, "Enable TLS endpoint for service monitors.") + f.BoolVar(&c.featureFlags.EnableGateway, "with-lokistack-gateway", false, "Enables the manifest creation for the entire lokistack-gateway.") + // Object storage options + c.objectStorage = manifests.ObjectStorage{} + f.StringVar(&c.objectStorage.Endpoint, "object-storage.endpoint", "", "The S3 endpoint location.") + f.StringVar(&c.objectStorage.Buckets, "object-storage.buckets", "", "A comma-separated list of S3 buckets.") + f.StringVar(&c.objectStorage.Region, "object-storage.region", "", "An S3 region.") + f.StringVar(&c.objectStorage.AccessKeyID, "object-storage.access-key-id", "", "The access key id for S3.") + f.StringVar(&c.objectStorage.AccessKeySecret, "object-storage.access-key-secret", "", "The access key secret for S3.") + // Input and output file/dir options + f.StringVar(&c.crFilepath, "custom-resource.path", "", "Path to a custom resource YAML file.") + f.StringVar(&c.writeToDir, "output.write-dir", "", "write each file to the specified directory.") +} + +func (c *config) validateFlags() { + if cfg.crFilepath == "" { + log.Info("-custom-resource.path flag is required") + os.Exit(1) + } + if cfg.Name == "" { + log.Info("-name flag is required") + os.Exit(1) + } + if cfg.Namespace == "" { + log.Info("-namespace flag is required") + os.Exit(1) + } + // Validate manifests.objectStorage + if cfg.objectStorage.Endpoint == "" { + log.Info("-object-storage.endpoint flag is required") + os.Exit(1) + } + if cfg.objectStorage.Buckets == "" { + log.Info("-object-storage.buckets flag is required") + os.Exit(1) + } + if cfg.objectStorage.AccessKeyID == "" { + 
log.Info("-object-storage.access-key-id flag is required") + os.Exit(1) + } + if cfg.objectStorage.AccessKeySecret == "" { + log.Info("-object-storage.access-key-secret flag is required") + os.Exit(1) + } +} + +var cfg *config + +func init() { + log.Init("loki-broker") + cfg = &config{} +} + +func main() { + f := flag.NewFlagSet("", flag.ExitOnError) + cfg.registerFlags(f) + if err := f.Parse(os.Args[1:]); err != nil { + log.Error(err, "failed to parse flags") + } + + cfg.validateFlags() + + b, err := ioutil.ReadFile(cfg.crFilepath) + if err != nil { + log.Info("failed to read custom resource file", "path", cfg.crFilepath) + os.Exit(1) + } + + ls := &v1beta1.LokiStack{} + if err = yaml.Unmarshal(b, ls); err != nil { + log.Error(err, "failed to unmarshal LokiStack CR", "path", cfg.crFilepath) + os.Exit(1) + } + + // Convert config to manifest.Options + opts := manifests.Options{ + Name: cfg.Name, + Namespace: cfg.Namespace, + Image: cfg.Image, + Stack: ls.Spec, + Flags: cfg.featureFlags, + ObjectStorage: cfg.objectStorage, + } + + if optErr := manifests.ApplyDefaultSettings(&opts); optErr != nil { + log.Error(optErr, "failed to conform options to build settings") + os.Exit(1) + } + + objects, err := manifests.BuildAll(opts) + if err != nil { + log.Error(err, "failed to build manifests") + os.Exit(1) + } + + for _, o := range objects { + b, err := yaml.Marshal(o) + if err != nil { + log.Error(err, "failed to marshal manifest", "name", o.GetName(), "kind", o.GetObjectKind()) + continue + } + + if cfg.writeToDir != "" { + basename := fmt.Sprintf("%s-%s.yaml", o.GetObjectKind().GroupVersionKind().Kind, o.GetName()) + fname := strings.ToLower(path.Join(cfg.writeToDir, basename)) + if err := ioutil.WriteFile(fname, b, 0o644); err != nil { + log.Error(err, "failed to write file to directory", "path", fname) + os.Exit(1) + } + } else { + fmt.Fprintf(os.Stdout, "---\n%s", b) + } + } +} diff --git a/operator/cmd/size-calculator/main.go 
new file mode 100755 index 0000000000..ccac06c3c2 --- /dev/null +++ b/operator/cmd/size-calculator/main.go @@ -0,0 +1,63 @@ +package main + +import ( + "fmt" + "math" + "os" + "time" + + "github.com/grafana/loki-operator/internal/sizes" + "github.com/prometheus/common/model" + + "github.com/ViaQ/logerr/log" +) + +const ( + // defaultDuration is the time for which the metric needs to be predicted for. + // It is passed as second parameter to predict_linear. + defaultDuration string = "24h" + // range1xSmall defines the range (in GB) + // of t-shirt size 1x.small i.e., 0 <= 1x.small <= 500 + range1xSmall int = 500 + // sizeOneXSmall defines the size of a single Loki deployment + // with small resources/limits requirements. This size is dedicated for setup **without** the + // requirement for single replication factor and auto-compaction. + sizeOneXSmall string = "1x.small" + // sizeOneXMedium defines the size of a single Loki deployment + // with medium resources/limits requirements. This size is dedicated for setup **with** the + // requirement for single replication factor and auto-compaction. 
+ sizeOneXMedium string = "1x.medium" +) + +func init() { + log.Init("size-calculator") +} + +func main() { + log.Info("starting storage size calculator...") + + for { + duration, parseErr := model.ParseDuration(defaultDuration) + if parseErr != nil { + log.Error(parseErr, "failed to parse duration") + os.Exit(1) + } + + logsCollected, err := sizes.PredictFor(duration) + if err != nil { + log.Error(err, "Failed to collect metrics data") + os.Exit(1) + } + + logsCollectedInGB := int(math.Ceil(logsCollected / math.Pow(1024, 3))) + log.Info(fmt.Sprintf("Amount of logs expected in 24 hours is %f Bytes or %dGB", logsCollected, logsCollectedInGB)) + + if logsCollectedInGB <= range1xSmall { + log.Info(fmt.Sprintf("Recommended t-shirt size for %dGB is %s", logsCollectedInGB, sizeOneXSmall)) + } else { + log.Info(fmt.Sprintf("Recommended t-shirt size for %dGB is %s", logsCollectedInGB, sizeOneXMedium)) + } + + time.Sleep(1 * time.Minute) + } +} diff --git a/operator/config/certmanager/certificate.yaml b/operator/config/certmanager/certificate.yaml new file mode 100644 index 0000000000..52d866183c --- /dev/null +++ b/operator/config/certmanager/certificate.yaml @@ -0,0 +1,25 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. 
+apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/operator/config/certmanager/kustomization.yaml b/operator/config/certmanager/kustomization.yaml new file mode 100644 index 0000000000..bebea5a595 --- /dev/null +++ b/operator/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/operator/config/certmanager/kustomizeconfig.yaml b/operator/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 0000000000..90d7c313ca --- /dev/null +++ b/operator/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,16 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: cert-manager.io + path: spec/commonName +- kind: Certificate + group: cert-manager.io + path: spec/dnsNames diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml new file mode 100644 index 0000000000..fd67db9bef --- /dev/null +++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml @@ -0,0 +1,695 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + 
annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: lokistacks.loki.grafana.com +spec: + group: loki.grafana.com + names: + categories: + - logging + kind: LokiStack + listKind: LokiStackList + plural: lokistacks + singular: lokistack + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: LokiStack is the Schema for the lokistacks API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LokiStackSpec defines the desired state of LokiStack + properties: + limits: + description: Limits defines the limits to be applied to log stream processing. + properties: + global: + description: Global defines the limits applied globally across the cluster. + properties: + ingestion: + description: IngestionLimits defines the limits applied on ingested log streams. + properties: + ingestionBurstSize: + description: IngestionBurstSize defines the local rate-limited sample size per distributor replica. It should be set to the set at least to the maximum logs size expected in a single push request. + format: int32 + type: integer + ingestionRate: + description: IngestionRate defines the sample size per second. Units MB. 
+ format: int32 + type: integer + maxGlobalStreamsPerTenant: + description: MaxGlobalStreamsPerTenant defines the maximum number of active streams per tenant, across the cluster. + format: int32 + type: integer + maxLabelNameLength: + description: MaxLabelNameLength defines the maximum number of characters allowed for label keys in log streams. + format: int32 + type: integer + maxLabelNamesPerSeries: + description: MaxLabelNamesPerSeries defines the maximum number of label names per series in each log stream. + format: int32 + type: integer + maxLabelValueLength: + description: MaxLabelValueLength defines the maximum number of characters allowed for label values in log streams. + format: int32 + type: integer + maxLineSize: + description: MaxLineSize defines the maximum line size on ingestion path. Units in Bytes. + format: int32 + type: integer + type: object + queries: + description: QueryLimits defines the limit applied on querying log streams. + properties: + maxChunksPerQuery: + description: MaxChunksPerQuery defines the maximum number of chunks that can be fetched by a single query. + format: int32 + type: integer + maxEntriesLimitPerQuery: + description: MaxEntriesLimitsPerQuery defines the maximum number of log entries that will be returned for a query. + format: int32 + type: integer + maxQuerySeries: + description: MaxQuerySeries defines the the maximum of unique series that is returned by a metric query. + format: int32 + type: integer + type: object + type: object + tenants: + additionalProperties: + description: LimitsTemplateSpec defines the limits applied at ingestion or query path. + properties: + ingestion: + description: IngestionLimits defines the limits applied on ingested log streams. + properties: + ingestionBurstSize: + description: IngestionBurstSize defines the local rate-limited sample size per distributor replica. It should be set to the set at least to the maximum logs size expected in a single push request. 
+ format: int32 + type: integer + ingestionRate: + description: IngestionRate defines the sample size per second. Units MB. + format: int32 + type: integer + maxGlobalStreamsPerTenant: + description: MaxGlobalStreamsPerTenant defines the maximum number of active streams per tenant, across the cluster. + format: int32 + type: integer + maxLabelNameLength: + description: MaxLabelNameLength defines the maximum number of characters allowed for label keys in log streams. + format: int32 + type: integer + maxLabelNamesPerSeries: + description: MaxLabelNamesPerSeries defines the maximum number of label names per series in each log stream. + format: int32 + type: integer + maxLabelValueLength: + description: MaxLabelValueLength defines the maximum number of characters allowed for label values in log streams. + format: int32 + type: integer + maxLineSize: + description: MaxLineSize defines the maximum line size on ingestion path. Units in Bytes. + format: int32 + type: integer + type: object + queries: + description: QueryLimits defines the limit applied on querying log streams. + properties: + maxChunksPerQuery: + description: MaxChunksPerQuery defines the maximum number of chunks that can be fetched by a single query. + format: int32 + type: integer + maxEntriesLimitPerQuery: + description: MaxEntriesLimitsPerQuery defines the maximum number of log entries that will be returned for a query. + format: int32 + type: integer + maxQuerySeries: + description: MaxQuerySeries defines the the maximum of unique series that is returned by a metric query. + format: int32 + type: integer + type: object + type: object + description: Tenants defines the limits applied per tenant. + type: object + type: object + managementState: + default: Managed + description: ManagementState defines if the CR should be managed by the operator or not. Default is managed. 
+ enum: + - Managed + - Unmanaged + type: string + replicationFactor: + description: ReplicationFactor defines the policy for log stream replication. + format: int32 + minimum: 1 + type: integer + size: + description: Size defines one of the supported Loki deployment scale out sizes. + enum: + - 1x.extra-small + - 1x.small + - 1x.medium + type: string + storage: + description: Storage defines the spec for the object storage endpoint to store logs. + properties: + secret: + description: Secret for object storage authentication. Name of a secret in the same namespace as the cluster logging operator. + properties: + name: + description: Name of a secret in the namespace configured for object storage secrets. + type: string + required: + - name + type: object + required: + - secret + type: object + storageClassName: + description: Storage class name defines the storage class for ingester/querier PVCs. + type: string + template: + description: Template defines the resource/limits/tolerations/nodeselectors per component + properties: + compactor: + description: Compactor defines the compaction component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. 
Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + distributor: + description: Distributor defines the distributor component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + gateway: + description: Gateway defines the lokistack gateway component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + indexGateway: + description: IndexGateway defines the index gateway component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + ingester: + description: Ingester defines the ingester component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + querier: + description: Querier defines the querier component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + queryFrontend: + description: QueryFrontend defines the query frontend component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + tenants: + description: Tenants defines the per-tenant authentication and authorization spec for the lokistack-gateway component. + properties: + authentication: + description: Authentication defines the lokistack-gateway component authentication configuration spec per tenant. + items: + description: AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component. + properties: + oidc: + description: OIDC defines the spec for the OIDC tenant's authentication. + properties: + groupClaim: + type: string + issuerURL: + description: IssuerURL defines the URL for issuer. + type: string + redirectURL: + description: RedirectURL defines the URL for redirect. + type: string + secret: + description: Secret defines the spec for the clientID, clientSecret and issuerCAPath for tenant's authentication. + properties: + name: + description: Name of a secret in the namespace configured for tenant secrets. 
+ type: string + required: + - name + type: object + usernameClaim: + type: string + required: + - groupClaim + - issuerURL + - redirectURL + - secret + - usernameClaim + type: object + tenantId: + description: TenantID defines the id of the tenant. + type: string + tenantName: + description: TenantName defines the name of the tenant. + type: string + required: + - oidc + - tenantId + - tenantName + type: object + type: array + authorization: + description: Authorization defines the lokistack-gateway component authorization configuration spec per tenant. + properties: + opa: + description: OPA defines the spec for the third-party endpoint for tenant's authorization. + properties: + url: + description: URL defines the third-party endpoint for authorization. + type: string + required: + - url + type: object + roleBindings: + description: RoleBindings defines configuration to bind a set of roles to a set of subjects. + items: + description: RoleBindingsSpec binds a set of roles to a set of subjects. + properties: + name: + type: string + roles: + items: + type: string + type: array + subjects: + items: + description: Subject represents a subject that has been bound to a role. + properties: + kind: + description: SubjectKind is a kind of LokiStack Gateway RBAC subject. + enum: + - user + - group + type: string + name: + type: string + required: + - kind + - name + type: object + type: array + required: + - name + - roles + - subjects + type: object + type: array + roles: + description: Roles defines a set of permissions to interact with a tenant. + items: + description: RoleSpec describes a set of permissions to interact with a tenant. + properties: + name: + type: string + permissions: + items: + description: PermissionType is a LokiStack Gateway RBAC permission. 
+ enum: + - read + - write + type: string + type: array + resources: + items: + type: string + type: array + tenants: + items: + type: string + type: array + required: + - name + - permissions + - resources + - tenants + type: object + type: array + type: object + mode: + default: openshift-logging + description: Mode defines the mode in which lokistack-gateway component will be configured. + enum: + - static + - dynamic + - openshift-logging + type: string + required: + - mode + type: object + required: + - replicationFactor + - size + - storage + - storageClassName + type: object + status: + description: LokiStackStatus defines the observed state of LokiStack + properties: + components: + description: Components provides summary of all Loki pod status grouped per component. + properties: + compactor: + additionalProperties: + items: + type: string + type: array + description: Compactor is a map to the pod status of the compactor pod. + type: object + distributor: + additionalProperties: + items: + type: string + type: array + description: Distributor is a map to the per pod status of the distributor deployment + type: object + gateway: + additionalProperties: + items: + type: string + type: array + description: Gateway is a map to the per pod status of the lokistack gateway deployment. 
+ type: object + indexGateway: + additionalProperties: + items: + type: string + type: array + description: IndexGateway is a map to the per pod status of the index gateway statefulset + type: object + ingester: + additionalProperties: + items: + type: string + type: array + description: Ingester is a map to the per pod status of the ingester statefulset + type: object + querier: + additionalProperties: + items: + type: string + type: array + description: Querier is a map to the per pod status of the querier deployment + type: object + queryFrontend: + additionalProperties: + items: + type: string + type: array + description: QueryFrontend is a map to the per pod status of the query frontend deployment + type: object + type: object + conditions: + description: Conditions of the Loki deployment health. + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operator/config/crd/kustomization.yaml b/operator/config/crd/kustomization.yaml new file mode 100644 index 0000000000..b92b3cc3c3 --- /dev/null +++ b/operator/config/crd/kustomization.yaml @@ -0,0 +1,21 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/loki.grafana.com_lokistacks.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +#- patches/webhook_in_lokistacks.yaml +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +#- patches/cainjection_in_lokistacks.yaml +# +kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. 
+configurations: +- kustomizeconfig.yaml diff --git a/operator/config/crd/kustomizeconfig.yaml b/operator/config/crd/kustomizeconfig.yaml new file mode 100644 index 0000000000..bcebe0475b --- /dev/null +++ b/operator/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1beta1 + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1beta1 + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/operator/config/crd/patches/cainjection_in_lokistacks.yaml b/operator/config/crd/patches/cainjection_in_lokistacks.yaml new file mode 100644 index 0000000000..bacdd76044 --- /dev/null +++ b/operator/config/crd/patches/cainjection_in_lokistacks.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: lokistacks.loki.grafana.com diff --git a/operator/config/crd/patches/webhook_in_lokistacks.yaml b/operator/config/crd/patches/webhook_in_lokistacks.yaml new file mode 100644 index 0000000000..c9750f0e9a --- /dev/null +++ b/operator/config/crd/patches/webhook_in_lokistacks.yaml @@ -0,0 +1,14 @@ +# The following patch enables a conversion webhook for the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: lokistacks.loki.grafana.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + service: + namespace: system + name: webhook-service + path: /convert diff --git a/operator/config/manager/controller_manager_config.yaml b/operator/config/manager/controller_manager_config.yaml new file mode 100644 index 0000000000..659dc1180c --- /dev/null +++ b/operator/config/manager/controller_manager_config.yaml @@ -0,0 +1,11 @@ +apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 +kind: ControllerManagerConfig +health: + healthProbeBindAddress: :8081 +metrics: + bindAddress: 127.0.0.1:8080 +webhook: + port: 9443 +leaderElection: + leaderElect: true + resourceName: e3716011.grafana.com diff --git a/operator/config/manager/kustomization.yaml b/operator/config/manager/kustomization.yaml new file mode 100644 index 0000000000..8b75d369c6 --- /dev/null +++ b/operator/config/manager/kustomization.yaml @@ -0,0 +1,16 @@ +resources: +- manager.yaml + +generatorOptions: + disableNameSuffixHash: true + +configMapGenerator: +- files: + - controller_manager_config.yaml + name: manager-config +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: quay.io/openshift-logging/loki-operator + newTag: v0.0.1 diff --git a/operator/config/manager/manager.yaml b/operator/config/manager/manager.yaml new file mode 100644 index 0000000000..588f3d72ff --- /dev/null +++ b/operator/config/manager/manager.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + selector: + matchLabels: + name: loki-operator-controller-manager + replicas: 1 + template: + metadata: + labels: + name: loki-operator-controller-manager + spec: + containers: + - command: + - /manager + image: controller:latest + imagePullPolicy: IfNotPresent + name: manager + ports: + - containerPort: 8080 + name: metrics + securityContext: + 
allowPrivilegeEscalation: false + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + terminationGracePeriodSeconds: 10 diff --git a/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml new file mode 100644 index 0000000000..9c6a505b81 --- /dev/null +++ b/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml @@ -0,0 +1,436 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: '[]' + capabilities: Full Lifecycle + categories: OpenShift Optional, Logging & Tracing + certified: "false" + containerImage: quay.io/openshift-logging/loki-operator:v0.0.1 + description: | + The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. + ## Prerequisites and Requirements + ### Loki Operator Namespace + The Loki Operator must be deployed to the global operator group namespace `openshift-logging`. + ### Memory Considerations + Loki is a memory intensive application. The initial + set of OCP nodes may not be large enough to support the Loki stack. Additional OCP nodes must be added + to the OCP cluster if you desire to run with the recommended (or better) memory. 
+ olm.skipRange: '>=4.6.0-0 <5.4.0' + operatorframework.io/cluster-monitoring: "true" + operatorframework.io/suggested-namespace: openshift-logging + support: AOS Cluster Logging + labels: + operatorframework.io/arch.amd64: supported + operatorframework.io/arch.ppc64le: supported + operatorframework.io/arch.s390x: supported + name: loki-operator.v0.0.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: LokiStack is the Schema for the lokistacks API + displayName: LokiStack + kind: LokiStack + name: lokistacks.loki.grafana.com + resources: + - kind: ConfigMap + name: "" + version: v1 + - kind: Deployment + name: "" + version: v1 + - kind: Ingress + name: "" + version: v1 + - kind: PersistentVolumeClaims + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Service + name: "" + version: v1 + - kind: ServiceAccount + name: "" + version: v1 + - kind: ServiceMonitor + name: "" + version: v1 + - kind: StatefulSet + name: "" + version: v1 + specDescriptors: + - description: Limits defines the limits to be applied to log stream processing. + displayName: Rate Limiting + path: limits + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Global defines the limits applied globally across the cluster. + displayName: Global Limits + path: limits.global + - description: IngestionBurstSize defines the local rate-limited sample size + per distributor replica. It should be set at least to the maximum + logs size expected in a single push request. + displayName: Ingestion Burst Size (in MB) + path: limits.global.ingestion.ingestionBurstSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: IngestionRate defines the sample size per second. Units MB. 
+ displayName: Ingestion Rate (in MB) + path: limits.global.ingestion.ingestionRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxGlobalStreamsPerTenant defines the maximum number of active + streams per tenant, across the cluster. + displayName: Max Global Streams per Tenant + path: limits.global.ingestion.maxGlobalStreamsPerTenant + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNameLength defines the maximum number of characters allowed + for label keys in log streams. + displayName: Max Label Name Length + path: limits.global.ingestion.maxLabelNameLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNamesPerSeries defines the maximum number of label names + per series in each log stream. + displayName: Max Labels Names per Series + path: limits.global.ingestion.maxLabelNamesPerSeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelValueLength defines the maximum number of characters + allowed for label values in log streams. + displayName: Max Label Value Length + path: limits.global.ingestion.maxLabelValueLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLineSize defines the maximum line size on ingestion path. + Units in Bytes. + displayName: Max Line Size + path: limits.global.ingestion.maxLineSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxChunksPerQuery defines the maximum number of chunks that can + be fetched by a single query. + displayName: Max Chunk per Query + path: limits.global.queries.maxChunksPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries + that will be returned for a query. 
+ displayName: Max Entries Limit per Query + path: limits.global.queries.maxEntriesLimitPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxQuerySeries defines the maximum of unique series that + is returned by a metric query. + displayName: Max Query Series + path: limits.global.queries.maxQuerySeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Tenants defines the limits applied per tenant. + displayName: Limits per Tenant + path: limits.tenants + - description: IngestionBurstSize defines the local rate-limited sample size + per distributor replica. It should be set at least to the maximum + logs size expected in a single push request. + displayName: Ingestion Burst Size (in MB) + path: limits.tenants.ingestion.ingestionBurstSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: IngestionRate defines the sample size per second. Units MB. + displayName: Ingestion Rate (in MB) + path: limits.tenants.ingestion.ingestionRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxGlobalStreamsPerTenant defines the maximum number of active + streams per tenant, across the cluster. + displayName: Max Global Streams per Tenant + path: limits.tenants.ingestion.maxGlobalStreamsPerTenant + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNameLength defines the maximum number of characters allowed + for label keys in log streams. + displayName: Max Label Name Length + path: limits.tenants.ingestion.maxLabelNameLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNamesPerSeries defines the maximum number of label names + per series in each log stream.
+ displayName: Max Labels Names per Series + path: limits.tenants.ingestion.maxLabelNamesPerSeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelValueLength defines the maximum number of characters + allowed for label values in log streams. + displayName: Max Label Value Length + path: limits.tenants.ingestion.maxLabelValueLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLineSize defines the maximum line size on ingestion path. + Units in Bytes. + displayName: Max Line Size + path: limits.tenants.ingestion.maxLineSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxChunksPerQuery defines the maximum number of chunks that can + be fetched by a single query. + displayName: Max Chunk per Query + path: limits.tenants.queries.maxChunksPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries + that will be returned for a query. + displayName: Max Entries Limit per Query + path: limits.tenants.queries.maxEntriesLimitPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxQuerySeries defines the maximum of unique series that + is returned by a metric query. + displayName: Max Query Series + path: limits.tenants.queries.maxQuerySeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: ManagementState defines if the CR should be managed by the operator + or not. Default is managed. + displayName: Management State + path: managementState + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:Managed + - urn:alm:descriptor:com.tectonic.ui:select:Unmanaged + - description: ReplicationFactor defines the policy for log stream replication.
+ displayName: Replication Factor + path: replicationFactor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Size defines one of the supported Loki deployment scale out sizes. + displayName: LokiStack Size + path: size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small + - urn:alm:descriptor:com.tectonic.ui:select:1x.small + - urn:alm:descriptor:com.tectonic.ui:select:1x.medium + - description: Storage defines the spec for the object storage endpoint to store + logs. + displayName: Object Storage + path: storage + - description: Name of a secret in the namespace configured for object storage + secrets. + displayName: Object Storage Secret + path: storage.secret.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: Storage class name defines the storage class for ingester/querier + PVCs. + displayName: Storage Class Name + path: storageClassName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:StorageClass + - description: Template defines the resource/limits/tolerations/nodeselectors + per component + displayName: Node Placement + path: template + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Compactor defines the compaction component spec. + displayName: Compactor pods + path: template.compactor + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.compactor.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Distributor defines the distributor component spec. + displayName: Distributor pods + path: template.distributor + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.distributor.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Gateway defines the lokistack gateway component spec.
+ displayName: Gateway pods + path: template.gateway + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.gateway.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: IndexGateway defines the index gateway component spec. + displayName: Index Gateway pods + path: template.indexGateway + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.indexGateway.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Ingester defines the ingester component spec. + displayName: Ingester pods + path: template.ingester + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.ingester.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Querier defines the querier component spec. + displayName: Querier pods + path: template.querier + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.querier.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: QueryFrontend defines the query frontend component spec. + displayName: Query Frontend pods + path: template.queryFrontend + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.queryFrontend.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Tenants defines the per-tenant authentication and authorization + spec for the lokistack-gateway component. + displayName: Tenants Configuration + path: tenants + - description: Authentication defines the lokistack-gateway component authentication + configuration spec per tenant. 
+ displayName: Authentication + path: tenants.authentication + - description: OIDC defines the spec for the OIDC tenant's authentication. + displayName: OIDC Configuration + path: tenants.authentication[0].oidc + - description: IssuerURL defines the URL for issuer. + displayName: Issuer URL + path: tenants.authentication[0].oidc.issuerURL + - description: RedirectURL defines the URL for redirect. + displayName: Redirect URL + path: tenants.authentication[0].oidc.redirectURL + - description: Secret defines the spec for the clientID, clientSecret and issuerCAPath + for tenant's authentication. + displayName: Tenant Secret + path: tenants.authentication[0].oidc.secret + - description: Name of a secret in the namespace configured for tenant secrets. + displayName: Tenant Secret Name + path: tenants.authentication[0].oidc.secret.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: TenantID defines the id of the tenant. + displayName: Tenant ID + path: tenants.authentication[0].tenantId + - description: TenantName defines the name of the tenant. + displayName: Tenant Name + path: tenants.authentication[0].tenantName + - description: Authorization defines the lokistack-gateway component authorization + configuration spec per tenant. + displayName: Authorization + path: tenants.authorization + - description: OPA defines the spec for the third-party endpoint for tenant's + authorization. + displayName: OPA Configuration + path: tenants.authorization.opa + - description: URL defines the third-party endpoint for authorization. + displayName: OpenPolicyAgent URL + path: tenants.authorization.opa.url + - description: RoleBindings defines configuration to bind a set of roles to + a set of subjects. + displayName: Static Role Bindings + path: tenants.authorization.roleBindings + - description: Roles defines a set of permissions to interact with a tenant. 
+ displayName: Static Roles + path: tenants.authorization.roles + - description: Mode defines the mode in which lokistack-gateway component will + be configured. + displayName: Mode + path: tenants.mode + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:static + - urn:alm:descriptor:com.tectonic.ui:select:dynamic + - urn:alm:descriptor:com.tectonic.ui:select:openshift-logging + statusDescriptors: + - description: Distributor is a map to the per pod status of the distributor + deployment + displayName: Distributor + path: components.distributor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Ingester is a map to the per pod status of the ingester statefulset + displayName: Ingester + path: components.ingester + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Querier is a map to the per pod status of the querier deployment + displayName: Querier + path: components.querier + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: QueryFrontend is a map to the per pod status of the query frontend + deployment + displayName: Query Frontend + path: components.queryFrontend + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Compactor is a map to the pod status of the compactor pod. + displayName: Compactor + path: components.compactor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Gateway is a map to the per pod status of the lokistack gateway + deployment. + displayName: Gateway + path: components.gateway + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: IndexGateway is a map to the per pod status of the index gateway + statefulset + displayName: IndexGateway + path: components.indexGateway + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Conditions of the Loki deployment health. 
+ displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1beta1 + description: | + The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. + ## Prerequisites and Requirements + ### Loki Operator Namespace + The Loki Operator must be deployed to the global operator group namespace `openshift-logging`. + ### Memory Considerations + Loki is a memory intensive application. The initial + set of OCP nodes may not be large enough to support the Loki cluster. Additional OCP nodes must be added + to the OCP cluster if you desire to run with the recommended (or better) memory. + displayName: Loki Operator + icon: + - base64data: PHN2ZyBpZD0iYWZiNDE1NDktYzU3MC00OWI3LTg1Y2QtNjU3NjAwZWRmMmUxIiBkYXRhLW5hbWU9IkxheWVyIDEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDcyMS4xNSA3MjEuMTUiPgogIDxkZWZzPgogICAgPHN0eWxlPgogICAgICAuYTQ0OGZkZWEtNGE0Yy00Njc4LTk3NmEtYzM3ODUzMDhhZTA2IHsKICAgICAgICBmaWxsOiAjZGIzOTI3OwogICAgICB9CgogICAgICAuZTEzMzA4YjgtNzQ4NS00Y2IwLTk3NjUtOGE1N2I5M2Y5MWE2IHsKICAgICAgICBmaWxsOiAjY2IzNzI4OwogICAgICB9CgogICAgICAuZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIHsKICAgICAgICBmaWxsOiAjZmZmOwogICAgICB9CgogICAgICAuYTA0MjBjYWMtZWJlNi00YzE4LWI5ODEtYWJiYTBiYTliMzY1IHsKICAgICAgICBmaWxsOiAjZTVlNWU0OwogICAgICB9CiAgICA8L3N0eWxlPgogIDwvZGVmcz4KICA8Y2lyY2xlIGNsYXNzPSJhNDQ4ZmRlYS00YTRjLTQ2NzgtOTc2YS1jMzc4NTMwOGFlMDYiIGN4PSIzNjAuNTgiIGN5PSIzNjAuNTgiIHI9IjM1OC4yOCIvPgogIDxwYXRoIGNsYXNzPSJlMTMzMDhiOC03NDg1LTRjYjAtOTc2NS04YTU3YjkzZjkxYTYiIGQ9Ik02MTMuNTQsMTA3LjMsMTA2Ljg4LDYxNGMxNDAsMTM4LjUxLDM2NS44MiwxMzguMDYsNTA1LjI2LTEuMzlTNzUyLDI0Ny4zMyw2MTMuNTQsMTA3LjNaIi8+CiAgPGc+CiAgICA8Y2lyY2xlIGNsYXNzPSJlNzcyODZmMS0yMmRhLTQ0ZDEtOGVmYi0xZDE0YjBjY2FlNjIiIGN4PSIyMzQuNyIgY3k9IjM1Ny4zIiByPSI0Ny43MiIvPgogICAgPGNpcmNsZSBjbGFzcz0iZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIiBjeD0iMjM0LjciIGN5PSIxODIuOTQiIHI9IjQ3LjcyIi8+CiAgICA8Y2lyY2xlIGNsYXNzPSJlNzcyODZmMS0yMmRhLT
Q0ZDEtOGVmYi0xZDE0YjBjY2FlNjIiIGN4PSIyMzQuNyIgY3k9IjUzOC4yMSIgcj0iNDcuNzIiLz4KICA8L2c+CiAgPHBvbHlnb24gY2xhc3M9ImU3NzI4NmYxLTIyZGEtNDRkMS04ZWZiLTFkMTRiMGNjYWU2MiIgcG9pbnRzPSI0MzUuMTkgMzQ3LjMgMzkwLjU0IDM0Ny4zIDM5MC41NCAxNzIuOTQgMzE2LjE2IDE3Mi45NCAzMTYuMTYgMTkyLjk0IDM3MC41NCAxOTIuOTQgMzcwLjU0IDM0Ny4zIDMxNi4xNiAzNDcuMyAzMTYuMTYgMzY3LjMgMzcwLjU0IDM2Ny4zIDM3MC41NCA1MjEuNjcgMzE2LjE2IDUyMS42NyAzMTYuMTYgNTQxLjY3IDM5MC41NCA1NDEuNjcgMzkwLjU0IDM2Ny4zIDQzNS4xOSAzNjcuMyA0MzUuMTkgMzQ3LjMiLz4KICA8cG9seWdvbiBjbGFzcz0iZTc3Mjg2ZjEtMjJkYS00NGQxLThlZmItMWQxNGIwY2NhZTYyIiBwb2ludHM9IjU5OS43NCAzMTcuMDMgNTU3Ljk3IDMxNy4wMyA1NTAuOTcgMzE3LjAzIDU1MC45NyAzMTAuMDMgNTUwLjk3IDI2OC4yNiA1NTAuOTcgMjY4LjI2IDQ2NC4zNiAyNjguMjYgNDY0LjM2IDQ0Ni4zNCA1OTkuNzQgNDQ2LjM0IDU5OS43NCAzMTcuMDMgNTk5Ljc0IDMxNy4wMyIvPgogIDxwb2x5Z29uIGNsYXNzPSJhMDQyMGNhYy1lYmU2LTRjMTgtYjk4MS1hYmJhMGJhOWIzNjUiIHBvaW50cz0iNTk5Ljc0IDMxMC4wMyA1NTcuOTcgMjY4LjI2IDU1Ny45NyAzMTAuMDMgNTk5Ljc0IDMxMC4wMyIvPgo8L3N2Zz4K + mediatype: image/svg+xml + install: + spec: + deployments: null + strategy: "" + installModes: + - supported: true + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - logging + - loki + links: + - name: Loki Operator + url: https://github.com/grafana/loki + maintainers: + - email: loki-operator-team@googlegroups.com + name: Grafana Loki SIG Operator + maturity: alpha + provider: + name: Grafana.com + version: 0.0.0 diff --git a/operator/config/manifests/kustomization.yaml b/operator/config/manifests/kustomization.yaml new file mode 100644 index 0000000000..5d9120c449 --- /dev/null +++ b/operator/config/manifests/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- ../overlays/openshift +- ../samples +- ../scorecard diff --git a/operator/config/overlays/development/kustomization.yaml b/operator/config/overlays/development/kustomization.yaml new file mode 100644 index 0000000000..327464bacc --- /dev/null +++ 
b/operator/config/overlays/development/kustomization.yaml @@ -0,0 +1,22 @@ +resources: +- ../../crd +- ../../rbac +- ../../manager +- ./minio + +# Adds namespace to all resources. +namespace: default + +# Labels to add to all resources and selectors. +#commonLabels: +# someName: someValue +commonLabels: + app.kubernetes.io/name: loki-operator + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/version: "0.0.1" + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/managed-by: operator-lifecycle-manager + +patchesStrategicMerge: +- manager_related_image_patch.yaml +- manager_image_pull_policy_patch.yaml diff --git a/operator/config/overlays/development/manager_image_pull_policy_patch.yaml b/operator/config/overlays/development/manager_image_pull_policy_patch.yaml new file mode 100644 index 0000000000..5e9ea970bc --- /dev/null +++ b/operator/config/overlays/development/manager_image_pull_policy_patch.yaml @@ -0,0 +1,10 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Always diff --git a/operator/config/overlays/development/manager_related_image_patch.yaml b/operator/config/overlays/development/manager_related_image_patch.yaml new file mode 100644 index 0000000000..c52949ca00 --- /dev/null +++ b/operator/config/overlays/development/manager_related_image_patch.yaml @@ -0,0 +1,14 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + template: + spec: + containers: + - name: manager + env: + - name: RELATED_IMAGE_LOKI + value: docker.io/grafana/loki:2.4.1 + - name: RELATED_IMAGE_GATEWAY + value: quay.io/observatorium/api:latest diff --git a/operator/config/overlays/development/minio/deployment.yaml b/operator/config/overlays/development/minio/deployment.yaml new file mode 100644 index 0000000000..6b3a16d3ea --- /dev/null +++ b/operator/config/overlays/development/minio/deployment.yaml @@ -0,0 +1,38 @@ 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: minio +spec: + selector: + matchLabels: + app.kubernetes.io/name: minio + strategy: + type: Recreate + template: + metadata: + labels: + app.kubernetes.io/name: minio + spec: + containers: + - command: + - /bin/sh + - -c + - | + mkdir -p /storage/loki && \ + minio server /storage + env: + - name: MINIO_ACCESS_KEY + value: minio + - name: MINIO_SECRET_KEY + value: minio123 + image: minio/minio + name: minio + ports: + - containerPort: 9000 + volumeMounts: + - mountPath: /storage + name: storage + volumes: + - name: storage + persistentVolumeClaim: + claimName: minio diff --git a/operator/config/overlays/development/minio/kustomization.yaml b/operator/config/overlays/development/minio/kustomization.yaml new file mode 100644 index 0000000000..7d7f05023a --- /dev/null +++ b/operator/config/overlays/development/minio/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- pvc.yaml +- service.yaml +- secret.yaml +- deployment.yaml diff --git a/operator/config/overlays/development/minio/pvc.yaml b/operator/config/overlays/development/minio/pvc.yaml new file mode 100644 index 0000000000..256596702e --- /dev/null +++ b/operator/config/overlays/development/minio/pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app.kubernetes.io/name: minio + name: minio +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi diff --git a/operator/config/overlays/development/minio/secret.yaml b/operator/config/overlays/development/minio/secret.yaml new file mode 100644 index 0000000000..769f9610fd --- /dev/null +++ b/operator/config/overlays/development/minio/secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: test +stringData: + endpoint: http://minio.default.svc.cluster.local.:9000 + bucketnames: loki + access_key_id: minio + access_key_secret: minio123 +type: Opaque diff --git a/operator/config/overlays/development/minio/service.yaml 
b/operator/config/overlays/development/minio/service.yaml new file mode 100644 index 0000000000..20880005d7 --- /dev/null +++ b/operator/config/overlays/development/minio/service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: minio +spec: + ports: + - port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app.kubernetes.io/name: minio + type: ClusterIP diff --git a/operator/config/overlays/openshift/kustomization.yaml b/operator/config/overlays/openshift/kustomization.yaml new file mode 100644 index 0000000000..2bdec0785e --- /dev/null +++ b/operator/config/overlays/openshift/kustomization.yaml @@ -0,0 +1,90 @@ +resources: +- ../../crd +- ../../rbac +- ../../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +- ../../prometheus + +# Adds namespace to all resources. +namespace: openshift-logging + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: loki-operator- + +# Labels to add to all resources and selectors. +#commonLabels: +# someName: someValue +commonLabels: + app.kubernetes.io/name: loki-operator + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/version: "0.0.1" + app.kubernetes.io/part-of: cluster-logging + app.kubernetes.io/managed-by: operator-lifecycle-manager + +patchesStrategicMerge: +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. 
+- manager_auth_proxy_patch.yaml +- manager_related_image_patch.yaml +- manager_run_flags_patch.yaml +- prometheus_service_monitor_patch.yaml + +# apiVersion: kustomize.config.k8s.io/v1beta1 +# kind: Kustomization +images: +- name: controller + newName: quay.io/openshift-logging/loki-operator + newTag: v0.0.1 + +# Mount the controller config file for loading manager configurations +# through a ComponentConfig type +#- manager_config_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# the following config is for teaching kustomize how to do var substitution +vars: +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
+#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR +# objref: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldref: +# fieldpath: metadata.namespace +#- name: CERTIFICATE_NAME +# objref: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +#- name: SERVICE_NAMESPACE # namespace of the service +# objref: +# kind: Service +# version: v1 +# name: webhook-service +# fieldref: +# fieldpath: metadata.namespace +#- name: SERVICE_NAME +# objref: +# kind: Service +# version: v1 +# name: webhook-service diff --git a/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml b/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml new file mode 100644 index 0000000000..750338a753 --- /dev/null +++ b/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + image: quay.io/openshift/origin-kube-rbac-proxy:latest + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt" + - "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key" + - "--v=2" + ports: + - containerPort: 8443 + name: https + volumeMounts: + - mountPath: /var/run/secrets/serving-cert + name: loki-operator-metrics-cert + volumes: + - name: loki-operator-metrics-cert + secret: + defaultMode: 420 + optional: true + secretName: loki-operator-metrics diff --git a/operator/config/overlays/openshift/manager_related_image_patch.yaml b/operator/config/overlays/openshift/manager_related_image_patch.yaml new file mode 100644 index 0000000000..a7fc32038d --- /dev/null +++ 
b/operator/config/overlays/openshift/manager_related_image_patch.yaml @@ -0,0 +1,16 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + template: + spec: + containers: + - name: manager + env: + - name: RELATED_IMAGE_LOKI + value: quay.io/openshift-logging/loki:v2.4.1 + - name: RELATED_IMAGE_GATEWAY + value: quay.io/observatorium/api:latest + - name: RELATED_IMAGE_OPA + value: quay.io/observatorium/opa-openshift:latest diff --git a/operator/config/overlays/openshift/manager_run_flags_patch.yaml b/operator/config/overlays/openshift/manager_run_flags_patch.yaml new file mode 100644 index 0000000000..23e9034d38 --- /dev/null +++ b/operator/config/overlays/openshift/manager_run_flags_patch.yaml @@ -0,0 +1,15 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + template: + spec: + containers: + - name: manager + args: + - "--with-lokistack-gateway" + - "--with-lokistack-gateway-route" + - "--with-cert-signing-service" + - "--with-service-monitors" + - "--with-tls-service-monitors" diff --git a/operator/config/overlays/openshift/prometheus_service_monitor_patch.yaml b/operator/config/overlays/openshift/prometheus_service_monitor_patch.yaml new file mode 100644 index 0000000000..6e852cfb28 --- /dev/null +++ b/operator/config/overlays/openshift/prometheus_service_monitor_patch.yaml @@ -0,0 +1,17 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + name: loki-operator + name: metrics-monitor +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + path: /metrics + targetPort: 8443 + scheme: https + interval: 30s + scrapeTimeout: 10s + tlsConfig: + caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + serverName: loki-operator-controller-manager-metrics-service.openshift-logging.svc diff --git a/operator/config/overlays/openshift/size-calculator/cluster_monitoring_config.yaml 
b/operator/config/overlays/openshift/size-calculator/cluster_monitoring_config.yaml new file mode 100644 index 0000000000..0d768886ce --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/cluster_monitoring_config.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-monitoring-config + namespace: openshift-monitoring +data: + config.yaml: | + enableUserWorkload: true diff --git a/operator/config/overlays/openshift/size-calculator/kustomization.yaml b/operator/config/overlays/openshift/size-calculator/kustomization.yaml new file mode 100644 index 0000000000..dd6012fa90 --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/kustomization.yaml @@ -0,0 +1,23 @@ +resources: +- logfile_metric_daemonset.yaml +- logfile_metric_role.yaml +- logfile_metric_role_binding.yaml +- logfile_metric_scc.yaml +- logfile_metric_service.yaml +- logfile_metric_service_account.yaml +- logfile_metric_service_monitor.yaml +- storage_size_calculator_config.yaml +- storage_size_calculator.yaml + +# Adds namespace to all resources. +namespace: openshift-logging + +# Labels to add to all resources and selectors. 
+# commonLabels: +# someName: someValue +commonLabels: + app.kubernetes.io/name: storage-size-calculator + app.kubernetes.io/instance: storage-size-calculator-v0.0.1 + app.kubernetes.io/version: "0.0.1" + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/managed-by: kubectl-apply diff --git a/operator/config/overlays/openshift/size-calculator/logfile_metric_daemonset.yaml b/operator/config/overlays/openshift/size-calculator/logfile_metric_daemonset.yaml new file mode 100644 index 0000000000..e70d73d1b7 --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/logfile_metric_daemonset.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: log-file-metric-exporter + labels: + name: log-file-metric-exporter +spec: + selector: + matchLabels: + name: log-file-metric-exporter + template: + metadata: + labels: + name: log-file-metric-exporter + spec: + nodeSelector: + kubernetes.io/os: linux + containers: + - name: log-file-metric-exporter + image: quay.io/openshift-logging/log-file-metric-exporter:latest + imagePullPolicy: IfNotPresent + command: + - /usr/local/bin/log-file-metric-exporter + - -verbosity=2 + - -dir=/var/log/containers + - -http=:2112 + - -keyFile=/var/run/secrets/serving-cert/tls.key + - -crtFile=/var/run/secrets/serving-cert/tls.crt + ports: + - containerPort: 2112 + name: logfile-metrics + protocol: TCP + volumeMounts: + - mountPath: /var/run/secrets/serving-cert + name: log-file-metric-exporter-metrics + - mountPath: /var/log + name: logfile-varlog + securityContext: + seLinuxOptions: + type: spc_t + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + serviceAccount: log-file-metric-exporter + volumes: + - name: log-file-metric-exporter-metrics + secret: + defaultMode: 420 + optional: true + secretName: log-file-metric-exporter-metrics + - name: logfile-varlog + hostPath: + path: /var/log + - name: storage-size-calculator-ca-bundle + configMap: + name: storage-size-calculator-ca-bundle diff 
--git a/operator/config/overlays/openshift/size-calculator/logfile_metric_role.yaml b/operator/config/overlays/openshift/size-calculator/logfile_metric_role.yaml new file mode 100644 index 0000000000..5ab8a7a049 --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/logfile_metric_role.yaml @@ -0,0 +1,13 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: log-file-metric-exporter-privileged +rules: + - verbs: + - use + apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + resourceNames: + - log-file-metric-exporter-scc diff --git a/operator/config/overlays/openshift/size-calculator/logfile_metric_role_binding.yaml b/operator/config/overlays/openshift/size-calculator/logfile_metric_role_binding.yaml new file mode 100644 index 0000000000..a55beb558b --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/logfile_metric_role_binding.yaml @@ -0,0 +1,11 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: log-file-metric-exporter-privileged-binding +subjects: + - kind: ServiceAccount + name: log-file-metric-exporter +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: log-file-metric-exporter-privileged diff --git a/operator/config/overlays/openshift/size-calculator/logfile_metric_scc.yaml b/operator/config/overlays/openshift/size-calculator/logfile_metric_scc.yaml new file mode 100644 index 0000000000..2ea0dc6bcb --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/logfile_metric_scc.yaml @@ -0,0 +1,43 @@ +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: log-file-metric-exporter-scc +allowPrivilegedContainer: true +requiredDropCapabilities: + - MKNOD + - CHOWN + - DAC_OVERRIDE + - FSETID + - FOWNER + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - KILL +allowHostDirVolumePlugin: true +allowHostPorts: false +runAsUser: + type: RunAsAny +users: [] +allowHostIPC: false 
+seLinuxContext: + type: RunAsAny +readOnlyRootFilesystem: false +fsGroup: + type: RunAsAny +groups: + - 'system:cluster-admins' +defaultAddCapabilities: null +supplementalGroups: + type: RunAsAny +volumes: + - configMap + - downwardAPI + - emptyDir + - persistentVolumeClaim + - projected + - secret +allowHostPID: false +allowHostNetwork: false +allowPrivilegeEscalation: true +allowedCapabilities: null diff --git a/operator/config/overlays/openshift/size-calculator/logfile_metric_service.yaml b/operator/config/overlays/openshift/size-calculator/logfile_metric_service.yaml new file mode 100644 index 0000000000..ee1761e597 --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/logfile_metric_service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: log-file-metric-exporter-metrics + labels: + name: log-file-metric-exporter + annotations: + service.beta.openshift.io/serving-cert-secret-name: log-file-metric-exporter-metrics +spec: + ports: + - name: logfile-metrics + port: 2112 + protocol: TCP + targetPort: logfile-metrics + selector: + name: log-file-metric-exporter diff --git a/operator/config/overlays/openshift/size-calculator/logfile_metric_service_account.yaml b/operator/config/overlays/openshift/size-calculator/logfile_metric_service_account.yaml new file mode 100644 index 0000000000..20e9999de0 --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/logfile_metric_service_account.yaml @@ -0,0 +1,9 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: log-file-metric-exporter +secrets: + - name: logfile-metric-dockercfg + - name: logfile-metric-token +imagePullSecrets: + - name: logfile-metric-dockercfg diff --git a/operator/config/overlays/openshift/size-calculator/logfile_metric_service_monitor.yaml b/operator/config/overlays/openshift/size-calculator/logfile_metric_service_monitor.yaml new file mode 100644 index 0000000000..9396a8d35d --- /dev/null +++ 
b/operator/config/overlays/openshift/size-calculator/logfile_metric_service_monitor.yaml @@ -0,0 +1,20 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: monitor-log-file-metric-exporter + labels: + name: log-file-metric-exporter +spec: + selector: + matchLabels: + name: log-file-metric-exporter + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + path: /metrics + port: logfile-metrics + scheme: https + interval: 30s + scrapeTimeout: 10s + tlsConfig: + caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + serverName: log-file-metric-exporter-metrics.openshift-logging.svc diff --git a/operator/config/overlays/openshift/size-calculator/storage_size_calculator.yaml b/operator/config/overlays/openshift/size-calculator/storage_size_calculator.yaml new file mode 100644 index 0000000000..e7f19af1d3 --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/storage_size_calculator.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: storage-size-calculator +spec: + selector: + matchLabels: + name: log-file-metric-exporter + replicas: 1 + template: + metadata: + labels: + name: log-file-metric-exporter + spec: + containers: + - command: + - /size-calculator + image: quay.io/openshift-logging/storage-size-calculator:latest + imagePullPolicy: Always + name: size-calculator + ports: + - containerPort: 2112 + name: logfile-metrics + securityContext: + allowPrivilegeEscalation: false + env: + - name: PROMETHEUS_URL + valueFrom: + secretKeyRef: + name: promsecret + key: prometheus_url + - name: PROMETHEUS_TOKEN + valueFrom: + secretKeyRef: + name: promsecret + key: prometheus_token + terminationGracePeriodSeconds: 10 + serviceAccount: log-file-metric-exporter diff --git a/operator/config/overlays/openshift/size-calculator/storage_size_calculator_config.yaml b/operator/config/overlays/openshift/size-calculator/storage_size_calculator_config.yaml new file 
mode 100644 index 0000000000..2846cabad7 --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/storage_size_calculator_config.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: storage-size-calculator-ca-bundle + annotations: + "service.beta.openshift.io/inject-cabundle": "true" diff --git a/operator/config/overlays/openshift/size-calculator/user_workload_monitoring_config.yaml b/operator/config/overlays/openshift/size-calculator/user_workload_monitoring_config.yaml new file mode 100644 index 0000000000..9e93ef9e93 --- /dev/null +++ b/operator/config/overlays/openshift/size-calculator/user_workload_monitoring_config.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + prometheus: + retention: 1h diff --git a/operator/config/overlays/production/kustomization.yaml b/operator/config/overlays/production/kustomization.yaml new file mode 100644 index 0000000000..1e0a4b8ae6 --- /dev/null +++ b/operator/config/overlays/production/kustomization.yaml @@ -0,0 +1,88 @@ +resources: +- ../../crd +- ../../rbac +- ../../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +- ../../prometheus + +# Adds namespace to all resources. +namespace: loki-operator + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: loki-operator- + +# Labels to add to all resources and selectors. 
+#commonLabels: +# someName: someValue +commonLabels: + app.kubernetes.io/name: loki-operator + app.kubernetes.io/instance: loki-operator-v0.0.1 + app.kubernetes.io/version: "0.0.1" + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/managed-by: operator-lifecycle-manager + +patchesStrategicMerge: +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. +- manager_auth_proxy_patch.yaml +- manager_related_image_patch.yaml +- manager_run_flags_patch.yaml +- prometheus_service_monitor_patch.yaml + +images: +- name: controller + newName: quay.io/viaq/loki-operator + newTag: v0.0.1 + +# Mount the controller config file for loading manager configurations +# through a ComponentConfig type +#- manager_config_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# the following config is for teaching kustomize how to do var substitution +vars: +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
+#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR +# objref: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldref: +# fieldpath: metadata.namespace +#- name: CERTIFICATE_NAME +# objref: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +#- name: SERVICE_NAMESPACE # namespace of the service +# objref: +# kind: Service +# version: v1 +# name: webhook-service +# fieldref: +# fieldpath: metadata.namespace +#- name: SERVICE_NAME +# objref: +# kind: Service +# version: v1 +# name: webhook-service diff --git a/operator/config/overlays/production/manager_auth_proxy_patch.yaml b/operator/config/overlays/production/manager_auth_proxy_patch.yaml new file mode 100644 index 0000000000..64d222654d --- /dev/null +++ b/operator/config/overlays/production/manager_auth_proxy_patch.yaml @@ -0,0 +1,31 @@ +# This patch injects a sidecar container which is an HTTP proxy for the +# controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt" + - "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key" + - "--v=2" + ports: + - containerPort: 8443 + name: https + volumeMounts: + - mountPath: /var/run/secrets/serving-cert + name: loki-operator-metrics-cert + volumes: + - name: loki-operator-metrics-cert + secret: + defaultMode: 420 + optional: true + secretName: loki-operator-metrics diff --git a/operator/config/overlays/production/manager_related_image_patch.yaml b/operator/config/overlays/production/manager_related_image_patch.yaml new file mode 100644 index 0000000000..c52949ca00 --- /dev/null +++ b/operator/config/overlays/production/manager_related_image_patch.yaml @@ -0,0 +1,14 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + template: + spec: + containers: + - name: manager + env: + - name: RELATED_IMAGE_LOKI + value: docker.io/grafana/loki:2.4.1 + - name: RELATED_IMAGE_GATEWAY + value: quay.io/observatorium/api:latest diff --git a/operator/config/overlays/production/manager_run_flags_patch.yaml b/operator/config/overlays/production/manager_run_flags_patch.yaml new file mode 100644 index 0000000000..8c9ce6420f --- /dev/null +++ b/operator/config/overlays/production/manager_run_flags_patch.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + template: + spec: + containers: + - name: manager + args: + - "--with-lokistack-gateway" diff --git a/operator/config/overlays/production/prometheus_service_monitor_patch.yaml b/operator/config/overlays/production/prometheus_service_monitor_patch.yaml new file mode 100644 index 
0000000000..ca346871f8 --- /dev/null +++ b/operator/config/overlays/production/prometheus_service_monitor_patch.yaml @@ -0,0 +1,18 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + name: loki-operator + name: metrics-monitor +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + path: /metrics + targetPort: 8443 + scheme: https + interval: 30s + scrapeTimeout: 10s + tlsConfig: + caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + serverName: loki-operator-controller-manager-metrics-service.loki-operator.svc diff --git a/operator/config/prometheus/kustomization.yaml b/operator/config/prometheus/kustomization.yaml new file mode 100644 index 0000000000..ed137168a1 --- /dev/null +++ b/operator/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/operator/config/prometheus/monitor.yaml b/operator/config/prometheus/monitor.yaml new file mode 100644 index 0000000000..9a4fabe3c5 --- /dev/null +++ b/operator/config/prometheus/monitor.yaml @@ -0,0 +1,12 @@ + +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + name: loki-operator + name: metrics-monitor +spec: + selector: + matchLabels: + app.kubernetes.io/name: loki-operator diff --git a/operator/config/rbac/auth_proxy_client_clusterrole.yaml b/operator/config/rbac/auth_proxy_client_clusterrole.yaml new file mode 100644 index 0000000000..bd4af137a9 --- /dev/null +++ b/operator/config/rbac/auth_proxy_client_clusterrole.yaml @@ -0,0 +1,7 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: ["/metrics"] + verbs: ["get"] diff --git a/operator/config/rbac/auth_proxy_role.yaml b/operator/config/rbac/auth_proxy_role.yaml new file mode 100644 index 0000000000..618f5e4177 --- /dev/null +++ 
b/operator/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: proxy-role +rules: +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] diff --git a/operator/config/rbac/auth_proxy_role_binding.yaml b/operator/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 0000000000..48ed1e4b85 --- /dev/null +++ b/operator/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/operator/config/rbac/auth_proxy_service.yaml b/operator/config/rbac/auth_proxy_service.yaml new file mode 100644 index 0000000000..0e34e68d70 --- /dev/null +++ b/operator/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.openshift.io/serving-cert-secret-name: loki-operator-metrics + labels: + name: controller-manager-metrics-service +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + name: loki-operator-controller-manager diff --git a/operator/config/rbac/kustomization.yaml b/operator/config/rbac/kustomization.yaml new file mode 100644 index 0000000000..b48c43c169 --- /dev/null +++ b/operator/config/rbac/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml +- auth_proxy_client_clusterrole.yaml +- prometheus_role.yaml +- prometheus_role_binding.yaml diff --git a/operator/config/rbac/leader_election_role.yaml 
b/operator/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000000..6334cc51c8 --- /dev/null +++ b/operator/config/rbac/leader_election_role.yaml @@ -0,0 +1,27 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/operator/config/rbac/leader_election_role_binding.yaml b/operator/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000000..eed16906f4 --- /dev/null +++ b/operator/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/operator/config/rbac/lokistack_editor_role.yaml b/operator/config/rbac/lokistack_editor_role.yaml new file mode 100644 index 0000000000..c9d104a810 --- /dev/null +++ b/operator/config/rbac/lokistack_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit lokistacks. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lokistack-editor-role +rules: +- apiGroups: + - loki.grafana.com + resources: + - lokistacks + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - loki.grafana.com + resources: + - lokistacks/status + verbs: + - get diff --git a/operator/config/rbac/lokistack_viewer_role.yaml b/operator/config/rbac/lokistack_viewer_role.yaml new file mode 100644 index 0000000000..d5f3b0d040 --- /dev/null +++ b/operator/config/rbac/lokistack_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view lokistacks. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lokistack-viewer-role +rules: +- apiGroups: + - loki.grafana.com + resources: + - lokistacks + verbs: + - get + - list + - watch +- apiGroups: + - loki.grafana.com + resources: + - lokistacks/status + verbs: + - get diff --git a/operator/config/rbac/prometheus_role.yaml b/operator/config/rbac/prometheus_role.yaml new file mode 100644 index 0000000000..c99de1f9fb --- /dev/null +++ b/operator/config/rbac/prometheus_role.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: prometheus +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch diff --git a/operator/config/rbac/prometheus_role_binding.yaml b/operator/config/rbac/prometheus_role_binding.yaml new file mode 100644 index 0000000000..e12439c227 --- /dev/null +++ b/operator/config/rbac/prometheus_role_binding.yaml @@ -0,0 +1,16 @@ +# Grant cluster-monitoring access to openshift-operators-redhat metrics +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus + annotations: + include.release.openshift.io/self-managed-high-availability: 
"true" + include.release.openshift.io/single-node-developer: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus +subjects: +- kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring diff --git a/operator/config/rbac/role.yaml b/operator/config/rbac/role.yaml new file mode 100644 index 0000000000..c3876d1724 --- /dev/null +++ b/operator/config/rbac/role.yaml @@ -0,0 +1,131 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - config.openshift.io + resources: + - dnses + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - loki.grafana.com + resources: + - lokistacks + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - loki.grafana.com + resources: + - lokistacks/finalizers + verbs: + - update +- apiGroups: + - loki.grafana.com + resources: + - lokistacks/status + verbs: + - get + - patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - get + - list + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - get + - list + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - 
route.openshift.io + resources: + - routes + verbs: + - create + - get + - list + - update + - watch diff --git a/operator/config/rbac/role_binding.yaml b/operator/config/rbac/role_binding.yaml new file mode 100644 index 0000000000..8f2658702c --- /dev/null +++ b/operator/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/operator/config/samples/kustomization.yaml b/operator/config/samples/kustomization.yaml new file mode 100644 index 0000000000..1ba3ca969b --- /dev/null +++ b/operator/config/samples/kustomization.yaml @@ -0,0 +1,4 @@ +## Append samples you want in your CSV to this file as resources ## +resources: +- loki_v1beta1_lokistack.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/operator/config/samples/loki_v1beta1_lokistack.yaml b/operator/config/samples/loki_v1beta1_lokistack.yaml new file mode 100644 index 0000000000..c32f01681d --- /dev/null +++ b/operator/config/samples/loki_v1beta1_lokistack.yaml @@ -0,0 +1,11 @@ +apiVersion: loki.grafana.com/v1beta1 +kind: LokiStack +metadata: + name: lokistack-sample +spec: + size: 1x.small + replicationFactor: 2 + storage: + secret: + name: test + storageClassName: standard diff --git a/operator/config/scorecard/bases/config.yaml b/operator/config/scorecard/bases/config.yaml new file mode 100644 index 0000000000..c77047841e --- /dev/null +++ b/operator/config/scorecard/bases/config.yaml @@ -0,0 +1,7 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: [] diff --git a/operator/config/scorecard/kustomization.yaml b/operator/config/scorecard/kustomization.yaml new file mode 100644 index 0000000000..d73509ee73 --- /dev/null +++ 
b/operator/config/scorecard/kustomization.yaml @@ -0,0 +1,16 @@ +resources: +- bases/config.yaml +patchesJson6902: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +- path: patches/olm.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +# +kubebuilder:scaffold:patchesJson6902 diff --git a/operator/config/scorecard/patches/basic.config.yaml b/operator/config/scorecard/patches/basic.config.yaml new file mode 100644 index 0000000000..4581edc9b0 --- /dev/null +++ b/operator/config/scorecard/patches/basic.config.yaml @@ -0,0 +1,10 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: basic + test: basic-check-spec-test diff --git a/operator/config/scorecard/patches/olm.config.yaml b/operator/config/scorecard/patches/olm.config.yaml new file mode 100644 index 0000000000..9422681430 --- /dev/null +++ b/operator/config/scorecard/patches/olm.config.yaml @@ -0,0 +1,50 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-bundle-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.4.0 + 
labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/operator/controllers/internal/management/state/state.go b/operator/controllers/internal/management/state/state.go new file mode 100644 index 0000000000..8f6f65b8a8 --- /dev/null +++ b/operator/controllers/internal/management/state/state.go @@ -0,0 +1,30 @@ +package state + +import ( + "context" + + "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/log" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + ctrl "sigs.k8s.io/controller-runtime" +) + +// IsManaged checks if the custom resource is configured with ManagementState Managed. +func IsManaged(ctx context.Context, req ctrl.Request, k k8s.Client) (bool, error) { + ll := log.WithValues("lokistack", req.NamespacedName) + + var stack lokiv1beta1.LokiStack + if err := k.Get(ctx, req.NamespacedName, &stack); err != nil { + if apierrors.IsNotFound(err) { + // maybe the user deleted it before we could react? 
Either way this isn't an issue + ll.Error(err, "could not find the requested loki stack", "name", req.NamespacedName) + return false, nil + } + return false, kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) + } + return stack.Spec.ManagementState == lokiv1beta1.ManagementStateManaged, nil +} diff --git a/operator/controllers/internal/management/state/state_test.go b/operator/controllers/internal/management/state/state_test.go new file mode 100644 index 0000000000..30f554a356 --- /dev/null +++ b/operator/controllers/internal/management/state/state_test.go @@ -0,0 +1,118 @@ +package state_test + +import ( + "context" + "testing" + + "github.com/ViaQ/logerr/kverrors" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/controllers/internal/management/state" + "github.com/grafana/loki-operator/internal/external/k8s/k8sfakes" + "github.com/stretchr/testify/require" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestIsManaged(t *testing.T) { + type test struct { + name string + stack lokiv1beta1.LokiStack + wantOk bool + } + + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + table := []test{ + { + name: "managed", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + ManagementState: lokiv1beta1.ManagementStateManaged, + }, + }, + wantOk: true, + }, + { + name: "unmanaged", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: 
metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + ManagementState: lokiv1beta1.ManagementStateUnmanaged, + }, + }, + }, + } + for _, tst := range table { + t.Run(tst.name, func(t *testing.T) { + k.GetStub = func(_ context.Context, _ types.NamespacedName, object client.Object) error { + k.SetClientObject(object, &tst.stack) + return nil + } + ok, err := state.IsManaged(context.TODO(), r, k) + require.NoError(t, err) + require.Equal(t, ok, tst.wantOk) + }) + } +} + +func TestIsManaged_WhenError_ReturnNotManagedWithError(t *testing.T) { + type test struct { + name string + apierror error + wantErr error + } + + badReqErr := apierrors.NewBadRequest("bad request") + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + table := []test{ + { + name: "stack not found error", + apierror: apierrors.NewNotFound(schema.GroupResource{}, "something not found"), + }, + { + name: "any other api error", + apierror: badReqErr, + wantErr: kverrors.Wrap(badReqErr, "failed to lookup lokistack", "name", r.NamespacedName), + }, + } + for _, tst := range table { + t.Run(tst.name, func(t *testing.T) { + k.GetReturns(tst.apierror) + ok, err := state.IsManaged(context.TODO(), r, k) + require.Equal(t, tst.wantErr, err) + require.False(t, ok) + }) + } +} diff --git a/operator/controllers/lokistack_controller.go b/operator/controllers/lokistack_controller.go new file mode 100644 index 0000000000..825c63c242 --- /dev/null +++ b/operator/controllers/lokistack_controller.go @@ -0,0 +1,149 @@ +package controllers + +import ( + "context" + "time" + + "github.com/go-logr/logr" + "github.com/grafana/loki-operator/controllers/internal/management/state" + "github.com/grafana/loki-operator/internal/external/k8s" + "github.com/grafana/loki-operator/internal/handlers" + 
"github.com/grafana/loki-operator/internal/manifests" + "github.com/grafana/loki-operator/internal/status" + routev1 "github.com/openshift/api/route/v1" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" +) + +var ( + createOrUpdateOnlyPred = builder.WithPredicates(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Update only if generation changes, filter out anything else. + // We only need to check generation here, because it is only + // updated on spec changes. On the other hand RevisionVersion + // changes also on status changes. We want to omit reconciliation + // for status updates for now. + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + CreateFunc: func(e event.CreateEvent) bool { return true }, + DeleteFunc: func(e event.DeleteEvent) bool { return false }, + GenericFunc: func(e event.GenericEvent) bool { return false }, + }) + updateOrDeleteOnlyPred = builder.WithPredicates(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + switch e.ObjectOld.(type) { + case *appsv1.Deployment: + case *appsv1.StatefulSet: + return true + } + return false + }, + CreateFunc: func(e event.CreateEvent) bool { return false }, + DeleteFunc: func(e event.DeleteEvent) bool { + // DeleteStateUnknown evaluates to false only if the object + // has been confirmed as deleted by the api server. 
+ return !e.DeleteStateUnknown + }, + GenericFunc: func(e event.GenericEvent) bool { return false }, + }) +) + +// LokiStackReconciler reconciles a LokiStack object +type LokiStackReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Flags manifests.FeatureFlags +} + +// +kubebuilder:rbac:groups=loki.grafana.com,resources=lokistacks,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=loki.grafana.com,resources=lokistacks/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=loki.grafana.com,resources=lokistacks/finalizers,verbs=update +// +kubebuilder:rbac:groups="",resources=pods;nodes;services;endpoints;configmaps;serviceaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch +// +kubebuilder:rbac:groups=apps,resources=deployments;statefulsets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings;clusterroles,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update +// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;create;update +// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update +// +kubebuilder:rbac:groups=config.openshift.io,resources=dnses,verbs=get;list;watch +// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// Compare the state specified by the LokiStack object against the actual cluster state, +// and then perform operations to make the cluster state reflect the state specified by +// the user. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.0/pkg/reconcile +func (r *LokiStackReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + ok, err := state.IsManaged(ctx, req, r.Client) + if err != nil { + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second, + }, err + } + if !ok { + r.Log.Info("Skipping reconciliation for unmanaged lokistack resource", "name", req.NamespacedName) + // Stop requeueing for unmanaged LokiStack custom resources + return ctrl.Result{}, nil + } + + err = handlers.CreateOrUpdateLokiStack(ctx, req, r.Client, r.Scheme, r.Flags) + if err != nil { + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second, + }, err + } + + err = status.Refresh(ctx, r.Client, req) + if err != nil { + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second, + }, err + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *LokiStackReconciler) SetupWithManager(mgr manager.Manager) error { + b := ctrl.NewControllerManagedBy(mgr) + return r.buildController(k8s.NewCtrlBuilder(b)) +} + +func (r *LokiStackReconciler) buildController(bld k8s.Builder) error { + bld = bld. + For(&lokiv1beta1.LokiStack{}, createOrUpdateOnlyPred). + Owns(&corev1.ConfigMap{}, updateOrDeleteOnlyPred). + Owns(&corev1.ServiceAccount{}, updateOrDeleteOnlyPred). + Owns(&corev1.Service{}, updateOrDeleteOnlyPred). + Owns(&appsv1.Deployment{}, updateOrDeleteOnlyPred). + Owns(&appsv1.StatefulSet{}, updateOrDeleteOnlyPred). + Owns(&rbacv1.ClusterRole{}, updateOrDeleteOnlyPred). 
+ Owns(&rbacv1.ClusterRoleBinding{}, updateOrDeleteOnlyPred) + + if r.Flags.EnableGatewayRoute { + bld = bld.Owns(&routev1.Route{}, updateOrDeleteOnlyPred) + } else { + bld = bld.Owns(&networkingv1.Ingress{}, updateOrDeleteOnlyPred) + } + + return bld.Complete(r) +} diff --git a/operator/controllers/lokistack_controller_test.go b/operator/controllers/lokistack_controller_test.go new file mode 100644 index 0000000000..162d090a16 --- /dev/null +++ b/operator/controllers/lokistack_controller_test.go @@ -0,0 +1,153 @@ +package controllers + +import ( + "flag" + "io/ioutil" + "os" + "testing" + + "github.com/ViaQ/logerr/log" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s/k8sfakes" + "github.com/grafana/loki-operator/internal/manifests" + routev1 "github.com/openshift/api/route/v1" + "github.com/stretchr/testify/require" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var scheme = runtime.NewScheme() + +func TestMain(m *testing.M) { + testing.Init() + flag.Parse() + + if testing.Verbose() { + // set to the highest for verbose testing + log.SetLogLevel(5) + } else { + if err := log.SetOutput(ioutil.Discard); err != nil { + // This would only happen if the default logger was changed which it hasn't so + // we can assume that a panic is necessary and the developer is to blame. 
+ panic(err) + } + } + + // Register the clientgo and CRD schemes + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(routev1.AddToScheme(scheme)) + utilruntime.Must(lokiv1beta1.AddToScheme(scheme)) + + log.Init("testing") + os.Exit(m.Run()) +} + +func TestLokiStackController_RegistersCustomResourceForCreateOrUpdate(t *testing.T) { + b := &k8sfakes.FakeBuilder{} + k := &k8sfakes.FakeClient{} + c := &LokiStackReconciler{Client: k, Scheme: scheme} + + b.ForReturns(b) + b.OwnsReturns(b) + + err := c.buildController(b) + require.NoError(t, err) + + // Require only one For-Call for the custom resource + require.Equal(t, 1, b.ForCallCount()) + + // Require For-call options to have create and update predicates + obj, opts := b.ForArgsForCall(0) + require.Equal(t, &lokiv1beta1.LokiStack{}, obj) + require.Equal(t, opts[0], createOrUpdateOnlyPred) +} + +func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *testing.T) { + k := &k8sfakes.FakeClient{} + + // Require owned resources + type test struct { + obj client.Object + index int + flags manifests.FeatureFlags + pred builder.OwnsOption + } + table := []test{ + { + obj: &corev1.ConfigMap{}, + index: 0, + pred: updateOrDeleteOnlyPred, + }, + { + obj: &corev1.ServiceAccount{}, + index: 1, + pred: updateOrDeleteOnlyPred, + }, + { + obj: &corev1.Service{}, + index: 2, + pred: updateOrDeleteOnlyPred, + }, + { + obj: &appsv1.Deployment{}, + index: 3, + pred: updateOrDeleteOnlyPred, + }, + { + obj: &appsv1.StatefulSet{}, + index: 4, + pred: updateOrDeleteOnlyPred, + }, + { + obj: &rbacv1.ClusterRole{}, + index: 5, + pred: updateOrDeleteOnlyPred, + }, + { + obj: &rbacv1.ClusterRoleBinding{}, + index: 6, + pred: updateOrDeleteOnlyPred, + }, + { + obj: &networkingv1.Ingress{}, + index: 7, + flags: manifests.FeatureFlags{ + EnableGatewayRoute: false, + }, + pred: updateOrDeleteOnlyPred, + }, + { + obj: &routev1.Route{}, + index: 7, + flags: manifests.FeatureFlags{ + EnableGatewayRoute: true, 
+ }, + pred: updateOrDeleteOnlyPred, + }, + } + for _, tst := range table { + b := &k8sfakes.FakeBuilder{} + b.ForReturns(b) + b.OwnsReturns(b) + + c := &LokiStackReconciler{Client: k, Scheme: scheme, Flags: tst.flags} + err := c.buildController(b) + require.NoError(t, err) + + // Require Owns-Calls for all owned resources + require.Equal(t, 8, b.OwnsCallCount()) + + // Require Owns-call options to have delete predicate only + obj, opts := b.OwnsArgsForCall(tst.index) + require.Equal(t, tst.obj, obj) + require.Equal(t, tst.pred, opts[0]) + } +} diff --git a/operator/docs/forwarding_logs_to_gateway.md b/operator/docs/forwarding_logs_to_gateway.md new file mode 100644 index 0000000000..88a0c538db --- /dev/null +++ b/operator/docs/forwarding_logs_to_gateway.md @@ -0,0 +1,128 @@ +# Forwarding Logs to LokiStack + +This document will describe how to send application, infrastructure, and audit logs to the Lokistack Gateway as different tenants using Promtail or Fluentd. The built-in gateway provides secure access to the distributor (and query-frontend) via consulting an OAuth/OIDC endpoint for the request subject. + +__Please read the [hacking guide](./hack_loki_operator.md) before proceeding with the following instructions.__ + +_Note: While this document will only give instructions for two methods of log forwarding into the gateway, the examples given in the Promtail and Fluentd sections can be extrapolated to other log forwarders._ + +## Openshift Logging + +Although there is a way to [forward logs to an external Loki instance](https://docs.openshift.com/container-platform/4.9/logging/cluster-logging-external.html#cluster-logging-collector-log-forward-loki_cluster-logging-external), [Openshift Logging](https://github.com/openshift/cluster-logging-operator) does not currently have support to send logs through the Lokistack Gateway. + +Support will be added in the near future. 
+ +## Forwarding Clients + +In order to enable communication between the client(s) and the gateway, follow these steps: + +1. Deploy the Loki Operator and an `lokistack` instance with the [gateway flag enabled](./hack_loki_operator.md#hacking-on-loki-operator-on-openshift). + +2. Create a `ServiceAccount` to generate the `Secret` which will be used to authorize the forwarder. + +```console +kubectl -n openshift-logging create serviceaccount +``` + +3. Configure the forwarder and deploy it to the `openshift-logging` namespace. + +4. Create the following `ClusterRole` and `ClusterRoleBinding` which will allow the cluster to authenticate the user(s) submitting the logs: + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lokistack-dev-tenant-logs-role +rules: +- apiGroups: + - 'loki.openshift.io' + resources: + - application + - infrastructure + - audit + resourceNames: + - logs + verbs: + - 'get' + - 'create' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: lokistack-dev-tenant-logs-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: lokistack-dev-tenant-logs-role +subjects: +- kind: ServiceAccount + name: "" + namespace: openshift-logging +``` + +### Promtail + +[Promtail](https://grafana.com/docs/loki/latest/clients/promtail/) is an agent managed by Grafana which forwards logs to a Loki instance. The Grafana documentation can be consulted for [configuring](https://grafana.com/docs/loki/latest/clients/promtail/configuration/#configuration-file-reference) and [deploying](https://grafana.com/docs/loki/latest/clients/promtail/installation/#kubernetes) an instance of Promtail in a Kubernetes cluster. + +To configure Promtail to send application, audit, and infrastructure logs, add the following clients to the Promtail configuration + +```yaml +clients: + - # ... 
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+ url: http://lokistack-gateway-http-lokistack-dev.openshift-logging.svc:8080/api/logs/v1/audit/loki/api/v1/push
+ - # ...
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+ url: http://lokistack-gateway-http-lokistack-dev.openshift-logging.svc:8080/api/logs/v1/application/loki/api/v1/push
+ - # ...
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+ url: http://lokistack-gateway-http-lokistack-dev.openshift-logging.svc:8080/api/logs/v1/infrastructure/loki/api/v1/push
+```
+
+The rest of the configuration can be adjusted to the developer's preference.
+
+### Fluentd
+
+Loki can receive logs from Fluentd via the [Grafana plugin](https://grafana.com/docs/loki/latest/clients/fluentd/).
+
+The Fluentd configuration can be overridden to target the `application` endpoint to send those log types.
+
+```
+
+ @type loki
+ # ...
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+ url: http://lokistack-gateway-http-lokistack-dev.openshift-logging.svc:8080/api/logs/v1/application
+
+```
+
+## Troubleshooting
+
+### Log Entries Out of Order
+
+If the forwarder is configured to send too much data in a short span of time, Loki will back-pressure the forwarder and respond to the POST requests with `429` errors. In order to alleviate this, a few changes could be made to the spec:
+
+* Consider moving up a t-shirt size. This will bring in additional resources and have a higher ingestion rate.
+
+```console
+kubectl -n openshift-logging edit lokistack
+```
+
+```yaml
+size: 1x.medium
+```
+
+* The ingestion rate (global or per-tenant) can be changed manually via configuration changes to `lokistack`:
+
+```console
+kubectl -n openshift-logging edit lokistack
+```
+
+```yaml
+limits:
+ tenants:
+ 4a5bb098-7caf-42ec-9b1a-8e1d979bfb95:
+ IngestionLimits:
+ IngestionRate: 15
+```
diff --git a/operator/docs/hack_loki_operator.md b/operator/docs/hack_loki_operator.md
new file mode 100644
index 0000000000..112f65f3a8
--- /dev/null
+++ b/operator/docs/hack_loki_operator.md
@@ -0,0 +1,247 @@
+# Hacking on Loki Operator
+
+Loki Operator is the Kubernetes Operator for [Loki](https://grafana.com/docs/loki/latest/).
+
+## Hacking on Loki Operator using kind
+
+[kind](https://kind.sigs.k8s.io/docs/user/quick-start/) is a tool for running local Kubernetes clusters using Docker container "nodes". kind was primarily designed for testing Kubernetes itself, but may be used for local development or CI.
+
+### Requirements
+
+* Install [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) or [Openshift CLI](https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html) for communicating with the cluster. The guide below will be using `kubectl` for the same.
+* Create a running Kubernetes cluster using kind.
+* A container registry that you and your Kubernetes cluster can reach. We recommend [quay.io](https://quay.io/signin/).
+
+### Installation of Loki Operator
+
+* Build and push the container image and then deploy the operator with:
+
+ ```console
+ make oci-build oci-push deploy REGISTRY_ORG=$YOUR_QUAY_ORG VERSION=latest
+ ```
+
+ where `$YOUR_QUAY_ORG` is your personal [quay.io](http://quay.io/) account where you can push container images.
+ + The above command will deploy the operator to your active Kubernetes cluster defined by your local [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/). The operator will be running in the `default` namespace. + +* You can confirm that the operator is up and running using: + + ```console + kubectl get pods + ``` + + You should see `controller-manager-xxxx` and `minio-xxxx` pods running. + +* Now create a LokiStack instance to get the various components of Loki up and running: + + ```console + kubectl apply -f hack/lokistack_dev.yaml + ``` + + This will create `distributor`, `compactor`, `ingester`, `querier` and `query-frontend` components. + + Confirm that all components are up and running for `deployments` using: + + ```console + kubectl rollout status deployment/ + ``` + + where `` is the name of the deployment and can be found using: + + ```console + kubectl get deployments + ``` + + Confirm that all are up and running for `statefulsets` using: + + ```console + kubectl rollout status statefulset/ + ``` + + where `` is the name of the statefulset and can be found using: + + ```console + kubectl get statefulsets + ``` + +### Cleanup + +To cleanup deployments of the operator, you can use: + +```console +make undeploy +``` + +It will undeploy controller from the configured Kubernetes cluster in [~/.kube/config](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#the-kubeconfig-environment-variable) + +## Hacking on Loki Operator on OpenShift + +### Requirements + +* Install [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) or [Openshift CLI](https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html) for communicating with the cluster. The guide below will be using `kubectl` for the same. +* Create a running OpenShift cluster on AWS. +* A container registry that you and your OpenShift cluster can reach. 
We recommend [quay.io](https://quay.io/signin/). +* Create an [S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html) in one of the AWS Regions. + +### Installation of Loki Operator + +* Build and push the container image [2] and then deploy the operator with: + + ```console + make olm-deploy REGISTRY_ORG=$YOUR_QUAY_ORG VERSION=$VERSION + ``` + + where `$YOUR_QUAY_ORG` is your personal [quay.io](http://quay.io/) account where you can push container images and `$VERSION` can be any random version number such as `v0.0.1`. + + The above command will deploy the operator to your active Openshift cluster defined by your local [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/). The operator will be running in the `openshift-logging` namespace. + +* You can confirm that the operator is up and running using: + + ```console + kubectl -n openshift-logging get pods + ``` + +* Now you need to create a storage secret for the operator. This can be done using: + + ```console + make olm-deploy-example-storage-secret + ``` + + OR + + ```console + ./hack/deploy-example-secret.sh openshift-logging + ``` + + This secret will be available in openshift-logging namespace. You can check the `hack/deploy-example-secret.sh` file to check the content of the secret. + +* Now you need to create a gateway secret [3] for the operator. This can be done using: + + ```code + kubectl -n openshift-logging create secret generic test1 \ + --from-literal=clientID="" \ + --from-literal=clientSecret="" \ + --from-literal=issuerCAPath="" + ``` + +* Once the object storage secret is created, you can now create a LokiStack instance to get the various components of Loki up and running: + + ```console + kubectl -n openshift-logging apply -f hack/lokistack_gateway_dev.yaml + ``` + + This will create `distributor`, `compactor`, `ingester`, `querier`, `query-frontend` and `lokistack-gateway` components. 
+ + Confirm that all are up and running for `deployments` using: + + ```console + kubectl -n openshift-logging rollout status deployment/ + ``` + + where `` is the name of the deployment and can be found using: + + ```console + kubectl -n openshift-logging get deployments + ``` + + Confirm that all are up and running for `statefulsets` using: + + ```console + kubectl -n openshift-logging rollout status statefulset/ + ``` + + where `` is the name of the statefulset and can be found using: + + ```console + kubectl -n openshift-logging get statefulsets + ``` + + If you don't want `lokistack-gateway` component [1] then you can skip it by removing the `--with-lokistack-gateway` args from the `loki-operator-controller-manager` deployment: + + ```console + kubectl -n openshift-logging edit deployment/loki-operator-controller-manager + ``` + + Delete the flag `--with-lokistack-gateway` from the `args` section and save the file. This will update the deployment and now you can create LokiStack instance using: + + ```console + kubectl -n openshift-logging apply -f hack/lokistack_dev.yaml + ``` + + This will create `distributor`, `compactor`, `ingester`, `querier` and `query-frontend` components only. + +### Cleanup + +To cleanup deployments of the operator, you can use: + +```console +make olm-undeploy +``` + +It will cleanup deployments of the operator bundle, and the operator via OLM on an OpenShift cluster selected via [~/.kube/config](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#the-kubeconfig-environment-variable) + +### Notes + +[1] `lokistack-gateway` is an optional component deployed as part of Loki Operator. It provides secure access to Loki's distributor (i.e. for pushing logs) and query-frontend (i.e. for querying logs) via consulting an OAuth/OIDC endpoint for the request subject. 
+ +[2] If you get multiple images as options, and you are required to select one of them then select `docker.io/library/golang:1.16` + +[3] The OIDC configuration expects `clientID`, `clientSecret` and `issuerCAPath` which should be provided via a Kubernetes secret that the LokiStack admin provides upfront. + +Each tenant Secret is required to match: +* `metadata.name` with `TenantsSecretsSpec.Name`. +* `metadata.namespace` with `LokiStack.metadata.namespace`. + +## Basic Troubleshooting on Hacking on Loki Operator + +### New changes are not detected by Loki Operator + +Suppose you made some changes to the Loki Operator's code and deployed it, but the changes are not visible when it runs. This happens when the deployment pulls the old image of the operator because of the `imagePullPolicy` being set to `IfNotPresent`. Therefore, you need to make some changes to make your deployment pull a new image always: + +* Go to `config/manager/manager.yaml` file. +* Set the `imagePullPolicy` to `Always` i.e., + + ```yaml + imagePullPolicy: Always + ``` + +* Deploy the operator again. + +### kubectl using old context + +It is possible that when you use two different clusters - one is kind cluster and the other is OpenShift cluster, you might need to switch between clusters to test your changes. There is a possibility that once you switch between clusters, the kubectl might not switch the context automatically and hence you might need to do this manually to correctly communicate with your cluster. + +* List all the available context: + + ```console + kubectl config get-contexts + ``` + + The `*` mark against the context shows the one in use currently. +* Set the context name you want to use now: + + ```console + kubectl config use-context $CONTEXTNAME + ``` + + where `$CONTEXTNAME` is the context name you want to use now from the previous step. 
+
+### The Loki Operator giving Missing Secrets / Invalid Secrets error
+
+You have probably forgotten to create the gateway secrets because of which the operator runs in **degraded** condition. Follow the steps mentioned in the step-by-step guide to create the gateway secret first. Once done, you can now create the LokiStack instance.
+
+Verify this by checking the `conditions` field:
+
+```console
+kubectl get lokistack lokistack-dev -o yaml
+```
+
+For OpenShift, the above command would be:
+
+```console
+kubectl -n openshift-logging get lokistack lokistack-dev -o yaml
+```
+
+### The Loki Operator giving Mandatory Configuration / Incompatible Configuration error
+
+This usually happens when the LokiStack CR is wrongly configured for the lokistack-gateway. Please read the [enhancement proposal](https://github.com/openshift/enhancements/blob/master/enhancements/cluster-logging/loki-gateway-configuration.md) to figure out the correct way to configure it.
diff --git a/operator/docs/hack_operator_make_run.md b/operator/docs/hack_operator_make_run.md
new file mode 100644
index 0000000000..18929190d6
--- /dev/null
+++ b/operator/docs/hack_operator_make_run.md
@@ -0,0 +1,274 @@
+# Hacking on Loki Operator using `make run`
+
+This document demonstrates how to use Loki Operator for development and testing locally on Kind and OpenShift using the `make run` command.
+
+_Note:_ This is helpful when you don't want to deploy the Loki Operator image every time you make slight changes to the operator code.
+
+## Hacking on Loki Operator using kind
+
+[kind](https://kind.sigs.k8s.io/docs/user/quick-start/) is a tool for running local Kubernetes clusters using Docker container "nodes". kind was primarily designed for testing Kubernetes itself, but may be used for local development or CI.
+ +### Requirements + +* Install [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) or [Openshift CLI](https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html) for communicating with the cluster. The guide below will be using `kubectl` for the same. +* Create a running Kubernetes cluster using kind. + +### Installation of Loki Operator + +* Install the CRDs into the cluster: + + ```console + make install + ``` + + This will create a custom resource definition with the name `lokistacks.loki.openshift.io` which can be verified using: + + ```console + kubectl get crd lokistacks.loki.openshift.io + ``` + +* Create a minio deployment in the cluster using: + + ```console + kubectl apply -f config/overlays/development/minio + ``` + + This creates minio's `deployment`, `service`, `pvc` and `secret` in the `default` namespace. + +* Now create a LokiStack instance using: + + ```console + kubectl apply -f hack/lokistack_dev.yaml + ``` + +* Now run the operator locally on your machine: + + ```console + make run + ``` + + This will start the loki operator locally, recognize the `LokiStack` CRD instance and also creates `distributor`, `compactor`, `ingester`, `querier` and `query-frontend` components. 
+ + Confirm that all components are up and running for `deployments` using: + + ```console + kubectl rollout status deployment/ + ``` + + where `` is the name of the deployment and can be found using: + + ```console + kubectl get deployments + ``` + + Confirm that all are up and running for `statefulsets` using: + + ```console + kubectl rollout status statefulset/ + ``` + + where `` is the name of the statefulset and can be found using: + + ```console + kubectl get statefulsets + ``` + +* If you make some changes to the operator's code, then just stop the operator locally using `CTRL + C`, update the code and rerun the operator locally: + + ```console + make run + ``` + + This saves time by not deploying the operator again and again. + +* When everything works fine, for the final testing deploy everything to the cluster using this [document](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#hacking-on-loki-operator-using-kind). + +### Cleanup + +* Stop the operator from running locally by pressing `CTRL + C`. + +* Cleanup the LokiStack instance, CRDs, deployments on the cluster using: + + ```console + make uninstall + ``` + +* Cleanup the minio deployment using: + + ```console + kubectl delete -f config/overlays/development/minio + ``` + +## Hacking on Loki Operator on OpenShift + +### Requirements + +* Install [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) or [Openshift CLI](https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html) for communicating with the cluster. The guide below will be using `kubectl` for the same. +* Create a running OpenShift cluster on AWS. +* Create an [S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html) in one of the AWS Regions. 
+ +### Installation of Loki Operator + +* Install the CRDs into the cluster: + + ```console + make install + ``` + + This will create a custom resource definition with the name `lokistacks.loki.openshift.io` which can be verified using: + + ```console + kubectl get crd lokistacks.loki.openshift.io + ``` + +* Create the `openshift-logging` namespace in the cluster: + + ```console + kubectl create ns openshift-logging + ``` + +* Now you need to create a storage secret for the operator. This can be done using: + + ```console + make olm-deploy-example-storage-secret + ``` + + OR + + ```console + ./hack/deploy-example-secret.sh openshift-logging + ``` + + This secret will be available in openshift-logging namespace. You can check the `hack/deploy-example-secret.sh` file to check the content of the secret. + +* Once the object storage secret is created, you can now create a LokiStack instance: + + ```console + kubectl -n openshift-logging apply -f hack/lokistack_dev.yaml + ``` + +* Now run the operator locally on your machine: + + ```console + make run + ``` + + This will create `distributor`, `compactor`, `ingester`, `querier` and `query-frontend` components only. + + Confirm that all are up and running for `deployments` using: + + ```console + kubectl -n openshift-logging rollout status deployment/ + ``` + + where `` is the name of the deployment and can be found using: + + ```console + kubectl -n openshift-logging get deployments + ``` + + Confirm that all are up and running for `statefulsets` using: + + ```console + kubectl -n openshift-logging rollout status statefulset/ + ``` + + where `` is the name of the statefulset and can be found using: + + ```console + kubectl -n openshift-logging get statefulsets + ``` + +* If you want `lokistack-gateway` component [1] to be deployed then you need to create a gateway secret [2] for the operator. 
This can be done using: + + ```code + kubectl -n openshift-logging create secret generic test1 \ + --from-literal=clientID="" \ + --from-literal=clientSecret="" \ + --from-literal=issuerCAPath="" + ``` + +* Now create a LokiStack instance using: + + ```console + kubectl -n openshift-logging apply -f hack/lokistack_gateway_dev.yaml + ``` + +* Edit the [main file](https://github.com/grafana/loki/blob/master/operator/main.go) to set the flag values to `true` and rerun the operator using: + + ```console + make run + ``` + + This will create `distributor`, `compactor`, `ingester`, `querier`, `query-frontend` and `lokistack-gateway` components. + +* If you make some changes to the operator's code, then just stop the operator locally using `CTRL + C`, update the code and rerun the operator locally: + + ```console + make run + ``` + + This saves time by not deploying the operator again and again. + +* When everything works fine, for the final testing deploy everything to the cluster using this [document](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#hacking-on-loki-operator-on-openshift). + +### Cleanup + +* Stop the operator from running locally by pressing `CTRL + C`. + +* Cleanup the LokiStack instance, CRDs, deployments on the cluster using: + + ```console + make uninstall + ``` + +### Notes + +[1] `lokistack-gateway` is an optional component deployed as part of Loki Operator. It provides secure access to Loki's distributor (i.e. for pushing logs) and query-frontend (i.e. for querying logs) via consulting an OAuth/OIDC endpoint for the request subject. + +[2] The OIDC configuration expects `clientID`, `clientSecret` and `issuerCAPath` which should be provided via a Kubernetes secret that the LokiStack admin provides upfront. + +Each tenant Secret is required to match: +* `metadata.name` with `TenantsSecretsSpec.Name`. +* `metadata.namespace` with `LokiStack.metadata.namespace`. 
+ +## Basic Troubleshooting on Hacking on Loki Operator + +### kubectl using old context + +It is possible that when you use two different clusters - one is kind cluster and the other is OpenShift cluster, you might need to switch between clusters to test your changes. There is a possibility that once you switch between clusters, the kubectl might not switch the context automatically and hence you might need to do this manually to correctly communicate with your cluster. + +* List all the available context: + + ```console + kubectl config get-contexts + ``` + + The `*` mark against the context shows the one in use currently. +* Set the context name you want to use now: + + ```console + kubectl config use-context $CONTEXTNAME + ``` + + where `$CONTEXTNAME` is the context name you want to use now from the previous step. + +### The Loki Operator giving Missing Secrets / Invalid Secrets error + +You have probably forgotten to create the gateway secrets because of which the operator runs in **degraded** condition. Follow the steps mentioned in the step-by-step guide to create the gateway secret first. Once done, you can now create the LokiStack instance. + +Verify this by checking the `conditions` field: + +```console +kubectl get lokistack lokistack-dev -o yaml +``` + +For OpenShift, the above command would be: + +```console +kubectl -n openshift-logging get lokistack lokistack-dev -o yaml +``` + +### The Loki Operator giving Mandatory Configuration / Incompatible Configuration error + +This usually happens when the LokiStack CR is wrongly configured for the lokistack-gateway. Please read the [enhancement proposal](https://github.com/openshift/enhancements/blob/master/enhancements/cluster-logging/loki-gateway-configuration.md) to figure out the correct way to configure it. 
diff --git a/operator/docs/storage_size_calculator.md b/operator/docs/storage_size_calculator.md new file mode 100644 index 0000000000..589720cce7 --- /dev/null +++ b/operator/docs/storage_size_calculator.md @@ -0,0 +1,102 @@ +# Install Storage Size Calculator on OpenShift + +This document demonstrates how to install storage size calculator for loki on OpenShift. + +_Note:_ The storage size calculator works out of the box on OpenShift. + +## Introduction + +Storage Size Calculator is used to have an idea on how to properly size a Loki cluster. It spins up a log collector that is used to collect metrics for a period of time and based on the amount of logs being collected, extrapolate the amount of logs required for a day and from that recommend a t-shirt size. + +## Requirements + +* Create a running OpenShift cluster. +* A container registry that you and your OpenShift cluster can reach. We recommend [quay.io](https://quay.io/signin/). + +## Installation + +* Deploy the [Loki Operator](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#hacking-on-loki-operator-on-openshift) to the cluster. + +* Deploy the storage size calculator by executing following command in the terminal: + + ```console + make deploy-size-calculator REGISTRY_ORG=$YOUR_QUAY_ORG + ``` + + where `$YOUR_QUAY_ORG` is your personal [quay.io](http://quay.io/) account where you can push container images. + + You should see `log-file-metric-exporter-xxx` and `storage-size-calculator-xxx` pods running. + + _Note:_ [log-file-metric-exporter](https://github.com/ViaQ/log-file-metric-exporter) is used to collect metrics about container logs being produced in a kubernetes environment. It publishes `log_logged_bytes_total` metric to prometheus. 
+ +* Now you can check the logs to see the recommended t-shirt size for your cluster: + + ```console + kubectl -n openshift-logging logs + ``` + + where `` is the name of the storage size calculator pod and can be found using: + + ```console + kubectl -n openshift-logging get pods + ``` + + _Note:_ The storage size calculator logs the recommended t-shirt size every minute. + +## Cleanup + +To cleanup the deployment you can use: + +```console +make undeploy-size-calculator +``` + +This will cleanup the resources related to storage size calculator. However, the Loki Operator would still be running. + +## Contribution + +If you want to contribute to the storage size calculator, you can follow this local development and testing process. + +* Fork and clone the [Loki Operator](https://github.com/grafana/loki/blobs/master/operator) repo. + +* All the files related to storage size calculator exists at [`config/overlays/openshift/size-calculator`](https://github.com/grafana/loki/tree/master/operator/config/overlays/openshift/size-calculator) and the main file is at [`cmd/size-calculator`](https://github.com/grafana/loki/tree/master/operator/cmd/size-calculator). + +* Update the code to fix a bug or add a new feature. + +* To test the changes made, build the image and push it to quay. Replace [here](https://github.com/grafana/loki/blob/master/operator/config/overlays/openshift/size-calculator/storage_size_calculator.yaml#L18) with your quay image to test the changes. 
+ + Build the image using: + + ```console + make oci-build-calculator + ``` + + This will build the storage size calculator image using the [Dockerfile](https://github.com/grafana/loki/blob/master/operator/calculator.Dockerfile). + + Push the image to quay using: + + ```console + make oci-push-calculator + ``` + + After replacing the image name, deploy the storage size calculator to test your changes: + + ```console + make deploy-size-calculator REGISTRY_ORG=$YOUR_QUAY_ORG + ``` + + where `$YOUR_QUAY_ORG` is your personal [quay.io](http://quay.io/) account where you pushed your container image. + +* If everything works fine then create a pull request. + +## Troubleshooting + +### Permission denied on deploying prometheus secret + +If you get a `permission denied` error while running `make deploy-size-calculator` then make [this](https://github.com/grafana/loki/blob/master/operator/hack/deploy-prometheus-secret.sh) file executable by running: + +```console +chmod +x hack/deploy-prometheus-secret.sh +``` + +Now rerun `make deploy-size-calculator` and it should work. 
diff --git a/operator/go.mod b/operator/go.mod new file mode 100644 index 0000000000..eecbbd6915 --- /dev/null +++ b/operator/go.mod @@ -0,0 +1,22 @@ +module github.com/grafana/loki-operator + +go 1.16 + +require ( + github.com/ViaQ/logerr v1.0.10 + github.com/go-logr/logr v0.4.0 + github.com/google/uuid v1.1.2 + github.com/imdario/mergo v0.3.12 + github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0 + github.com/openshift/api v0.0.0-20210901140736-d8ed1449662d // release-4.9 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.0 + github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/common v0.32.0 + github.com/stretchr/testify v1.7.0 + k8s.io/api v0.22.1 + k8s.io/apimachinery v0.22.1 + k8s.io/client-go v0.22.1 + k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 + sigs.k8s.io/controller-runtime v0.9.2 + sigs.k8s.io/yaml v1.2.0 +) diff --git a/operator/go.sum b/operator/go.sum new file mode 100644 index 0000000000..c65312c76b --- /dev/null +++ b/operator/go.sum @@ -0,0 +1,1009 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod 
h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod 
h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/ViaQ/logerr v1.0.10 h1:ZSWC+n9cOCIrwXUYk9mSU96OhmdcGp5qDCNfPTBElVU= +github.com/ViaQ/logerr v1.0.10/go.mod h1:KZ3ne81U/sJhHt3AjE5AvhoQDY0Rh1O+u4rEHKjG/No= +github.com/agnivade/levenshtein v1.0.1/go.mod 
h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt 
v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU= +github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8= +github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch 
v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= +github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= 
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= 
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf 
v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp 
v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= 
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod 
h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0 h1:8E6DrFvII6QR4eJ3PkFvV+lc03P+2qwqTPLm1ax7694= +github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0/go.mod h1:fcEyUyXZXoV4Abw8DX0t7wyL8mCDxXyU4iAFZfT3IHw= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod 
h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/openshift/api v0.0.0-20210901140736-d8ed1449662d h1:2QcWZUp0R+ewJrK2Iuj8WaZikl/KccB2+/LOhB7RhEk= +github.com/openshift/api v0.0.0-20210901140736-d8ed1449662d/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8= +github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.0 h1:klFBev4UPGvhr3GF2b73Q1omlzZVONAhLwDhcQX0+4E= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= 
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.0 h1:HRmM4uANZDAjdvbsdfOoqI5UDbjz0faKeMs/cGPKKI0= +github.com/prometheus/common v0.32.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs 
v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= +github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod 
h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 
h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod 
h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b 
h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= +k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= +k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= +k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= +k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= +k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE= +k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= +k8s.io/apimachinery v0.18.3/go.mod 
h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= +k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= +k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= +k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= +k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= +k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw= +k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= +k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= +k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= +k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= +k8s.io/component-base v0.21.2 h1:EsnmFFoJ86cEywC0DoIkAUiEV6fjgauNugiw1lmIjs4= +k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod 
h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM= +k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/controller-runtime v0.9.2 h1:MnCAsopQno6+hI9SgJHKddzXpmv2wtouZz6931Eax+Q= +sigs.k8s.io/controller-runtime v0.9.2/go.mod 
h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/operator/hack/addons.yaml b/operator/hack/addons.yaml new file mode 100644 index 0000000000..27853b6570 --- /dev/null +++ b/operator/hack/addons.yaml @@ -0,0 +1,448 @@ +# This file is used to create additional objects to help development of the operator +# within a cluster. 
logcli pod helps write queries, promtail writes logs, etc +--- +apiVersion: v1 +kind: Pod +metadata: + name: logcli + namespace: loki + labels: + app.kubernetes.io/name: logcli +spec: + containers: + - name: logcli + image: docker.io/grafana/logcli:2.2.0-amd64 + env: + - name: LOKI_ADDR + value: http://loki-querier-http-lokistack-sample.loki.svc.cluster.local:3100 + command: [ "/bin/sh", "-c", "--" ] + args: [ "while true; do sleep 30; done;" ] + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loki-promtail + namespace: loki + labels: + app.kubernetes.io/name: promtail +spec: + selector: + matchLabels: + app.kubernetes.io/name: promtail + template: + metadata: + labels: + app.kubernetes.io/name: promtail + annotations: + prometheus.io/port: metrics + prometheus.io/scrape: "true" + spec: + containers: + - args: + - -config.file=/etc/promtail/promtail.yaml + - -client.url=http://loki-distributor-http-lokistack-sample.loki.svc.cluster.local:3100/api/prom/push + - -log.level=info + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: docker.io/grafana/promtail:2.1.0 + imagePullPolicy: IfNotPresent + name: promtail + ports: + - containerPort: 3101 + name: metrics + protocol: TCP + readinessProbe: + failureThreshold: 5 + httpGet: + path: /ready + port: metrics + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: {} + securityContext: + procMount: Default + readOnlyRootFilesystem: true + runAsGroup: 0 + runAsUser: 0 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/promtail + name: config + - mountPath: /run/promtail + name: run + - mountPath: /var/lib/docker/containers + name: docker + readOnly: true + - mountPath: /var/log/pods + name: pods + readOnly: true + - mountPath: /var/log/journal + name: journal + readOnly: true + serviceAccountName: loki-promtail + tolerations: + - 
effect: NoSchedule + key: node-role.kubernetes.io/master + volumes: + - configMap: + defaultMode: 420 + name: loki-promtail + name: config + - hostPath: + path: /run/promtail + type: "" + name: run + - hostPath: + path: /var/lib/docker/containers + type: "" + name: docker + - hostPath: + path: /var/log/pods + type: "" + name: pods + - hostPath: + path: /var/log/journal + type: "" + name: journal + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: loki-promtail + namespace: loki + labels: + app.kubernetes.io/name: promtail +data: + promtail.yaml: | + client: + backoff_config: + min_period: 100ms + max_period: 5s + max_retries: 5 + batchsize: 102400 + batchwait: 1s + external_labels: {} + timeout: 10s + positions: + filename: /run/promtail/positions.yaml + server: + http_listen_port: 3101 + target_config: + sync_period: 10s + scrape_configs: + - job_name: journal + journal: + max_age: 12h + path: /var/log/journal + labels: + job: systemd-journal + relabel_configs: + - source_labels: + - __journal__systemd_unit + target_label: unit + - source_labels: + - __journal__hostname + target_label: hostname + - job_name: kubernetes-pods-name + pipeline_stages: + - docker: {} + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_name + target_label: __service__ + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: drop + regex: ^$ + source_labels: + - __service__ + - action: replace + replacement: $1 + separator: / + source_labels: + - __meta_kubernetes_namespace + - __service__ + target_label: job + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: instance + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container_name + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - replacement: 
/var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + - job_name: kubernetes-pods-app + pipeline_stages: + - docker: {} + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: drop + regex: .+ + source_labels: + - __meta_kubernetes_pod_label_name + - source_labels: + - __meta_kubernetes_pod_label_app + target_label: __service__ + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: drop + regex: ^$ + source_labels: + - __service__ + - action: replace + replacement: $1 + separator: / + source_labels: + - __meta_kubernetes_namespace + - __service__ + target_label: job + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: instance + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container_name + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + - job_name: kubernetes-pods-direct-controllers + pipeline_stages: + - docker: {} + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: drop + regex: .+ + separator: '' + source_labels: + - __meta_kubernetes_pod_label_name + - __meta_kubernetes_pod_label_app + - action: drop + regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$ + source_labels: + - __meta_kubernetes_pod_controller_name + - source_labels: + - __meta_kubernetes_pod_controller_name + target_label: __service__ + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: drop + regex: ^$ + source_labels: + - __service__ + - action: replace + replacement: $1 + separator: / + source_labels: + - __meta_kubernetes_namespace + - __service__ + 
target_label: job + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: instance + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: + container_name + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + - job_name: kubernetes-pods-indirect-controller + pipeline_stages: + - docker: {} + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: drop + regex: .+ + separator: '' + source_labels: + - __meta_kubernetes_pod_label_name + - __meta_kubernetes_pod_label_app + - action: keep + regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$ + source_labels: + - __meta_kubernetes_pod_controller_name + - action: replace + regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$ + source_labels: + - __meta_kubernetes_pod_controller_name + target_label: __service__ + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: drop + regex: ^$ + source_labels: + - __service__ + - action: replace + replacement: $1 + separator: / + source_labels: + - __meta_kubernetes_namespace + - __service__ + target_label: job + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: instance + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container_name + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + - job_name: kubernetes-pods-static + pipeline_stages: + - docker: {} + kubernetes_sd_configs: + 
- role: pod + relabel_configs: + - action: drop + regex: ^$ + source_labels: + - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror + - action: replace + source_labels: + - __meta_kubernetes_pod_label_component + target_label: __service__ + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: drop + regex: ^$ + source_labels: + - __service__ + - action: replace + replacement: $1 + separator: / + source_labels: + - __meta_kubernetes_namespace + - __service__ + target_label: job + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: instance + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container_name + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror + - __meta_kubernetes_pod_container_name + target_label: __path__ + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: loki-promtail + namespace: loki + labels: + app.kubernetes.io/name: promtail + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: loki-promtail-clusterrole + labels: + app.kubernetes.io/name: promtail +rules: +- apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - services + - endpoints + - pods + verbs: + - get + - watch + - list + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: loki-promtail-clusterrolebinding + labels: + app.kubernetes.io/name: promtail +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: loki-promtail-clusterrole +subjects: +- kind: ServiceAccount + name: loki-promtail + namespace: loki diff --git a/operator/hack/boilerplate.go.txt b/operator/hack/boilerplate.go.txt new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/operator/hack/deploy-example-secret.sh b/operator/hack/deploy-example-secret.sh new file mode 100755 index 0000000000..3f4456c07b --- /dev/null +++ b/operator/hack/deploy-example-secret.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -eou pipefail + +NAMESPACE=$1 + +REGION="" +ENDPOINT="" +ACCESS_KEY_ID="" +SECRET_ACCESS_KEY="" +LOKI_BUCKET_NAME="${LOKI_BUCKET_NAME:-loki}" + +set_credentials_from_aws() { + REGION="$(aws configure get region)" + ACCESS_KEY_ID="$(aws configure get aws_access_key_id)" + SECRET_ACCESS_KEY="$(aws configure get aws_secret_access_key)" + ENDPOINT="https://s3.${REGION}.amazonaws.com" +} + +create_secret() { + kubectl -n "${NAMESPACE}" delete secret test ||: + kubectl -n "${NAMESPACE}" create secret generic test \ + --from-literal=endpoint="$(echo -n "${ENDPOINT}")" \ + --from-literal=region="$(echo -n "${REGION}")" \ + --from-literal=bucketnames="$(echo -n "${LOKI_BUCKET_NAME}")" \ + --from-literal=access_key_id="$(echo -n "${ACCESS_KEY_ID}")" \ + --from-literal=access_key_secret="$(echo -n "${SECRET_ACCESS_KEY}")" +} + +main() { + set_credentials_from_aws + create_secret +} + +main diff --git a/operator/hack/deploy-prometheus-secret.sh b/operator/hack/deploy-prometheus-secret.sh new file mode 100755 index 0000000000..75eea17219 --- /dev/null +++ b/operator/hack/deploy-prometheus-secret.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -eou pipefail + +USER_WORKLOAD_NAMESPACE=openshift-user-workload-monitoring +NAMESPACE=openshift-logging + +secret=$(kubectl -n "${USER_WORKLOAD_NAMESPACE}" get secret | grep prometheus-user-workload-token | head -n 1 | awk '{print $1 }') +PROMETHEUS_URL="https://$(kubectl -n openshift-monitoring get route thanos-querier -o json | jq -r '.spec.host')" +PROMETHEUS_TOKEN=$(kubectl -n "${USER_WORKLOAD_NAMESPACE}" get secret "${secret}" -o json | jq -r '.data.token' | base64 -d) + +kubectl -n "${NAMESPACE}" delete secret promsecret ||: +kubectl -n "${NAMESPACE}" create secret generic promsecret 
\ + --from-literal=prometheus_url="$(echo -n "${PROMETHEUS_URL}")" \ + --from-literal=prometheus_token="$(echo -n "${PROMETHEUS_TOKEN}")" diff --git a/operator/hack/lokistack_dev.yaml b/operator/hack/lokistack_dev.yaml new file mode 100644 index 0000000000..2649ebe9ff --- /dev/null +++ b/operator/hack/lokistack_dev.yaml @@ -0,0 +1,11 @@ +apiVersion: loki.grafana.com/v1beta1 +kind: LokiStack +metadata: + name: lokistack-dev +spec: + size: 1x.extra-small + replicationFactor: 1 + storage: + secret: + name: test + storageClassName: standard diff --git a/operator/hack/lokistack_gateway_dev.yaml b/operator/hack/lokistack_gateway_dev.yaml new file mode 100644 index 0000000000..aee3cafc6b --- /dev/null +++ b/operator/hack/lokistack_gateway_dev.yaml @@ -0,0 +1,40 @@ +apiVersion: loki.grafana.com/v1beta1 +kind: LokiStack +metadata: + name: lokistack-dev +spec: + size: 1x.extra-small + replicationFactor: 1 + storage: + secret: + name: test + storageClassName: gp2 + tenants: + mode: static + authentication: + - tenantName: tenant-a + tenantId: test + oidc: + secret: + name: test1 + issuerURL: https://127.0.0.1:5556/dex + redirectURL: https://localhost:8443/oidc/tenant-a/callback + usernameClaim: test + groupClaim: test + authorization: + roleBindings: + - name: tenant-a + roles: + - read-write + subjects: + - kind: user + name: admin@example.com + roles: + - name: read-write + permissions: + - read + - write + resources: + - metrics + tenants: + - tenant-a diff --git a/operator/hack/lokistack_gateway_ocp.yaml b/operator/hack/lokistack_gateway_ocp.yaml new file mode 100644 index 0000000000..8297be48e6 --- /dev/null +++ b/operator/hack/lokistack_gateway_ocp.yaml @@ -0,0 +1,13 @@ +apiVersion: loki.grafana.com/v1beta1 +kind: LokiStack +metadata: + name: lokistack-dev +spec: + size: 1x.extra-small + replicationFactor: 1 + storage: + secret: + name: test + storageClassName: gp2 + tenants: + mode: openshift-logging diff --git a/operator/img/loki-operator.png 
b/operator/img/loki-operator.png new file mode 100644 index 0000000000..5325711b66 Binary files /dev/null and b/operator/img/loki-operator.png differ diff --git a/operator/index.md b/operator/index.md new file mode 100644 index 0000000000..a9a5c11f2a --- /dev/null +++ b/operator/index.md @@ -0,0 +1,23 @@ +## Welcome to Loki Operator + +This is the Kubernetes Operator for Loki provided by the Grafana Loki SIG operator. This is currently a work in progress and is subject to large scale changes that will break any dependencies. Do not use this in any production environment. + +### Hacking on Loki Operator on kind or OpenShift + +* If you want to contribute to this repository, you might need a step-by-step guide on how to start [hacking on Loki-operator with kind](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#hacking-on-loki-operator-using-kind). +* Also, there is a step-by-step guide on how to test Loki-operator on [OpenShift](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#hacking-on-loki-operator-on-openshift). +* There is also a [basic troubleshooting guide](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#basic-troubleshooting-on-hacking-on-loki-operator) if you run into some common problems. +* There is also a [document](https://github.com/grafana/loki/blob/master/operator/docs/hack_operator_make_run.md) which demonstrates how to use Loki Operator for development and testing locally without deploying the operator each time on Kind and OpenShift using the `make run` command. + +### Sending Logs to Loki through the Gateway Component + +* The [forwarding logs to LokiStack guide](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md) provides instructions for configuring forwarding clients to ship logs to Loki through the gateway component. 
+* This section details [how to connect a Promtail](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md#promtail) installation to the gateway. +* This section details [how to connect a Grafana Fluentd plugin](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md#fluentd) installation to the gateway. + +### Installation of Storage Size Calculator on OpenShift + +* Storage size calculator works out of the box on OpenShift. For non-OpenShift distributions you will need to create services like prometheus, serviceMonitor, scrape configuration for log-file-metric exporter, promsecret to access the custom prometheus URL, token. +* The step-by-step guide on how to install [storage size calculator](https://github.com/grafana/loki/blob/master/operator/docs/storage_size_calculator.md) on OpenShift is available. +* Also, there is a step-by-step guide on how to [contribute](https://github.com/grafana/loki/blob/master/operator/docs/storage_size_calculator.md#contribution) to this along with local development and testing procedure. +* There is also a [basic troubleshooting guide](https://github.com/grafana/loki/blob/master/operator/docs/storage_size_calculator.md#troubleshooting) if you run into some common problems. diff --git a/operator/internal/external/k8s/builder.go b/operator/internal/external/k8s/builder.go new file mode 100644 index 0000000000..5f9270d464 --- /dev/null +++ b/operator/internal/external/k8s/builder.go @@ -0,0 +1,67 @@ +package k8s + +import ( + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// Builder is a controller-runtime interface used internally. It copies functions from +// sigs.k8s.io/controller-runtime/pkg/builder +// +//counterfeiter:generate . 
Builder +type Builder interface { + For(object client.Object, opts ...builder.ForOption) Builder + Owns(object client.Object, opts ...builder.OwnsOption) Builder + WithEventFilter(p predicate.Predicate) Builder + WithOptions(options controller.Options) Builder + WithLogger(log logr.Logger) Builder + Named(name string) Builder + Complete(r reconcile.Reconciler) error + Build(r reconcile.Reconciler) (controller.Controller, error) +} + +type ctrlBuilder struct { + bld *builder.Builder +} + +// NewCtrlBuilder returns a self-referencing controlled builder +// passthrough wrapper implementing the Builder interface above. +func NewCtrlBuilder(b *builder.Builder) Builder { + return &ctrlBuilder{bld: b} +} + +func (b *ctrlBuilder) For(object client.Object, opts ...builder.ForOption) Builder { + return &ctrlBuilder{bld: b.bld.For(object, opts...)} +} + +func (b *ctrlBuilder) Owns(object client.Object, opts ...builder.OwnsOption) Builder { + return &ctrlBuilder{bld: b.bld.Owns(object, opts...)} +} + +func (b *ctrlBuilder) WithEventFilter(p predicate.Predicate) Builder { + return &ctrlBuilder{bld: b.bld.WithEventFilter(p)} +} + +func (b *ctrlBuilder) WithOptions(opts controller.Options) Builder { + return &ctrlBuilder{bld: b.bld.WithOptions(opts)} +} + +func (b *ctrlBuilder) WithLogger(log logr.Logger) Builder { + return &ctrlBuilder{bld: b.bld.WithLogger(log)} +} + +func (b *ctrlBuilder) Named(name string) Builder { + return &ctrlBuilder{bld: b.bld.Named(name)} +} + +func (b *ctrlBuilder) Complete(r reconcile.Reconciler) error { + return b.bld.Complete(r) +} + +func (b *ctrlBuilder) Build(r reconcile.Reconciler) (controller.Controller, error) { + return b.bld.Build(r) +} diff --git a/operator/internal/external/k8s/client.go b/operator/internal/external/k8s/client.go new file mode 100644 index 0000000000..41f41bb0d9 --- /dev/null +++ b/operator/internal/external/k8s/client.go @@ -0,0 +1,41 @@ +package k8s + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + 
"k8s.io/apimachinery/pkg/runtime" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate + +// Client is a kubernetes client interface used internally. It copies functions from +// sigs.k8s.io/controller-runtime/pkg/client +// +//counterfeiter:generate . Client +type Client interface { + Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error + Get(ctx context.Context, key client.ObjectKey, obj client.Object) error + + Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error + Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error + DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error + List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error + Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error + + RESTMapper() meta.RESTMapper + Scheme() *runtime.Scheme + + Status() client.StatusWriter +} + +// StatusWriter is a kubernetes status writer interface used internally. It copies functions from +// sigs.k8s.io/controller-runtime/pkg/client +// +//counterfeiter:generate . StatusWriter +type StatusWriter interface { + Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error + Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error +} diff --git a/operator/internal/external/k8s/k8sfakes/fake_builder.go b/operator/internal/external/k8s/k8sfakes/fake_builder.go new file mode 100644 index 0000000000..b0702d2e54 --- /dev/null +++ b/operator/internal/external/k8s/k8sfakes/fake_builder.go @@ -0,0 +1,644 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package k8sfakes + +import ( + "sync" + + "github.com/go-logr/logr" + "github.com/grafana/loki-operator/internal/external/k8s" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type FakeBuilder struct { + BuildStub func(reconcile.Reconciler) (controller.Controller, error) + buildMutex sync.RWMutex + buildArgsForCall []struct { + arg1 reconcile.Reconciler + } + buildReturns struct { + result1 controller.Controller + result2 error + } + buildReturnsOnCall map[int]struct { + result1 controller.Controller + result2 error + } + CompleteStub func(reconcile.Reconciler) error + completeMutex sync.RWMutex + completeArgsForCall []struct { + arg1 reconcile.Reconciler + } + completeReturns struct { + result1 error + } + completeReturnsOnCall map[int]struct { + result1 error + } + ForStub func(client.Object, ...builder.ForOption) k8s.Builder + forMutex sync.RWMutex + forArgsForCall []struct { + arg1 client.Object + arg2 []builder.ForOption + } + forReturns struct { + result1 k8s.Builder + } + forReturnsOnCall map[int]struct { + result1 k8s.Builder + } + NamedStub func(string) k8s.Builder + namedMutex sync.RWMutex + namedArgsForCall []struct { + arg1 string + } + namedReturns struct { + result1 k8s.Builder + } + namedReturnsOnCall map[int]struct { + result1 k8s.Builder + } + OwnsStub func(client.Object, ...builder.OwnsOption) k8s.Builder + ownsMutex sync.RWMutex + ownsArgsForCall []struct { + arg1 client.Object + arg2 []builder.OwnsOption + } + ownsReturns struct { + result1 k8s.Builder + } + ownsReturnsOnCall map[int]struct { + result1 k8s.Builder + } + WithEventFilterStub func(predicate.Predicate) k8s.Builder + withEventFilterMutex sync.RWMutex + withEventFilterArgsForCall []struct { + arg1 predicate.Predicate + } + withEventFilterReturns struct { + result1 k8s.Builder + } + 
withEventFilterReturnsOnCall map[int]struct { + result1 k8s.Builder + } + WithLoggerStub func(logr.Logger) k8s.Builder + withLoggerMutex sync.RWMutex + withLoggerArgsForCall []struct { + arg1 logr.Logger + } + withLoggerReturns struct { + result1 k8s.Builder + } + withLoggerReturnsOnCall map[int]struct { + result1 k8s.Builder + } + WithOptionsStub func(controller.Options) k8s.Builder + withOptionsMutex sync.RWMutex + withOptionsArgsForCall []struct { + arg1 controller.Options + } + withOptionsReturns struct { + result1 k8s.Builder + } + withOptionsReturnsOnCall map[int]struct { + result1 k8s.Builder + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeBuilder) Build(arg1 reconcile.Reconciler) (controller.Controller, error) { + fake.buildMutex.Lock() + ret, specificReturn := fake.buildReturnsOnCall[len(fake.buildArgsForCall)] + fake.buildArgsForCall = append(fake.buildArgsForCall, struct { + arg1 reconcile.Reconciler + }{arg1}) + stub := fake.BuildStub + fakeReturns := fake.buildReturns + fake.recordInvocation("Build", []interface{}{arg1}) + fake.buildMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeBuilder) BuildCallCount() int { + fake.buildMutex.RLock() + defer fake.buildMutex.RUnlock() + return len(fake.buildArgsForCall) +} + +func (fake *FakeBuilder) BuildCalls(stub func(reconcile.Reconciler) (controller.Controller, error)) { + fake.buildMutex.Lock() + defer fake.buildMutex.Unlock() + fake.BuildStub = stub +} + +func (fake *FakeBuilder) BuildArgsForCall(i int) reconcile.Reconciler { + fake.buildMutex.RLock() + defer fake.buildMutex.RUnlock() + argsForCall := fake.buildArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeBuilder) BuildReturns(result1 controller.Controller, result2 error) { + fake.buildMutex.Lock() + defer fake.buildMutex.Unlock() + fake.BuildStub = nil 
+ fake.buildReturns = struct { + result1 controller.Controller + result2 error + }{result1, result2} +} + +func (fake *FakeBuilder) BuildReturnsOnCall(i int, result1 controller.Controller, result2 error) { + fake.buildMutex.Lock() + defer fake.buildMutex.Unlock() + fake.BuildStub = nil + if fake.buildReturnsOnCall == nil { + fake.buildReturnsOnCall = make(map[int]struct { + result1 controller.Controller + result2 error + }) + } + fake.buildReturnsOnCall[i] = struct { + result1 controller.Controller + result2 error + }{result1, result2} +} + +func (fake *FakeBuilder) Complete(arg1 reconcile.Reconciler) error { + fake.completeMutex.Lock() + ret, specificReturn := fake.completeReturnsOnCall[len(fake.completeArgsForCall)] + fake.completeArgsForCall = append(fake.completeArgsForCall, struct { + arg1 reconcile.Reconciler + }{arg1}) + stub := fake.CompleteStub + fakeReturns := fake.completeReturns + fake.recordInvocation("Complete", []interface{}{arg1}) + fake.completeMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBuilder) CompleteCallCount() int { + fake.completeMutex.RLock() + defer fake.completeMutex.RUnlock() + return len(fake.completeArgsForCall) +} + +func (fake *FakeBuilder) CompleteCalls(stub func(reconcile.Reconciler) error) { + fake.completeMutex.Lock() + defer fake.completeMutex.Unlock() + fake.CompleteStub = stub +} + +func (fake *FakeBuilder) CompleteArgsForCall(i int) reconcile.Reconciler { + fake.completeMutex.RLock() + defer fake.completeMutex.RUnlock() + argsForCall := fake.completeArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeBuilder) CompleteReturns(result1 error) { + fake.completeMutex.Lock() + defer fake.completeMutex.Unlock() + fake.CompleteStub = nil + fake.completeReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeBuilder) CompleteReturnsOnCall(i int, result1 error) { + fake.completeMutex.Lock() + defer 
fake.completeMutex.Unlock() + fake.CompleteStub = nil + if fake.completeReturnsOnCall == nil { + fake.completeReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.completeReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeBuilder) For(arg1 client.Object, arg2 ...builder.ForOption) k8s.Builder { + fake.forMutex.Lock() + ret, specificReturn := fake.forReturnsOnCall[len(fake.forArgsForCall)] + fake.forArgsForCall = append(fake.forArgsForCall, struct { + arg1 client.Object + arg2 []builder.ForOption + }{arg1, arg2}) + stub := fake.ForStub + fakeReturns := fake.forReturns + fake.recordInvocation("For", []interface{}{arg1, arg2}) + fake.forMutex.Unlock() + if stub != nil { + return stub(arg1, arg2...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBuilder) ForCallCount() int { + fake.forMutex.RLock() + defer fake.forMutex.RUnlock() + return len(fake.forArgsForCall) +} + +func (fake *FakeBuilder) ForCalls(stub func(client.Object, ...builder.ForOption) k8s.Builder) { + fake.forMutex.Lock() + defer fake.forMutex.Unlock() + fake.ForStub = stub +} + +func (fake *FakeBuilder) ForArgsForCall(i int) (client.Object, []builder.ForOption) { + fake.forMutex.RLock() + defer fake.forMutex.RUnlock() + argsForCall := fake.forArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeBuilder) ForReturns(result1 k8s.Builder) { + fake.forMutex.Lock() + defer fake.forMutex.Unlock() + fake.ForStub = nil + fake.forReturns = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) ForReturnsOnCall(i int, result1 k8s.Builder) { + fake.forMutex.Lock() + defer fake.forMutex.Unlock() + fake.ForStub = nil + if fake.forReturnsOnCall == nil { + fake.forReturnsOnCall = make(map[int]struct { + result1 k8s.Builder + }) + } + fake.forReturnsOnCall[i] = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) Named(arg1 string) k8s.Builder { + 
fake.namedMutex.Lock() + ret, specificReturn := fake.namedReturnsOnCall[len(fake.namedArgsForCall)] + fake.namedArgsForCall = append(fake.namedArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.NamedStub + fakeReturns := fake.namedReturns + fake.recordInvocation("Named", []interface{}{arg1}) + fake.namedMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBuilder) NamedCallCount() int { + fake.namedMutex.RLock() + defer fake.namedMutex.RUnlock() + return len(fake.namedArgsForCall) +} + +func (fake *FakeBuilder) NamedCalls(stub func(string) k8s.Builder) { + fake.namedMutex.Lock() + defer fake.namedMutex.Unlock() + fake.NamedStub = stub +} + +func (fake *FakeBuilder) NamedArgsForCall(i int) string { + fake.namedMutex.RLock() + defer fake.namedMutex.RUnlock() + argsForCall := fake.namedArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeBuilder) NamedReturns(result1 k8s.Builder) { + fake.namedMutex.Lock() + defer fake.namedMutex.Unlock() + fake.NamedStub = nil + fake.namedReturns = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) NamedReturnsOnCall(i int, result1 k8s.Builder) { + fake.namedMutex.Lock() + defer fake.namedMutex.Unlock() + fake.NamedStub = nil + if fake.namedReturnsOnCall == nil { + fake.namedReturnsOnCall = make(map[int]struct { + result1 k8s.Builder + }) + } + fake.namedReturnsOnCall[i] = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) Owns(arg1 client.Object, arg2 ...builder.OwnsOption) k8s.Builder { + fake.ownsMutex.Lock() + ret, specificReturn := fake.ownsReturnsOnCall[len(fake.ownsArgsForCall)] + fake.ownsArgsForCall = append(fake.ownsArgsForCall, struct { + arg1 client.Object + arg2 []builder.OwnsOption + }{arg1, arg2}) + stub := fake.OwnsStub + fakeReturns := fake.ownsReturns + fake.recordInvocation("Owns", []interface{}{arg1, arg2}) + fake.ownsMutex.Unlock() + if stub != 
nil { + return stub(arg1, arg2...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBuilder) OwnsCallCount() int { + fake.ownsMutex.RLock() + defer fake.ownsMutex.RUnlock() + return len(fake.ownsArgsForCall) +} + +func (fake *FakeBuilder) OwnsCalls(stub func(client.Object, ...builder.OwnsOption) k8s.Builder) { + fake.ownsMutex.Lock() + defer fake.ownsMutex.Unlock() + fake.OwnsStub = stub +} + +func (fake *FakeBuilder) OwnsArgsForCall(i int) (client.Object, []builder.OwnsOption) { + fake.ownsMutex.RLock() + defer fake.ownsMutex.RUnlock() + argsForCall := fake.ownsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeBuilder) OwnsReturns(result1 k8s.Builder) { + fake.ownsMutex.Lock() + defer fake.ownsMutex.Unlock() + fake.OwnsStub = nil + fake.ownsReturns = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) OwnsReturnsOnCall(i int, result1 k8s.Builder) { + fake.ownsMutex.Lock() + defer fake.ownsMutex.Unlock() + fake.OwnsStub = nil + if fake.ownsReturnsOnCall == nil { + fake.ownsReturnsOnCall = make(map[int]struct { + result1 k8s.Builder + }) + } + fake.ownsReturnsOnCall[i] = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) WithEventFilter(arg1 predicate.Predicate) k8s.Builder { + fake.withEventFilterMutex.Lock() + ret, specificReturn := fake.withEventFilterReturnsOnCall[len(fake.withEventFilterArgsForCall)] + fake.withEventFilterArgsForCall = append(fake.withEventFilterArgsForCall, struct { + arg1 predicate.Predicate + }{arg1}) + stub := fake.WithEventFilterStub + fakeReturns := fake.withEventFilterReturns + fake.recordInvocation("WithEventFilter", []interface{}{arg1}) + fake.withEventFilterMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBuilder) WithEventFilterCallCount() int { + fake.withEventFilterMutex.RLock() + defer 
fake.withEventFilterMutex.RUnlock() + return len(fake.withEventFilterArgsForCall) +} + +func (fake *FakeBuilder) WithEventFilterCalls(stub func(predicate.Predicate) k8s.Builder) { + fake.withEventFilterMutex.Lock() + defer fake.withEventFilterMutex.Unlock() + fake.WithEventFilterStub = stub +} + +func (fake *FakeBuilder) WithEventFilterArgsForCall(i int) predicate.Predicate { + fake.withEventFilterMutex.RLock() + defer fake.withEventFilterMutex.RUnlock() + argsForCall := fake.withEventFilterArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeBuilder) WithEventFilterReturns(result1 k8s.Builder) { + fake.withEventFilterMutex.Lock() + defer fake.withEventFilterMutex.Unlock() + fake.WithEventFilterStub = nil + fake.withEventFilterReturns = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) WithEventFilterReturnsOnCall(i int, result1 k8s.Builder) { + fake.withEventFilterMutex.Lock() + defer fake.withEventFilterMutex.Unlock() + fake.WithEventFilterStub = nil + if fake.withEventFilterReturnsOnCall == nil { + fake.withEventFilterReturnsOnCall = make(map[int]struct { + result1 k8s.Builder + }) + } + fake.withEventFilterReturnsOnCall[i] = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) WithLogger(arg1 logr.Logger) k8s.Builder { + fake.withLoggerMutex.Lock() + ret, specificReturn := fake.withLoggerReturnsOnCall[len(fake.withLoggerArgsForCall)] + fake.withLoggerArgsForCall = append(fake.withLoggerArgsForCall, struct { + arg1 logr.Logger + }{arg1}) + stub := fake.WithLoggerStub + fakeReturns := fake.withLoggerReturns + fake.recordInvocation("WithLogger", []interface{}{arg1}) + fake.withLoggerMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBuilder) WithLoggerCallCount() int { + fake.withLoggerMutex.RLock() + defer fake.withLoggerMutex.RUnlock() + return len(fake.withLoggerArgsForCall) +} + +func (fake 
*FakeBuilder) WithLoggerCalls(stub func(logr.Logger) k8s.Builder) { + fake.withLoggerMutex.Lock() + defer fake.withLoggerMutex.Unlock() + fake.WithLoggerStub = stub +} + +func (fake *FakeBuilder) WithLoggerArgsForCall(i int) logr.Logger { + fake.withLoggerMutex.RLock() + defer fake.withLoggerMutex.RUnlock() + argsForCall := fake.withLoggerArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeBuilder) WithLoggerReturns(result1 k8s.Builder) { + fake.withLoggerMutex.Lock() + defer fake.withLoggerMutex.Unlock() + fake.WithLoggerStub = nil + fake.withLoggerReturns = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) WithLoggerReturnsOnCall(i int, result1 k8s.Builder) { + fake.withLoggerMutex.Lock() + defer fake.withLoggerMutex.Unlock() + fake.WithLoggerStub = nil + if fake.withLoggerReturnsOnCall == nil { + fake.withLoggerReturnsOnCall = make(map[int]struct { + result1 k8s.Builder + }) + } + fake.withLoggerReturnsOnCall[i] = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) WithOptions(arg1 controller.Options) k8s.Builder { + fake.withOptionsMutex.Lock() + ret, specificReturn := fake.withOptionsReturnsOnCall[len(fake.withOptionsArgsForCall)] + fake.withOptionsArgsForCall = append(fake.withOptionsArgsForCall, struct { + arg1 controller.Options + }{arg1}) + stub := fake.WithOptionsStub + fakeReturns := fake.withOptionsReturns + fake.recordInvocation("WithOptions", []interface{}{arg1}) + fake.withOptionsMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBuilder) WithOptionsCallCount() int { + fake.withOptionsMutex.RLock() + defer fake.withOptionsMutex.RUnlock() + return len(fake.withOptionsArgsForCall) +} + +func (fake *FakeBuilder) WithOptionsCalls(stub func(controller.Options) k8s.Builder) { + fake.withOptionsMutex.Lock() + defer fake.withOptionsMutex.Unlock() + fake.WithOptionsStub = stub +} + +func (fake 
*FakeBuilder) WithOptionsArgsForCall(i int) controller.Options { + fake.withOptionsMutex.RLock() + defer fake.withOptionsMutex.RUnlock() + argsForCall := fake.withOptionsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeBuilder) WithOptionsReturns(result1 k8s.Builder) { + fake.withOptionsMutex.Lock() + defer fake.withOptionsMutex.Unlock() + fake.WithOptionsStub = nil + fake.withOptionsReturns = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) WithOptionsReturnsOnCall(i int, result1 k8s.Builder) { + fake.withOptionsMutex.Lock() + defer fake.withOptionsMutex.Unlock() + fake.WithOptionsStub = nil + if fake.withOptionsReturnsOnCall == nil { + fake.withOptionsReturnsOnCall = make(map[int]struct { + result1 k8s.Builder + }) + } + fake.withOptionsReturnsOnCall[i] = struct { + result1 k8s.Builder + }{result1} +} + +func (fake *FakeBuilder) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.buildMutex.RLock() + defer fake.buildMutex.RUnlock() + fake.completeMutex.RLock() + defer fake.completeMutex.RUnlock() + fake.forMutex.RLock() + defer fake.forMutex.RUnlock() + fake.namedMutex.RLock() + defer fake.namedMutex.RUnlock() + fake.ownsMutex.RLock() + defer fake.ownsMutex.RUnlock() + fake.withEventFilterMutex.RLock() + defer fake.withEventFilterMutex.RUnlock() + fake.withLoggerMutex.RLock() + defer fake.withLoggerMutex.RUnlock() + fake.withOptionsMutex.RLock() + defer fake.withOptionsMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeBuilder) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + 
} + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ k8s.Builder = new(FakeBuilder) diff --git a/operator/internal/external/k8s/k8sfakes/fake_client.go b/operator/internal/external/k8s/k8sfakes/fake_client.go new file mode 100644 index 0000000000..bdf4050d10 --- /dev/null +++ b/operator/internal/external/k8s/k8sfakes/fake_client.go @@ -0,0 +1,785 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package k8sfakes + +import ( + "context" + "sync" + + "github.com/grafana/loki-operator/internal/external/k8s" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type FakeClient struct { + CreateStub func(context.Context, client.Object, ...client.CreateOption) error + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.CreateOption + } + createReturns struct { + result1 error + } + createReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(context.Context, client.Object, ...client.DeleteOption) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + DeleteAllOfStub func(context.Context, client.Object, ...client.DeleteAllOfOption) error + deleteAllOfMutex sync.RWMutex + deleteAllOfArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteAllOfOption + } + deleteAllOfReturns struct { + result1 error + } + deleteAllOfReturnsOnCall map[int]struct { + result1 error + } + GetStub func(context.Context, types.NamespacedName, client.Object) error + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + } + getReturns struct { + result1 error + } + getReturnsOnCall map[int]struct { + 
result1 error + } + ListStub func(context.Context, client.ObjectList, ...client.ListOption) error + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + } + listReturns struct { + result1 error + } + listReturnsOnCall map[int]struct { + result1 error + } + PatchStub func(context.Context, client.Object, client.Patch, ...client.PatchOption) error + patchMutex sync.RWMutex + patchArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []client.PatchOption + } + patchReturns struct { + result1 error + } + patchReturnsOnCall map[int]struct { + result1 error + } + RESTMapperStub func() meta.RESTMapper + rESTMapperMutex sync.RWMutex + rESTMapperArgsForCall []struct { + } + rESTMapperReturns struct { + result1 meta.RESTMapper + } + rESTMapperReturnsOnCall map[int]struct { + result1 meta.RESTMapper + } + SchemeStub func() *runtime.Scheme + schemeMutex sync.RWMutex + schemeArgsForCall []struct { + } + schemeReturns struct { + result1 *runtime.Scheme + } + schemeReturnsOnCall map[int]struct { + result1 *runtime.Scheme + } + StatusStub func() client.StatusWriter + statusMutex sync.RWMutex + statusArgsForCall []struct { + } + statusReturns struct { + result1 client.StatusWriter + } + statusReturnsOnCall map[int]struct { + result1 client.StatusWriter + } + UpdateStub func(context.Context, client.Object, ...client.UpdateOption) error + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + } + updateReturns struct { + result1 error + } + updateReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeClient) Create(arg1 context.Context, arg2 client.Object, arg3 ...client.CreateOption) error { + fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + 
fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.CreateOption + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *FakeClient) CreateCalls(stub func(context.Context, client.Object, ...client.CreateOption) error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *FakeClient) CreateArgsForCall(i int) (context.Context, client.Object, []client.CreateOption) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) CreateReturns(result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) CreateReturnsOnCall(i int, result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) Delete(arg1 context.Context, arg2 client.Object, arg3 ...client.DeleteOption) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + 
}{arg1, arg2, arg3}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1, arg2, arg3}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *FakeClient) DeleteCalls(stub func(context.Context, client.Object, ...client.DeleteOption) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *FakeClient) DeleteArgsForCall(i int) (context.Context, client.Object, []client.DeleteOption) { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) DeleteAllOf(arg1 context.Context, arg2 client.Object, arg3 ...client.DeleteAllOfOption) error { + fake.deleteAllOfMutex.Lock() + ret, specificReturn := fake.deleteAllOfReturnsOnCall[len(fake.deleteAllOfArgsForCall)] + fake.deleteAllOfArgsForCall = append(fake.deleteAllOfArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteAllOfOption + }{arg1, arg2, arg3}) + stub := fake.DeleteAllOfStub + fakeReturns := fake.deleteAllOfReturns + 
fake.recordInvocation("DeleteAllOf", []interface{}{arg1, arg2, arg3}) + fake.deleteAllOfMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) DeleteAllOfCallCount() int { + fake.deleteAllOfMutex.RLock() + defer fake.deleteAllOfMutex.RUnlock() + return len(fake.deleteAllOfArgsForCall) +} + +func (fake *FakeClient) DeleteAllOfCalls(stub func(context.Context, client.Object, ...client.DeleteAllOfOption) error) { + fake.deleteAllOfMutex.Lock() + defer fake.deleteAllOfMutex.Unlock() + fake.DeleteAllOfStub = stub +} + +func (fake *FakeClient) DeleteAllOfArgsForCall(i int) (context.Context, client.Object, []client.DeleteAllOfOption) { + fake.deleteAllOfMutex.RLock() + defer fake.deleteAllOfMutex.RUnlock() + argsForCall := fake.deleteAllOfArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) DeleteAllOfReturns(result1 error) { + fake.deleteAllOfMutex.Lock() + defer fake.deleteAllOfMutex.Unlock() + fake.DeleteAllOfStub = nil + fake.deleteAllOfReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) DeleteAllOfReturnsOnCall(i int, result1 error) { + fake.deleteAllOfMutex.Lock() + defer fake.deleteAllOfMutex.Unlock() + fake.DeleteAllOfStub = nil + if fake.deleteAllOfReturnsOnCall == nil { + fake.deleteAllOfReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteAllOfReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) Get(arg1 context.Context, arg2 types.NamespacedName, arg3 client.Object) error { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + }{arg1, arg2, arg3}) + stub := fake.GetStub + fakeReturns := fake.getReturns + 
fake.recordInvocation("Get", []interface{}{arg1, arg2, arg3}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *FakeClient) GetCalls(stub func(context.Context, types.NamespacedName, client.Object) error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *FakeClient) GetArgsForCall(i int) (context.Context, types.NamespacedName, client.Object) { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) GetReturns(result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) GetReturnsOnCall(i int, result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { + fake.listMutex.Lock() + ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + }{arg1, arg2, arg3}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1, arg2, arg3}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *FakeClient) ListCalls(stub func(context.Context, client.ObjectList, ...client.ListOption) error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *FakeClient) ListArgsForCall(i int) (context.Context, client.ObjectList, []client.ListOption) { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) ListReturns(result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) ListReturnsOnCall(i int, result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) Patch(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...client.PatchOption) error { + fake.patchMutex.Lock() + ret, specificReturn := fake.patchReturnsOnCall[len(fake.patchArgsForCall)] + fake.patchArgsForCall = append(fake.patchArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []client.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStub + fakeReturns := fake.patchReturns + fake.recordInvocation("Patch", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) PatchCallCount() int { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + return len(fake.patchArgsForCall) +} + +func (fake *FakeClient) PatchCalls(stub func(context.Context, client.Object, client.Patch, ...client.PatchOption) error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = stub +} + +func (fake *FakeClient) PatchArgsForCall(i int) (context.Context, client.Object, client.Patch, []client.PatchOption) { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + argsForCall := fake.patchArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *FakeClient) PatchReturns(result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + fake.patchReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) PatchReturnsOnCall(i int, result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + if fake.patchReturnsOnCall == nil { + fake.patchReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) RESTMapper() meta.RESTMapper { + fake.rESTMapperMutex.Lock() + ret, specificReturn := fake.rESTMapperReturnsOnCall[len(fake.rESTMapperArgsForCall)] + fake.rESTMapperArgsForCall = append(fake.rESTMapperArgsForCall, struct { + }{}) + stub := fake.RESTMapperStub + fakeReturns := fake.rESTMapperReturns + fake.recordInvocation("RESTMapper", []interface{}{}) + fake.rESTMapperMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) RESTMapperCallCount() int { + fake.rESTMapperMutex.RLock() + defer fake.rESTMapperMutex.RUnlock() + return len(fake.rESTMapperArgsForCall) +} + +func 
(fake *FakeClient) RESTMapperCalls(stub func() meta.RESTMapper) { + fake.rESTMapperMutex.Lock() + defer fake.rESTMapperMutex.Unlock() + fake.RESTMapperStub = stub +} + +func (fake *FakeClient) RESTMapperReturns(result1 meta.RESTMapper) { + fake.rESTMapperMutex.Lock() + defer fake.rESTMapperMutex.Unlock() + fake.RESTMapperStub = nil + fake.rESTMapperReturns = struct { + result1 meta.RESTMapper + }{result1} +} + +func (fake *FakeClient) RESTMapperReturnsOnCall(i int, result1 meta.RESTMapper) { + fake.rESTMapperMutex.Lock() + defer fake.rESTMapperMutex.Unlock() + fake.RESTMapperStub = nil + if fake.rESTMapperReturnsOnCall == nil { + fake.rESTMapperReturnsOnCall = make(map[int]struct { + result1 meta.RESTMapper + }) + } + fake.rESTMapperReturnsOnCall[i] = struct { + result1 meta.RESTMapper + }{result1} +} + +func (fake *FakeClient) Scheme() *runtime.Scheme { + fake.schemeMutex.Lock() + ret, specificReturn := fake.schemeReturnsOnCall[len(fake.schemeArgsForCall)] + fake.schemeArgsForCall = append(fake.schemeArgsForCall, struct { + }{}) + stub := fake.SchemeStub + fakeReturns := fake.schemeReturns + fake.recordInvocation("Scheme", []interface{}{}) + fake.schemeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) SchemeCallCount() int { + fake.schemeMutex.RLock() + defer fake.schemeMutex.RUnlock() + return len(fake.schemeArgsForCall) +} + +func (fake *FakeClient) SchemeCalls(stub func() *runtime.Scheme) { + fake.schemeMutex.Lock() + defer fake.schemeMutex.Unlock() + fake.SchemeStub = stub +} + +func (fake *FakeClient) SchemeReturns(result1 *runtime.Scheme) { + fake.schemeMutex.Lock() + defer fake.schemeMutex.Unlock() + fake.SchemeStub = nil + fake.schemeReturns = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *FakeClient) SchemeReturnsOnCall(i int, result1 *runtime.Scheme) { + fake.schemeMutex.Lock() + defer fake.schemeMutex.Unlock() + 
fake.SchemeStub = nil + if fake.schemeReturnsOnCall == nil { + fake.schemeReturnsOnCall = make(map[int]struct { + result1 *runtime.Scheme + }) + } + fake.schemeReturnsOnCall[i] = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *FakeClient) Status() client.StatusWriter { + fake.statusMutex.Lock() + ret, specificReturn := fake.statusReturnsOnCall[len(fake.statusArgsForCall)] + fake.statusArgsForCall = append(fake.statusArgsForCall, struct { + }{}) + stub := fake.StatusStub + fakeReturns := fake.statusReturns + fake.recordInvocation("Status", []interface{}{}) + fake.statusMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) StatusCallCount() int { + fake.statusMutex.RLock() + defer fake.statusMutex.RUnlock() + return len(fake.statusArgsForCall) +} + +func (fake *FakeClient) StatusCalls(stub func() client.StatusWriter) { + fake.statusMutex.Lock() + defer fake.statusMutex.Unlock() + fake.StatusStub = stub +} + +func (fake *FakeClient) StatusReturns(result1 client.StatusWriter) { + fake.statusMutex.Lock() + defer fake.statusMutex.Unlock() + fake.StatusStub = nil + fake.statusReturns = struct { + result1 client.StatusWriter + }{result1} +} + +func (fake *FakeClient) StatusReturnsOnCall(i int, result1 client.StatusWriter) { + fake.statusMutex.Lock() + defer fake.statusMutex.Unlock() + fake.StatusStub = nil + if fake.statusReturnsOnCall == nil { + fake.statusReturnsOnCall = make(map[int]struct { + result1 client.StatusWriter + }) + } + fake.statusReturnsOnCall[i] = struct { + result1 client.StatusWriter + }{result1} +} + +func (fake *FakeClient) Update(arg1 context.Context, arg2 client.Object, arg3 ...client.UpdateOption) error { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 
[]client.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2, arg3}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *FakeClient) UpdateCalls(stub func(context.Context, client.Object, ...client.UpdateOption) error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *FakeClient) UpdateArgsForCall(i int) (context.Context, client.Object, []client.UpdateOption) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) UpdateReturns(result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) UpdateReturnsOnCall(i int, result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.deleteAllOfMutex.RLock() + defer fake.deleteAllOfMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + 
fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + fake.rESTMapperMutex.RLock() + defer fake.rESTMapperMutex.RUnlock() + fake.schemeMutex.RLock() + defer fake.schemeMutex.RUnlock() + fake.statusMutex.RLock() + defer fake.statusMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeClient) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ k8s.Client = new(FakeClient) diff --git a/operator/internal/external/k8s/k8sfakes/fake_client_extensions.go b/operator/internal/external/k8s/k8sfakes/fake_client_extensions.go new file mode 100644 index 0000000000..32b0aa3817 --- /dev/null +++ b/operator/internal/external/k8s/k8sfakes/fake_client_extensions.go @@ -0,0 +1,33 @@ +package k8sfakes + +import ( + "reflect" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// SetClientObject sets out to v. +// This is primarily used within the GetStub to fake the object returned from the API to the vaule of v +// +// Examples: +// +// k.GetStub = func(_ context.Context, _ types.NamespacedName, object client.Object) error { +// k.SetClientObject(object, &stack) +// return nil +// } +func (fake *FakeClient) SetClientObject(out, v client.Object) { + reflect.Indirect(reflect.ValueOf(out)).Set(reflect.ValueOf(v).Elem()) +} + +// SetClientObjectList sets out list to v. 
+// This is primarily used within the GetStub to fake the object returned from the API to the value of v +// +// Examples: +// +// k.GetStub = func(_ context.Context, _ types.NamespacedName, list client.ObjectList) error { +// k.SetClientObjectList(list, &podList) +// return nil +// } +func (fake *FakeClient) SetClientObjectList(out, v client.ObjectList) { + reflect.Indirect(reflect.ValueOf(out)).Set(reflect.ValueOf(v).Elem()) +} diff --git a/operator/internal/external/k8s/k8sfakes/fake_status_writer.go b/operator/internal/external/k8s/k8sfakes/fake_status_writer.go new file mode 100644 index 0000000000..e564a41c47 --- /dev/null +++ b/operator/internal/external/k8s/k8sfakes/fake_status_writer.go @@ -0,0 +1,197 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package k8sfakes + +import ( + "context" + "sync" + + "github.com/grafana/loki-operator/internal/external/k8s" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type FakeStatusWriter struct { + PatchStub func(context.Context, client.Object, client.Patch, ...client.PatchOption) error + patchMutex sync.RWMutex + patchArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []client.PatchOption + } + patchReturns struct { + result1 error + } + patchReturnsOnCall map[int]struct { + result1 error + } + UpdateStub func(context.Context, client.Object, ...client.UpdateOption) error + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + } + updateReturns struct { + result1 error + } + updateReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeStatusWriter) Patch(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...client.PatchOption) error { + fake.patchMutex.Lock() + ret, specificReturn := fake.patchReturnsOnCall[len(fake.patchArgsForCall)] + fake.patchArgsForCall = 
append(fake.patchArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []client.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStub + fakeReturns := fake.patchReturns + fake.recordInvocation("Patch", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStatusWriter) PatchCallCount() int { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + return len(fake.patchArgsForCall) +} + +func (fake *FakeStatusWriter) PatchCalls(stub func(context.Context, client.Object, client.Patch, ...client.PatchOption) error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = stub +} + +func (fake *FakeStatusWriter) PatchArgsForCall(i int) (context.Context, client.Object, client.Patch, []client.PatchOption) { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + argsForCall := fake.patchArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *FakeStatusWriter) PatchReturns(result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + fake.patchReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStatusWriter) PatchReturnsOnCall(i int, result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + if fake.patchReturnsOnCall == nil { + fake.patchReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeStatusWriter) Update(arg1 context.Context, arg2 client.Object, arg3 ...client.UpdateOption) error { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 
context.Context + arg2 client.Object + arg3 []client.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2, arg3}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStatusWriter) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *FakeStatusWriter) UpdateCalls(stub func(context.Context, client.Object, ...client.UpdateOption) error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *FakeStatusWriter) UpdateArgsForCall(i int) (context.Context, client.Object, []client.UpdateOption) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeStatusWriter) UpdateReturns(result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStatusWriter) UpdateReturnsOnCall(i int, result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeStatusWriter) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + 
} + return copiedInvocations +} + +func (fake *FakeStatusWriter) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ k8s.StatusWriter = new(FakeStatusWriter) diff --git a/operator/internal/handlers/internal/gateway/base_domain.go b/operator/internal/handlers/internal/gateway/base_domain.go new file mode 100644 index 0000000000..f029017427 --- /dev/null +++ b/operator/internal/handlers/internal/gateway/base_domain.go @@ -0,0 +1,41 @@ +package gateway + +import ( + "context" + + "github.com/ViaQ/logerr/kverrors" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s" + "github.com/grafana/loki-operator/internal/status" + configv1 "github.com/openshift/api/config/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetOpenShiftBaseDomain returns the cluster DNS base domain on OpenShift +// clusters to auto-create redirect URLs for OpenShift Auth or an error. +// If the config.openshift.io/DNS object is not found the whole lokistack +// resource is set to a degraded state. 
+func GetOpenShiftBaseDomain(ctx context.Context, k k8s.Client, req ctrl.Request) (string, error) { + var cluster configv1.DNS + key := client.ObjectKey{Name: "cluster"} + if err := k.Get(ctx, key, &cluster); err != nil { + + if apierrors.IsNotFound(err) { + statusErr := status.SetDegradedCondition(ctx, k, req, + "Missing cluster DNS configuration to read base domain", + lokiv1beta1.ReasonMissingGatewayOpenShiftBaseDomain, + ) + if statusErr != nil { + return "", statusErr + } + + return "", kverrors.Wrap(err, "Missing cluster DNS configuration to read base domain") + } + return "", kverrors.Wrap(err, "failed to lookup lokistack gateway base domain", + "name", key) + } + + return cluster.Spec.BaseDomain, nil +} diff --git a/operator/internal/handlers/internal/gateway/modes.go b/operator/internal/handlers/internal/gateway/modes.go new file mode 100644 index 0000000000..432f182dc4 --- /dev/null +++ b/operator/internal/handlers/internal/gateway/modes.go @@ -0,0 +1,57 @@ +package gateway + +import ( + "github.com/ViaQ/logerr/kverrors" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" +) + +// ValidateModes validates the tenants mode specification. 
+func ValidateModes(stack lokiv1beta1.LokiStack) error { + if stack.Spec.Tenants.Mode == lokiv1beta1.Static { + if stack.Spec.Tenants.Authentication == nil { + return kverrors.New("mandatory configuration - missing tenants' authentication configuration") + } + + if stack.Spec.Tenants.Authorization == nil || stack.Spec.Tenants.Authorization.Roles == nil { + return kverrors.New("mandatory configuration - missing roles configuration") + } + + if stack.Spec.Tenants.Authorization == nil || stack.Spec.Tenants.Authorization.RoleBindings == nil { + return kverrors.New("mandatory configuration - missing role bindings configuration") + } + + if stack.Spec.Tenants.Authorization != nil && stack.Spec.Tenants.Authorization.OPA != nil { + return kverrors.New("incompatible configuration - OPA URL not required for mode static") + } + } + + if stack.Spec.Tenants.Mode == lokiv1beta1.Dynamic { + if stack.Spec.Tenants.Authentication == nil { + return kverrors.New("mandatory configuration - missing tenants configuration") + } + + if stack.Spec.Tenants.Authorization == nil || stack.Spec.Tenants.Authorization.OPA == nil { + return kverrors.New("mandatory configuration - missing OPA Url") + } + + if stack.Spec.Tenants.Authorization != nil && stack.Spec.Tenants.Authorization.Roles != nil { + return kverrors.New("incompatible configuration - static roles not required for mode dynamic") + } + + if stack.Spec.Tenants.Authorization != nil && stack.Spec.Tenants.Authorization.RoleBindings != nil { + return kverrors.New("incompatible configuration - static roleBindings not required for mode dynamic") + } + } + + if stack.Spec.Tenants.Mode == lokiv1beta1.OpenshiftLogging { + if stack.Spec.Tenants.Authentication != nil { + return kverrors.New("incompatible configuration - custom tenants configuration not required") + } + + if stack.Spec.Tenants.Authorization != nil { + return kverrors.New("incompatible configuration - custom tenants configuration not required") + } + } + + return nil +} diff --git 
a/operator/internal/handlers/internal/gateway/modes_test.go b/operator/internal/handlers/internal/gateway/modes_test.go new file mode 100644 index 0000000000..8dd0353f2e --- /dev/null +++ b/operator/internal/handlers/internal/gateway/modes_test.go @@ -0,0 +1,545 @@ +package gateway + +import ( + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestValidateModes_StaticMode(t *testing.T) { + type test struct { + name string + wantErr string + stack lokiv1beta1.LokiStack + } + table := []test{ + { + name: "missing authentication spec", + wantErr: "mandatory configuration - missing tenants' authentication configuration", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "static", + }, + }, + }, + }, + { + name: "missing roles spec", + wantErr: "mandatory configuration - missing roles configuration", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "static", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + IssuerURL: "some-url", + RedirectURL: "some-other-url", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + Roles: nil, + }, + }, + }, + }, + }, + { + name: "missing role bindings spec", + wantErr: "mandatory configuration - missing role 
bindings configuration", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "static", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + IssuerURL: "some-url", + RedirectURL: "some-other-url", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + Roles: []lokiv1beta1.RoleSpec{ + { + Name: "some-name", + Resources: []string{"test"}, + Tenants: []string{"test"}, + Permissions: []lokiv1beta1.PermissionType{"read"}, + }, + }, + RoleBindings: nil, + }, + }, + }, + }, + }, + { + name: "incompatible OPA URL provided", + wantErr: "incompatible configuration - OPA URL not required for mode static", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "static", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + IssuerURL: "some-url", + RedirectURL: "some-other-url", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: &lokiv1beta1.OPASpec{ + URL: "some-url", + }, + Roles: []lokiv1beta1.RoleSpec{ + { + Name: "some-name", + Resources: []string{"test"}, + Tenants: []string{"test"}, + Permissions: []lokiv1beta1.PermissionType{"read"}, + }, + }, + RoleBindings: []lokiv1beta1.RoleBindingsSpec{ + { + Name: "some-name", + Subjects: 
[]lokiv1beta1.Subject{ + { + Name: "sub-1", + Kind: "user", + }, + }, + Roles: []string{"some-role"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "all set", + wantErr: "", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "static", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + IssuerURL: "some-url", + RedirectURL: "some-other-url", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + Roles: []lokiv1beta1.RoleSpec{ + { + Name: "some-name", + Resources: []string{"test"}, + Tenants: []string{"test"}, + Permissions: []lokiv1beta1.PermissionType{"read"}, + }, + }, + RoleBindings: []lokiv1beta1.RoleBindingsSpec{ + { + Name: "some-name", + Subjects: []lokiv1beta1.Subject{ + { + Name: "sub-1", + Kind: "user", + }, + }, + Roles: []string{"some-role"}, + }, + }, + }, + }, + }, + }, + }, + } + for _, tst := range table { + tst := tst + t.Run(tst.name, func(t *testing.T) { + t.Parallel() + + err := ValidateModes(tst.stack) + if tst.wantErr != "" { + require.EqualError(t, err, tst.wantErr) + } + }) + } +} + +func TestValidateModes_DynamicMode(t *testing.T) { + type test struct { + name string + wantErr string + stack lokiv1beta1.LokiStack + } + table := []test{ + { + name: "missing authentication spec", + wantErr: "mandatory configuration - missing tenants configuration", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: 
lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "dynamic", + }, + }, + }, + }, + { + name: "missing OPA URL spec", + wantErr: "mandatory configuration - missing OPA Url", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + IssuerURL: "some-url", + RedirectURL: "some-other-url", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: nil, + }, + }, + }, + }, + }, + { + name: "incompatible roles configuration provided", + wantErr: "incompatible configuration - static roles not required for mode dynamic", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + IssuerURL: "some-url", + RedirectURL: "some-other-url", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: &lokiv1beta1.OPASpec{ + URL: "some-url", + }, + Roles: []lokiv1beta1.RoleSpec{ + { + Name: "some-name", + Resources: []string{"test"}, + Tenants: []string{"test"}, + Permissions: []lokiv1beta1.PermissionType{"read"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "incompatible roleBindings configuration provided", + 
wantErr: "incompatible configuration - static roleBindings not required for mode dynamic", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + IssuerURL: "some-url", + RedirectURL: "some-other-url", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: &lokiv1beta1.OPASpec{ + URL: "some-url", + }, + RoleBindings: []lokiv1beta1.RoleBindingsSpec{ + { + Name: "some-name", + Subjects: []lokiv1beta1.Subject{ + { + Name: "sub-1", + Kind: "user", + }, + }, + Roles: []string{"some-role"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "all set", + wantErr: "", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + IssuerURL: "some-url", + RedirectURL: "some-other-url", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: &lokiv1beta1.OPASpec{ + URL: "some-url", + }, + }, + }, + }, + }, + }, + } + for _, tst := range table { + tst := tst + t.Run(tst.name, func(t *testing.T) { + t.Parallel() + + err := ValidateModes(tst.stack) + if tst.wantErr != "" { + require.EqualError(t, err, tst.wantErr) + } + }) + } +} + +func 
TestValidateModes_OpenshiftLoggingMode(t *testing.T) { + type test struct { + name string + wantErr string + stack lokiv1beta1.LokiStack + } + table := []test{ + { + name: "incompatible authentication spec provided", + wantErr: "incompatible configuration - custom tenants configuration not required", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "openshift-logging", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + IssuerURL: "some-url", + RedirectURL: "some-other-url", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + }, + }, + }, + }, + { + name: "incompatible authorization spec provided", + wantErr: "incompatible configuration - custom tenants configuration not required", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "openshift-logging", + Authentication: nil, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: &lokiv1beta1.OPASpec{ + URL: "some-url", + }, + }, + }, + }, + }, + }, + { + name: "all set", + wantErr: "", + stack: lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "openshift-logging", + }, + }, + }, + }, + } + 
for _, tst := range table { + tst := tst + t.Run(tst.name, func(t *testing.T) { + t.Parallel() + + err := ValidateModes(tst.stack) + if tst.wantErr != "" { + require.EqualError(t, err, tst.wantErr) + } + }) + } +} diff --git a/operator/internal/handlers/internal/gateway/tenant_configmap.go b/operator/internal/handlers/internal/gateway/tenant_configmap.go new file mode 100644 index 0000000000..b4ea2ad5bf --- /dev/null +++ b/operator/internal/handlers/internal/gateway/tenant_configmap.go @@ -0,0 +1,90 @@ +package gateway + +import ( + "context" + + "github.com/grafana/loki-operator/internal/manifests/openshift" + + "github.com/ViaQ/logerr/log" + + "github.com/ViaQ/logerr/kverrors" + "github.com/grafana/loki-operator/internal/manifests" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/json" + "sigs.k8s.io/yaml" + + "github.com/grafana/loki-operator/internal/external/k8s" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // LokiGatewayTenantFileName is the name of the tenant config file in the configmap + LokiGatewayTenantFileName = "tenants.yaml" +) + +type tenantsConfigJSON struct { + Tenants []tenantsSpec `json:"tenants,omitempty"` +} + +type tenantsSpec struct { + Name string `json:"name"` + ID string `json:"id"` + OpenShift *openShiftSpec `json:"openshift"` +} + +type openShiftSpec struct { + ServiceAccount string `json:"serviceAccount"` + RedirectURL string `json:"redirectURL"` + CookieSecret string `json:"cookieSecret"` +} + +// GetTenantConfigMapData returns the tenantName, tenantId, cookieSecret +// clusters to auto-create redirect URLs for OpenShift Auth or an error. 
+func GetTenantConfigMapData(ctx context.Context, k k8s.Client, req ctrl.Request) map[string]openshift.TenantData { + var tenantConfigMap corev1.ConfigMap + key := client.ObjectKey{Name: manifests.LabelGatewayComponent, Namespace: req.Namespace} + if err := k.Get(ctx, key, &tenantConfigMap); err != nil { + log.Error(err, "couldn't find") + return nil + } + + tcm, err := extractTenantConfigMap(&tenantConfigMap) + if err != nil { + log.Error(err, "error occurred in extracting tenants.yaml configMap.") + return nil + } + + tcmMap := make(map[string]openshift.TenantData) + for _, tenant := range tcm.Tenants { + tcmMap[tenant.Name] = openshift.TenantData{ + TenantID: tenant.ID, + CookieSecret: tenant.OpenShift.CookieSecret, + } + } + + return tcmMap +} + +// extractTenantConfigMap extracts tenants.yaml data if valid. +// This is to be used to configure tenant's authentication spec when exists. +func extractTenantConfigMap(cm *corev1.ConfigMap) (*tenantsConfigJSON, error) { + // Extract required fields from tenants.yaml + tenantConfigYAML, ok := cm.BinaryData[LokiGatewayTenantFileName] + if !ok { + return nil, kverrors.New("missing tenants.yaml file in configMap.") + } + + tenantConfigJSON, err := yaml.YAMLToJSON(tenantConfigYAML) + if err != nil { + return nil, kverrors.New("error in converting tenant config yaml to json.") + } + + var tenantConfig tenantsConfigJSON + err = json.Unmarshal(tenantConfigJSON, &tenantConfig) + if err != nil { + return nil, kverrors.New("error in unmarshalling tenant config to struct.") + } + + return &tenantConfig, nil +} diff --git a/operator/internal/handlers/internal/gateway/tenant_configmap_test.go b/operator/internal/handlers/internal/gateway/tenant_configmap_test.go new file mode 100644 index 0000000000..208b58ca17 --- /dev/null +++ b/operator/internal/handlers/internal/gateway/tenant_configmap_test.go @@ -0,0 +1,96 @@ +package gateway + +import ( + "context" + "testing" + + 
"github.com/grafana/loki-operator/internal/manifests/openshift" + + "github.com/grafana/loki-operator/internal/external/k8s/k8sfakes" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var tenantConfigData = []byte(` +tenants: +- name: application + id: test-123 + openshift: + serviceAccount: lokistack-gateway-lokistack-dev + cookieSecret: test123 +- name: infrastructure + id: test-456 + openshift: + serviceAccount: lokistack-gateway-lokistack-dev + cookieSecret: test456 +- name: audit + id: test-789 + openshift: + serviceAccount: lokistack-gateway-lokistack-dev + cookieSecret: test789 +`) + +func TestGetTenantConfigMapData_ConfigMapExist(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "lokistack-gateway", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if name.Name == "lokistack-gateway" && name.Namespace == "some-ns" { + k.SetClientObject(object, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "lokistack-gateway", + Namespace: "some-ns", + }, + BinaryData: map[string][]byte{ + "tenants.yaml": tenantConfigData, + }, + }) + } + return nil + } + + ts := GetTenantConfigMapData(context.TODO(), k, r) + require.NotNil(t, ts) + + expected := map[string]openshift.TenantData{ + "application": { + TenantID: "test-123", + CookieSecret: "test123", + }, + "infrastructure": { + TenantID: "test-456", + CookieSecret: "test456", + }, + "audit": { + TenantID: "test-789", + CookieSecret: "test789", + }, + } + require.Equal(t, expected, ts) +} + +func TestGetTenantConfigMapData_ConfigMapNotExist(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "lokistack-gateway", + 
Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return nil + } + + ts := GetTenantConfigMapData(context.TODO(), k, r) + require.Nil(t, ts) +} diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets.go b/operator/internal/handlers/internal/gateway/tenant_secrets.go new file mode 100644 index 0000000000..15ab69fbcf --- /dev/null +++ b/operator/internal/handlers/internal/gateway/tenant_secrets.go @@ -0,0 +1,71 @@ +package gateway + +import ( + "context" + "fmt" + + "github.com/ViaQ/logerr/kverrors" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s" + "github.com/grafana/loki-operator/internal/handlers/internal/secrets" + "github.com/grafana/loki-operator/internal/manifests" + "github.com/grafana/loki-operator/internal/status" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetTenantSecrets returns the list to gateway tenant secrets for a tenant mode. +// For modes static and dynamic the secrets are fetched from external provided +// secrets. For mode openshift-logging a secret per default tenants are created. +// All secrets live in the same namespace as the lokistack request. 
+func GetTenantSecrets( + ctx context.Context, + k k8s.Client, + req ctrl.Request, + stack *lokiv1beta1.LokiStack, +) ([]*manifests.TenantSecrets, error) { + var ( + tenantSecrets []*manifests.TenantSecrets + gatewaySecret corev1.Secret + ) + + for _, tenant := range stack.Spec.Tenants.Authentication { + key := client.ObjectKey{Name: tenant.OIDC.Secret.Name, Namespace: req.Namespace} + if err := k.Get(ctx, key, &gatewaySecret); err != nil { + if apierrors.IsNotFound(err) { + statusErr := status.SetDegradedCondition(ctx, k, req, + fmt.Sprintf("Missing secrets for tenant %s", tenant.TenantName), + lokiv1beta1.ReasonMissingGatewayTenantSecret, + ) + if statusErr != nil { + return nil, statusErr + } + + return nil, kverrors.Wrap(err, "Missing gateway secrets") + } + return nil, kverrors.Wrap(err, "failed to lookup lokistack gateway tenant secret", + "name", key) + } + + var ts *manifests.TenantSecrets + ts, err := secrets.ExtractGatewaySecret(&gatewaySecret, tenant.TenantName) + if err != nil { + statusErr := status.SetDegradedCondition(ctx, k, req, + "Invalid gateway tenant secret contents", + lokiv1beta1.ReasonInvalidGatewayTenantSecret, + ) + if statusErr != nil { + return nil, statusErr + } + + return nil, kverrors.Wrap(err, "Invalid gateway tenant secret") + } + tenantSecrets = append(tenantSecrets, ts) + } + + return tenantSecrets, nil +} diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go new file mode 100644 index 0000000000..0802624802 --- /dev/null +++ b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go @@ -0,0 +1,144 @@ +package gateway + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s/k8sfakes" + "github.com/grafana/loki-operator/internal/manifests" + + corev1 "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestGetTenantSecrets_StaticMode(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + s := &lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mystack", + Namespace: "some-ns", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.Static, + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "test", + OIDC: &lokiv1beta1.OIDCSpec{ + Secret: &lokiv1beta1.TenantSecretSpec{ + Name: "test", + }, + }, + }, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if name.Name == "test" && name.Namespace == "some-ns" { + k.SetClientObject(object, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "some-ns", + }, + Data: map[string][]byte{ + "clientID": []byte("test"), + "clientSecret": []byte("test"), + "issuerCAPath": []byte("/path/to/ca/file"), + }, + }) + } + return nil + } + + ts, err := GetTenantSecrets(context.TODO(), k, r, s) + require.NoError(t, err) + + expected := []*manifests.TenantSecrets{ + { + TenantName: "test", + ClientID: "test", + ClientSecret: "test", + IssuerCAPath: "/path/to/ca/file", + }, + } + require.ElementsMatch(t, ts, expected) +} + +func TestGetTenantSecrets_DynamicMode(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + s := &lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mystack", + Namespace: "some-ns", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.Dynamic, + Authentication: []lokiv1beta1.AuthenticationSpec{ + { 
+ TenantName: "test", + TenantID: "test", + OIDC: &lokiv1beta1.OIDCSpec{ + Secret: &lokiv1beta1.TenantSecretSpec{ + Name: "test", + }, + }, + }, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if name.Name == "test" && name.Namespace == "some-ns" { + k.SetClientObject(object, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "some-ns", + }, + Data: map[string][]byte{ + "clientID": []byte("test"), + "clientSecret": []byte("test"), + "issuerCAPath": []byte("/path/to/ca/file"), + }, + }) + } + return nil + } + + ts, err := GetTenantSecrets(context.TODO(), k, r, s) + require.NoError(t, err) + + expected := []*manifests.TenantSecrets{ + { + TenantName: "test", + ClientID: "test", + ClientSecret: "test", + IssuerCAPath: "/path/to/ca/file", + }, + } + require.ElementsMatch(t, ts, expected) +} diff --git a/operator/internal/handlers/internal/secrets/secrets.go b/operator/internal/handlers/internal/secrets/secrets.go new file mode 100644 index 0000000000..24a1f722ba --- /dev/null +++ b/operator/internal/handlers/internal/secrets/secrets.go @@ -0,0 +1,68 @@ +package secrets + +import ( + "github.com/ViaQ/logerr/kverrors" + "github.com/grafana/loki-operator/internal/manifests" + + corev1 "k8s.io/api/core/v1" +) + +// Extract reads a k8s secret into a manifest object storage struct if valid. 
+func Extract(s *corev1.Secret) (*manifests.ObjectStorage, error) { + // Extract and validate mandatory fields + endpoint, ok := s.Data["endpoint"] + if !ok { + return nil, kverrors.New("missing secret field", "field", "endpoint") + } + buckets, ok := s.Data["bucketnames"] + if !ok { + return nil, kverrors.New("missing secret field", "field", "bucketnames") + } + // TODO buckets are comma-separated list + id, ok := s.Data["access_key_id"] + if !ok { + return nil, kverrors.New("missing secret field", "field", "access_key_id") + } + secret, ok := s.Data["access_key_secret"] + if !ok { + return nil, kverrors.New("missing secret field", "field", "access_key_secret") + } + + // Extract and validate optional fields + region, ok := s.Data["region"] + if !ok { + region = []byte("") + } + + return &manifests.ObjectStorage{ + Endpoint: string(endpoint), + Buckets: string(buckets), + AccessKeyID: string(id), + AccessKeySecret: string(secret), + Region: string(region), + }, nil +} + +// ExtractGatewaySecret reads a k8s secret into a manifest tenant secret struct if valid. 
+func ExtractGatewaySecret(s *corev1.Secret, tenantName string) (*manifests.TenantSecrets, error) { + // Extract and validate mandatory fields + clientID, ok := s.Data["clientID"] + if !ok { + return nil, kverrors.New("missing clientID field", "field", "clientID") + } + clientSecret, ok := s.Data["clientSecret"] + if !ok { + return nil, kverrors.New("missing clientSecret field", "field", "clientSecret") + } + issuerCAPath, ok := s.Data["issuerCAPath"] + if !ok { + return nil, kverrors.New("missing issuerCAPath field", "field", "issuerCAPath") + } + + return &manifests.TenantSecrets{ + TenantName: tenantName, + ClientID: string(clientID), + ClientSecret: string(clientSecret), + IssuerCAPath: string(issuerCAPath), + }, nil +} diff --git a/operator/internal/handlers/internal/secrets/secrets_test.go b/operator/internal/handlers/internal/secrets/secrets_test.go new file mode 100644 index 0000000000..2e1200ec40 --- /dev/null +++ b/operator/internal/handlers/internal/secrets/secrets_test.go @@ -0,0 +1,142 @@ +package secrets_test + +import ( + "testing" + + "github.com/grafana/loki-operator/internal/handlers/internal/secrets" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" +) + +func TestExtract(t *testing.T) { + type test struct { + name string + secret *corev1.Secret + wantErr bool + } + table := []test{ + { + name: "missing endpoint", + secret: &corev1.Secret{}, + wantErr: true, + }, + { + name: "missing bucketnames", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "endpoint": []byte("here"), + }, + }, + wantErr: true, + }, + { + name: "missing access_key_id", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "endpoint": []byte("here"), + "bucketnames": []byte("this,that"), + }, + }, + wantErr: true, + }, + { + name: "missing access_key_secret", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "endpoint": []byte("here"), + "bucketnames": []byte("this,that"), + "access_key_id": []byte("id"), + }, + }, + wantErr: true, + }, + { + 
name: "all set", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "endpoint": []byte("here"), + "bucketnames": []byte("this,that"), + "access_key_id": []byte("id"), + "access_key_secret": []byte("secret"), + }, + }, + }, + } + for _, tst := range table { + tst := tst + t.Run(tst.name, func(t *testing.T) { + t.Parallel() + + _, err := secrets.Extract(tst.secret) + if !tst.wantErr { + require.NoError(t, err) + } + if tst.wantErr { + require.NotNil(t, err) + } + }) + } +} + +func TestExtractGatewaySecret(t *testing.T) { + type test struct { + name string + tenantName string + secret *corev1.Secret + wantErr bool + } + table := []test{ + { + name: "missing clientID", + tenantName: "tenant-a", + secret: &corev1.Secret{}, + wantErr: true, + }, + { + name: "missing clientSecret", + tenantName: "tenant-a", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "clientID": []byte("test"), + }, + }, + wantErr: true, + }, + { + name: "missing issuerCAPath", + tenantName: "tenant-a", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "clientID": []byte("test"), + "clientSecret": []byte("test"), + }, + }, + wantErr: true, + }, + { + name: "all set", + tenantName: "tenant-a", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "clientID": []byte("test"), + "clientSecret": []byte("test"), + "issuerCAPath": []byte("/tmp/test"), + }, + }, + }, + } + for _, tst := range table { + tst := tst + t.Run(tst.name, func(t *testing.T) { + t.Parallel() + + _, err := secrets.ExtractGatewaySecret(tst.secret, tst.tenantName) + if !tst.wantErr { + require.NoError(t, err) + } + if tst.wantErr { + require.NotNil(t, err) + } + }) + } +} diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go new file mode 100644 index 0000000000..757ac1c973 --- /dev/null +++ b/operator/internal/handlers/lokistack_create_or_update.go @@ -0,0 +1,189 @@ +package handlers + +import ( + "context" + "fmt" + "os" + + 
"github.com/grafana/loki-operator/internal/manifests/openshift" + + "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/log" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s" + "github.com/grafana/loki-operator/internal/handlers/internal/gateway" + "github.com/grafana/loki-operator/internal/handlers/internal/secrets" + "github.com/grafana/loki-operator/internal/manifests" + "github.com/grafana/loki-operator/internal/metrics" + "github.com/grafana/loki-operator/internal/status" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// CreateOrUpdateLokiStack handles LokiStack create and update events. +func CreateOrUpdateLokiStack(ctx context.Context, req ctrl.Request, k k8s.Client, s *runtime.Scheme, flags manifests.FeatureFlags) error { + ll := log.WithValues("lokistack", req.NamespacedName, "event", "createOrUpdate") + + var stack lokiv1beta1.LokiStack + if err := k.Get(ctx, req.NamespacedName, &stack); err != nil { + if apierrors.IsNotFound(err) { + // maybe the user deleted it before we could react? 
Either way this isn't an issue + ll.Error(err, "could not find the requested loki stack", "name", req.NamespacedName) + return nil + } + return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) + } + + img := os.Getenv(manifests.EnvRelatedImageLoki) + if img == "" { + img = manifests.DefaultContainerImage + } + + gwImg := os.Getenv(manifests.EnvRelatedImageGateway) + if gwImg == "" { + gwImg = manifests.DefaultLokiStackGatewayImage + } + + var s3secret corev1.Secret + key := client.ObjectKey{Name: stack.Spec.Storage.Secret.Name, Namespace: stack.Namespace} + if err := k.Get(ctx, key, &s3secret); err != nil { + if apierrors.IsNotFound(err) { + return status.SetDegradedCondition(ctx, k, req, + "Missing object storage secret", + lokiv1beta1.ReasonMissingObjectStorageSecret, + ) + } + return kverrors.Wrap(err, "failed to lookup lokistack s3 secret", "name", key) + } + + storage, err := secrets.Extract(&s3secret) + if err != nil { + return status.SetDegradedCondition(ctx, k, req, + "Invalid object storage secret contents", + lokiv1beta1.ReasonInvalidObjectStorageSecret, + ) + } + + var ( + baseDomain string + tenantSecrets []*manifests.TenantSecrets + tenantConfigMap map[string]openshift.TenantData + ) + if flags.EnableGateway && stack.Spec.Tenants != nil { + if err = gateway.ValidateModes(stack); err != nil { + return status.SetDegradedCondition(ctx, k, req, + fmt.Sprintf("Invalid tenants configuration: %s", err), + lokiv1beta1.ReasonInvalidTenantsConfiguration, + ) + } + + if stack.Spec.Tenants.Mode != lokiv1beta1.OpenshiftLogging { + tenantSecrets, err = gateway.GetTenantSecrets(ctx, k, req, &stack) + if err != nil { + return err + } + } + + if stack.Spec.Tenants.Mode == lokiv1beta1.OpenshiftLogging { + baseDomain, err = gateway.GetOpenShiftBaseDomain(ctx, k, req) + if err != nil { + return nil + } + + // extract the existing tenant's id, cookieSecret if exists, otherwise create new. 
+ tenantConfigMap = gateway.GetTenantConfigMapData(ctx, k, req) + } + } + + // Here we will translate the lokiv1beta1.LokiStack options into manifest options + opts := manifests.Options{ + Name: req.Name, + Namespace: req.Namespace, + Image: img, + GatewayImage: gwImg, + GatewayBaseDomain: baseDomain, + Stack: stack.Spec, + Flags: flags, + ObjectStorage: *storage, + TenantSecrets: tenantSecrets, + TenantConfigMap: tenantConfigMap, + } + + ll.Info("begin building manifests") + + if optErr := manifests.ApplyDefaultSettings(&opts); optErr != nil { + ll.Error(optErr, "failed to conform options to build settings") + return optErr + } + + if flags.EnableGateway { + if optErr := manifests.ApplyGatewayDefaultOptions(&opts); optErr != nil { + ll.Error(optErr, "failed to apply defaults options to gateway settings ") + return err + } + } + + objects, err := manifests.BuildAll(opts) + if err != nil { + ll.Error(err, "failed to build manifests") + return err + } + ll.Info("manifests built", "count", len(objects)) + + var errCount int32 + + for _, obj := range objects { + l := ll.WithValues( + "object_name", obj.GetName(), + "object_kind", obj.GetObjectKind(), + ) + + if isNamespaceScoped(obj) { + obj.SetNamespace(req.Namespace) + + if err := ctrl.SetControllerReference(&stack, obj, s); err != nil { + l.Error(err, "failed to set controller owner reference to resource") + errCount++ + continue + } + } + + desired := obj.DeepCopyObject().(client.Object) + mutateFn := manifests.MutateFuncFor(obj, desired) + + op, err := ctrl.CreateOrUpdate(ctx, k, obj, mutateFn) + if err != nil { + l.Error(err, "failed to configure resource") + errCount++ + continue + } + + l.Info(fmt.Sprintf("Resource has been %s", op)) + } + + if errCount > 0 { + return kverrors.New("failed to configure lokistack resources", "name", req.NamespacedName) + } + + // 1x.extra-small is used only for development, so the metrics will not + // be collected. 
+ if opts.Stack.Size != lokiv1beta1.SizeOneXExtraSmall { + metrics.Collect(&opts.Stack, opts.Name) + } + + return nil +} + +func isNamespaceScoped(obj client.Object) bool { + switch obj.(type) { + case *rbacv1.ClusterRole, *rbacv1.ClusterRoleBinding: + return false + default: + return true + } +} diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go new file mode 100644 index 0000000000..f0debc4e2a --- /dev/null +++ b/operator/internal/handlers/lokistack_create_or_update_test.go @@ -0,0 +1,950 @@ +package handlers_test + +import ( + "context" + "errors" + "flag" + "io/ioutil" + "os" + "testing" + + "github.com/ViaQ/logerr/log" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s/k8sfakes" + "github.com/grafana/loki-operator/internal/handlers" + "github.com/grafana/loki-operator/internal/manifests" + routev1 "github.com/openshift/api/route/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/pointer" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + scheme = runtime.NewScheme() + flags = manifests.FeatureFlags{ + EnableCertificateSigningService: false, + EnableServiceMonitors: false, + EnableTLSServiceMonitorConfig: false, + } + + defaultSecret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{ + "endpoint": []byte("s3://your-endpoint"), + "region": []byte("a-region"), + "bucketnames": 
[]byte("bucket1,bucket2"), + "access_key_id": []byte("a-secret-id"), + "access_key_secret": []byte("a-secret-key"), + }, + } + + defaultGatewaySecret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-gateway-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{ + "clientID": []byte("client-123"), + "clientSecret": []byte("client-secret-xyz"), + "issuerCAPath": []byte("/tmp/test/ca.pem"), + }, + } + + invalidSecret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{}, + } +) + +func TestMain(m *testing.M) { + testing.Init() + flag.Parse() + + if testing.Verbose() { + // set to the highest for verbose testing + log.SetLogLevel(5) + } else { + if err := log.SetOutput(ioutil.Discard); err != nil { + // This would only happen if the default logger was changed which it hasn't so + // we can assume that a panic is necessary and the developer is to blame. + panic(err) + } + } + + // Register the clientgo and CRD schemes + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(routev1.AddToScheme(scheme)) + utilruntime.Must(lokiv1beta1.AddToScheme(scheme)) + + log.Init("testing") + os.Exit(m.Run()) +} + +func TestCreateOrUpdateLokiStack_WhenGetReturnsNotFound_DoesNotError(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(ctx context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags) + require.NoError(t, err) + + // make sure create was NOT called because the Get failed + require.Zero(t, k.CreateCallCount()) +} + +func TestCreateOrUpdateLokiStack_WhenGetReturnsAnErrorOtherThanNotFound_ReturnsTheError(t *testing.T) { + k := 
&k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + badRequestErr := apierrors.NewBadRequest("you do not belong here") + k.GetStub = func(ctx context.Context, name types.NamespacedName, object client.Object) error { + return badRequestErr + } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags) + + require.Equal(t, badRequestErr, errors.Unwrap(err)) + + // make sure create was NOT called because the Get failed + require.Zero(t, k.CreateCallCount()) +} + +func TestCreateOrUpdateLokiStack_SetsNamespaceOnAllObjects(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Storage: lokiv1beta1.ObjectStorageSpec{ + Secret: lokiv1beta1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + }, + }, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + Secret: &lokiv1beta1.TenantSecretSpec{ + Name: defaultGatewaySecret.Name, + }, + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: &lokiv1beta1.OPASpec{ + URL: "some-url", + }, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, out client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(out, &stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(out, &defaultSecret) + return nil + } + if defaultGatewaySecret.Name == name.Name { + k.SetClientObject(out, 
&defaultGatewaySecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error { + assert.Equal(t, r.Namespace, o.GetNamespace()) + return nil + } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags) + require.NoError(t, err) + + // make sure create was called + require.NotZero(t, k.CreateCallCount()) +} + +func TestCreateOrUpdateLokiStack_SetsOwnerRefOnAllObjects(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "someStack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Storage: lokiv1beta1.ObjectStorageSpec{ + Secret: lokiv1beta1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + }, + }, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + Secret: &lokiv1beta1.TenantSecretSpec{ + Name: defaultGatewaySecret.Name, + }, + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: &lokiv1beta1.OPASpec{ + URL: "some-url", + }, + }, + }, + }, + } + + // Create looks up the CR first, so we need to return our fake stack + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + if defaultGatewaySecret.Name == name.Name { + k.SetClientObject(object, &defaultGatewaySecret) + 
return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + expected := metav1.OwnerReference{ + APIVersion: lokiv1beta1.GroupVersion.String(), + Kind: stack.Kind, + Name: stack.Name, + UID: stack.UID, + Controller: pointer.BoolPtr(true), + BlockOwnerDeletion: pointer.BoolPtr(true), + } + + k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error { + // OwnerRefs are appended so we have to find ours in the list + var ref metav1.OwnerReference + var found bool + for _, or := range o.GetOwnerReferences() { + if or.UID == stack.UID { + found = true + ref = or + break + } + } + + require.True(t, found, "expected to find a matching ownerRef, but did not") + require.EqualValues(t, expected, ref) + return nil + } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags) + require.NoError(t, err) + + // make sure create was called + require.NotZero(t, k.CreateCallCount()) +} + +func TestCreateOrUpdateLokiStack_WhenSetControllerRefInvalid_ContinueWithOtherObjects(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "someStack", + // Set invalid namespace here, because + // because cross-namespace controller + // references are not allowed + Namespace: "invalid-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Storage: lokiv1beta1.ObjectStorageSpec{ + Secret: lokiv1beta1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + }, + }, + }, + } + + // Create looks up the CR first, so we need to return our fake stack + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == 
name.Namespace { + k.SetClientObject(object, &stack) + } + if defaultSecret.Name == name.Name { + k.SetClientObject(object, &defaultSecret) + } + return nil + } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags) + + // make sure error is returned to re-trigger reconciliation + require.Error(t, err) +} + +func TestCreateOrUpdateLokiStack_WhenGetReturnsNoError_UpdateObjects(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "someStack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Storage: lokiv1beta1.ObjectStorageSpec{ + Secret: lokiv1beta1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + }, + }, + }, + } + + svc := corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "loki-gossip-ring-my-stack", + Namespace: "some-ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "loki", + "app.kubernetes.io/provider": "openshift", + "loki.grafana.com/name": "my-stack", + + // Add custom label to fake semantic not equal + "test": "test", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "loki.grafana.com/v1beta1", + Kind: "LokiStack", + Name: "someStack", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + Controller: pointer.BoolPtr(true), + BlockOwnerDeletion: pointer.BoolPtr(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: "gossip", + Port: 7946, + Protocol: "TCP", + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "loki", + "app.kubernetes.io/provider": "openshift", + "loki.grafana.com/name": "my-stack", + }, + }, + } 
+ + // Create looks up the CR first, so we need to return our fake stack + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &stack) + } + if defaultSecret.Name == name.Name { + k.SetClientObject(object, &defaultSecret) + } + if svc.Name == name.Name && svc.Namespace == name.Namespace { + k.SetClientObject(object, &svc) + } + return nil + } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags) + require.NoError(t, err) + + // make sure create not called + require.Zero(t, k.CreateCallCount()) + + // make sure update was called + require.NotZero(t, k.UpdateCallCount()) +} + +func TestCreateOrUpdateLokiStack_WhenCreateReturnsError_ContinueWithOtherObjects(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "someStack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Storage: lokiv1beta1.ObjectStorageSpec{ + Secret: lokiv1beta1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + }, + }, + }, + } + + // GetStub looks up the CR first, so we need to return our fake stack + // return NotFound for everything else to trigger create. 
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + // CreateStub returns an error for each resource to trigger reconciliation a new. + k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error { + return apierrors.NewTooManyRequestsError("too many create requests") + } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags) + + // make sure error is returned to re-trigger reconciliation + require.Error(t, err) +} + +func TestCreateOrUpdateLokiStack_WhenUpdateReturnsError_ContinueWithOtherObjects(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "someStack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Storage: lokiv1beta1.ObjectStorageSpec{ + Secret: lokiv1beta1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + }, + }, + }, + } + + svc := corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "loki-gossip-ring-my-stack", + Namespace: "some-ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "loki", + "app.kubernetes.io/provider": "openshift", + "loki.grafana.com/name": "my-stack", + + // Add custom label to fake semantic not equal + "test": "test", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: 
"loki.grafana.com/v1beta1", + Kind: "LokiStack", + Name: "someStack", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + Controller: pointer.BoolPtr(true), + BlockOwnerDeletion: pointer.BoolPtr(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: "gossip", + Port: 7946, + Protocol: "TCP", + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "loki", + "app.kubernetes.io/provider": "openshift", + "loki.grafana.com/name": "my-stack", + }, + }, + } + + // GetStub looks up the CR first, so we need to return our fake stack + // return NotFound for everything else to trigger create. + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &stack) + } + if defaultSecret.Name == name.Name { + k.SetClientObject(object, &defaultSecret) + } + if svc.Name == name.Name && svc.Namespace == name.Namespace { + k.SetClientObject(object, &svc) + } + return nil + } + + // CreateStub returns an error for each resource to trigger reconciliation a new. 
+ k.UpdateStub = func(_ context.Context, o client.Object, _ ...client.UpdateOption) error { + return apierrors.NewTooManyRequestsError("too many create requests") + } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags) + + // make sure error is returned to re-trigger reconciliation + require.Error(t, err) +} + +func TestCreateOrUpdateLokiStack_WhenMissingSecret_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := &lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Storage: lokiv1beta1.ObjectStorageSpec{ + Secret: lokiv1beta1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + }, + }, + }, + } + + // GetStub looks up the CR first, so we need to return our fake stack + // return NotFound for everything else to trigger create. 
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, stack) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags) + + // make sure error is returned to re-trigger reconciliation + require.NoError(t, err) + + // make sure status and status-update calls + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} + +func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := &lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Storage: lokiv1beta1.ObjectStorageSpec{ + Secret: lokiv1beta1.ObjectStorageSecretSpec{ + Name: invalidSecret.Name, + }, + }, + }, + } + + // GetStub looks up the CR first, so we need to return our fake stack + // return NotFound for everything else to trigger create. 
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, stack) + return nil + } + if name.Name == invalidSecret.Name { + k.SetClientObject(object, &invalidSecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags) + + // make sure error is returned to re-trigger reconciliation + require.NoError(t, err) + + // make sure status and status-update calls + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} + +func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + ff := manifests.FeatureFlags{ + EnableGateway: true, + } + + stack := &lokiv1beta1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Storage: lokiv1beta1.ObjectStorageSpec{ + Secret: lokiv1beta1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + }, + }, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + Secret: &lokiv1beta1.TenantSecretSpec{ + Name: defaultGatewaySecret.Name, + }, + }, + }, + }, + Authorization: nil, + }, + }, + } + + // GetStub looks up the CR first, so we need to return our fake stack + // return NotFound for everything else to trigger create. 
// TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded verifies
// that, with the gateway feature enabled, a LokiStack whose tenant OIDC spec
// references a secret the client cannot find results in a reconciliation
// error and a degraded-status update.
func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing.T) {
	sw := &k8sfakes.FakeStatusWriter{}
	k := &k8sfakes.FakeClient{}
	r := ctrl.Request{
		NamespacedName: types.NamespacedName{
			Name:      "my-stack",
			Namespace: "some-ns",
		},
	}

	ff := manifests.FeatureFlags{
		EnableGateway: true,
	}

	stack := &lokiv1beta1.LokiStack{
		TypeMeta: metav1.TypeMeta{
			Kind: "LokiStack",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-stack",
			Namespace: "some-ns",
			UID:       "b23f9a38-9672-499f-8c29-15ede74d3ece",
		},
		Spec: lokiv1beta1.LokiStackSpec{
			Size: lokiv1beta1.SizeOneXExtraSmall,
			Storage: lokiv1beta1.ObjectStorageSpec{
				Secret: lokiv1beta1.ObjectStorageSecretSpec{
					Name: defaultSecret.Name,
				},
			},
			Tenants: &lokiv1beta1.TenantsSpec{
				Mode: "dynamic",
				Authentication: []lokiv1beta1.AuthenticationSpec{
					{
						TenantName: "test",
						TenantID:   "1234",
						OIDC: &lokiv1beta1.OIDCSpec{
							Secret: &lokiv1beta1.TenantSecretSpec{
								Name: defaultGatewaySecret.Name,
							},
						},
					},
				},
				Authorization: &lokiv1beta1.AuthorizationSpec{
					OPA: &lokiv1beta1.OPASpec{
						URL: "some-url",
					},
				},
			},
		},
	}

	// GetStub looks up the CR first, so we need to return our fake stack.
	// The storage secret is served, but the gateway secret deliberately is
	// NOT — it falls through to NotFound, which is the condition under test.
	k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
		o, ok := object.(*lokiv1beta1.LokiStack)
		if r.Name == name.Name && r.Namespace == name.Namespace && ok {
			k.SetClientObject(o, stack)
			return nil
		}
		if defaultSecret.Name == name.Name {
			k.SetClientObject(object, &defaultSecret)
			return nil
		}
		return apierrors.NewNotFound(schema.GroupResource{}, "something is not found")
	}

	k.StatusStub = func() client.StatusWriter { return sw }

	err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, ff)

	// make sure error is returned to re-trigger reconciliation
	require.Error(t, err)

	// make sure status and status-update calls
	require.NotZero(t, k.StatusCallCount())
	require.NotZero(t, sw.UpdateCallCount())
}
// GetStub looks up the CR first, so we need to return our fake stack + // return NotFound for everything else to trigger create. + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + o, ok := object.(*lokiv1beta1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && ok { + k.SetClientObject(o, stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + if name.Name == invalidSecret.Name { + k.SetClientObject(object, &invalidSecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, ff) + + // make sure error is returned to re-trigger reconciliation + require.Error(t, err) + + // make sure status and status-update calls + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} diff --git a/operator/internal/manifests/build.go b/operator/internal/manifests/build.go new file mode 100644 index 0000000000..75c3f9973a --- /dev/null +++ b/operator/internal/manifests/build.go @@ -0,0 +1,111 @@ +package manifests + +import ( + "github.com/ViaQ/logerr/kverrors" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests/internal" + + "github.com/imdario/mergo" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// BuildAll builds all manifests required to run a Loki Stack +func BuildAll(opts Options) ([]client.Object, error) { + res := make([]client.Object, 0) + + cm, sha1C, mapErr := LokiConfigMap(opts) + if mapErr != nil { + return nil, mapErr + } + opts.ConfigSHA1 = sha1C + + distributorObjs, err := BuildDistributor(opts) + if err != nil { + return nil, err + } + + ingesterObjs, err := BuildIngester(opts) + if err != nil { + return nil, err + } + + querierObjs, err := 
BuildQuerier(opts) + if err != nil { + return nil, err + } + + compactorObjs, err := BuildCompactor(opts) + if err != nil { + return nil, err + } + + queryFrontendObjs, err := BuildQueryFrontend(opts) + if err != nil { + return nil, err + } + + indexGatewayObjs, err := BuildIndexGateway(opts) + if err != nil { + return nil, err + } + + res = append(res, cm) + res = append(res, distributorObjs...) + res = append(res, ingesterObjs...) + res = append(res, querierObjs...) + res = append(res, compactorObjs...) + res = append(res, queryFrontendObjs...) + res = append(res, indexGatewayObjs...) + res = append(res, BuildLokiGossipRingService(opts.Name)) + + if opts.Flags.EnableGateway { + gatewayObjects, err := BuildGateway(opts) + if err != nil { + return nil, err + } + + res = append(res, gatewayObjects...) + } + + if opts.Flags.EnableServiceMonitors { + res = append(res, BuildServiceMonitors(opts)...) + } + + return res, nil +} + +// DefaultLokiStackSpec returns the default configuration for a LokiStack of +// the specified size +func DefaultLokiStackSpec(size lokiv1beta1.LokiStackSizeType) *lokiv1beta1.LokiStackSpec { + defaults := internal.StackSizeTable[size] + return (&defaults).DeepCopy() +} + +// ApplyDefaultSettings manipulates the options to conform to +// build specifications +func ApplyDefaultSettings(opts *Options) error { + spec := DefaultLokiStackSpec(opts.Stack.Size) + + if err := mergo.Merge(spec, opts.Stack, mergo.WithOverride); err != nil { + return kverrors.Wrap(err, "failed merging stack user options", "name", opts.Name) + } + + strictOverrides := lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + // Compactor is a singelton application. + // Only one replica allowed!!! 
+ Replicas: 1, + }, + }, + } + + if err := mergo.Merge(spec, strictOverrides, mergo.WithOverride); err != nil { + return kverrors.Wrap(err, "failed to merge strict defaults") + } + + opts.ResourceRequirements = internal.ResourceRequirementsTable[opts.Stack.Size] + opts.Stack = *spec + + return nil +} diff --git a/operator/internal/manifests/build_test.go b/operator/internal/manifests/build_test.go new file mode 100644 index 0000000000..7e2609a6ed --- /dev/null +++ b/operator/internal/manifests/build_test.go @@ -0,0 +1,302 @@ +package manifests + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests/internal" + "github.com/stretchr/testify/require" +) + +func TestApplyUserOptions_OverrideDefaults(t *testing.T) { + allSizes := []lokiv1beta1.LokiStackSizeType{ + lokiv1beta1.SizeOneXExtraSmall, + lokiv1beta1.SizeOneXSmall, + lokiv1beta1.SizeOneXMedium, + } + for _, size := range allSizes { + opt := Options{ + Name: "abcd", + Namespace: "efgh", + Stack: lokiv1beta1.LokiStackSpec{ + Size: size, + Template: &lokiv1beta1.LokiTemplateSpec{ + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 42, + }, + }, + }, + } + err := ApplyDefaultSettings(&opt) + defs := internal.StackSizeTable[size] + + require.NoError(t, err) + require.Equal(t, defs.Size, opt.Stack.Size) + require.Equal(t, defs.Limits, opt.Stack.Limits) + require.Equal(t, defs.ReplicationFactor, opt.Stack.ReplicationFactor) + require.Equal(t, defs.ManagementState, opt.Stack.ManagementState) + require.Equal(t, defs.Template.Ingester, opt.Stack.Template.Ingester) + require.Equal(t, defs.Template.Querier, opt.Stack.Template.Querier) + require.Equal(t, defs.Template.QueryFrontend, opt.Stack.Template.QueryFrontend) + + // Require distributor replicas to be set by user overwrite + require.NotEqual(t, defs.Template.Distributor.Replicas, 
// TestApplyUserOptions_AlwaysSetCompactorReplicasToOne verifies that the
// strict overrides in ApplyDefaultSettings force the compactor back to the
// size-table default (a single replica) even when the user requests more,
// across every supported stack size.
func TestApplyUserOptions_AlwaysSetCompactorReplicasToOne(t *testing.T) {
	allSizes := []lokiv1beta1.LokiStackSizeType{
		lokiv1beta1.SizeOneXExtraSmall,
		lokiv1beta1.SizeOneXSmall,
		lokiv1beta1.SizeOneXMedium,
	}
	for _, size := range allSizes {
		opt := Options{
			Name:      "abcd",
			Namespace: "efgh",
			Stack: lokiv1beta1.LokiStackSpec{
				Size: size,
				Template: &lokiv1beta1.LokiTemplateSpec{
					Compactor: &lokiv1beta1.LokiComponentSpec{
						// User asks for 2 replicas; the override must win.
						Replicas: 2,
					},
				},
			},
		}
		err := ApplyDefaultSettings(&opt)
		defs := internal.StackSizeTable[size]

		require.NoError(t, err)

		// Require compactor to be reverted to 1 replica
		require.Equal(t, defs.Template.Compactor, opt.Stack.Template.Compactor)
	}
}
t.Run(tst.desc, func(t *testing.T) { + t.Parallel() + + err := ApplyDefaultSettings(&tst.BuildOptions) + require.NoError(t, err) + + objects, buildErr := BuildAll(tst.BuildOptions) + + require.NoError(t, buildErr) + require.Equal(t, tst.MonitorCount, serviceMonitorCount(objects)) + }) + } +} + +func TestBuildAll_WithFeatureFlags_EnableCertificateSigningService(t *testing.T) { + type test struct { + desc string + BuildOptions Options + } + + table := []test{ + { + desc: "disabled certificate signing service", + BuildOptions: Options{ + Name: "test", + Namespace: "test", + Stack: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXSmall, + }, + Flags: FeatureFlags{ + EnableCertificateSigningService: false, + EnableServiceMonitors: false, + EnableTLSServiceMonitorConfig: false, + }, + }, + }, + { + desc: "enabled certificate signing service for every http service", + BuildOptions: Options{ + Name: "test", + Namespace: "test", + Stack: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXSmall, + }, + Flags: FeatureFlags{ + EnableCertificateSigningService: true, + EnableServiceMonitors: false, + EnableTLSServiceMonitorConfig: false, + }, + }, + }, + } + + for _, tst := range table { + tst := tst + t.Run(tst.desc, func(t *testing.T) { + t.Parallel() + + err := ApplyDefaultSettings(&tst.BuildOptions) + require.NoError(t, err) + + httpServices := []*corev1.Service{ + NewDistributorHTTPService(tst.BuildOptions), + NewIngesterHTTPService(tst.BuildOptions), + NewQuerierHTTPService(tst.BuildOptions), + NewQueryFrontendHTTPService(tst.BuildOptions), + NewCompactorHTTPService(tst.BuildOptions), + NewIndexGatewayHTTPService(tst.BuildOptions), + NewGatewayHTTPService(tst.BuildOptions), + } + + for _, service := range httpServices { + if !tst.BuildOptions.Flags.EnableCertificateSigningService { + require.Equal(t, service.ObjectMeta.Annotations, map[string]string{}) + } else { + require.NotNil(t, 
service.ObjectMeta.Annotations["service.beta.openshift.io/serving-cert-secret-name"]) + } + } + }) + } +} + +func TestBuildAll_WithFeatureFlags_EnableGateway(t *testing.T) { + type test struct { + desc string + BuildOptions Options + } + table := []test{ + { + desc: "no lokistack-gateway created", + BuildOptions: Options{ + Name: "test", + Namespace: "test", + Stack: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXSmall, + }, + Flags: FeatureFlags{ + EnableGateway: false, + EnableTLSServiceMonitorConfig: false, + }, + }, + }, + { + desc: "lokistack-gateway created", + BuildOptions: Options{ + Name: "test", + Namespace: "test", + Stack: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.Dynamic, + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + Secret: &lokiv1beta1.TenantSecretSpec{ + Name: "test", + }, + IssuerURL: "https://127.0.0.1:5556/dex", + RedirectURL: "https://localhost:8443/oidc/test/callback", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: &lokiv1beta1.OPASpec{ + URL: "http://127.0.0.1:8181/v1/data/observatorium/allow", + }, + }, + }, + }, + Flags: FeatureFlags{ + EnableGateway: true, + EnableTLSServiceMonitorConfig: true, + }, + }, + }, + } + for _, tst := range table { + tst := tst + t.Run(tst.desc, func(t *testing.T) { + t.Parallel() + err := ApplyDefaultSettings(&tst.BuildOptions) + require.NoError(t, err) + objects, buildErr := BuildAll(tst.BuildOptions) + require.NoError(t, buildErr) + if tst.BuildOptions.Flags.EnableGateway { + require.True(t, checkGatewayDeployed(objects, tst.BuildOptions.Name)) + } else { + require.False(t, checkGatewayDeployed(objects, tst.BuildOptions.Name)) + } + }) + } +} + +func serviceMonitorCount(objects []client.Object) int { + monitors := 0 + for _, obj := range objects { + if 
// checkGatewayDeployed reports whether the object list contains a Deployment
// named after the lokistack-gateway for the given stack name.
func checkGatewayDeployed(objects []client.Object, stackName string) bool {
	for _, obj := range objects {
		if obj.GetObjectKind().GroupVersionKind().Kind == "Deployment" &&
			obj.GetName() == GatewayName(stackName) {
			return true
		}
	}
	return false
}

// --- operator/internal/manifests/compactor.go ---

// BuildCompactor builds the k8s objects required to run Loki Compactor.
// When TLS service-monitor config is enabled the statefulset is additionally
// wired for service-monitor PKI before being returned alongside its GRPC and
// HTTP services.
func BuildCompactor(opts Options) ([]client.Object, error) {
	statefulSet := NewCompactorStatefulSet(opts)
	if opts.Flags.EnableTLSServiceMonitorConfig {
		if err := configureCompactorServiceMonitorPKI(statefulSet, opts.Name); err != nil {
			return nil, err
		}
	}

	return []client.Object{
		statefulSet,
		NewCompactorGRPCService(opts),
		NewCompactorHTTPService(opts),
	}, nil
}
// NewCompactorStatefulSet creates a statefulset object for a compactor.
func NewCompactorStatefulSet(opts Options) *appsv1.StatefulSet {
	podSpec := corev1.PodSpec{
		Volumes: []corev1.Volume{
			{
				// Loki configuration is mounted from the stack-wide ConfigMap.
				Name: configVolumeName,
				VolumeSource: corev1.VolumeSource{
					ConfigMap: &corev1.ConfigMapVolumeSource{
						DefaultMode: &defaultConfigMapMode,
						LocalObjectReference: corev1.LocalObjectReference{
							Name: lokiConfigMapName(opts.Name),
						},
					},
				},
			},
		},
		Containers: []corev1.Container{
			{
				Image: opts.Image,
				Name:  "loki-compactor",
				Resources: corev1.ResourceRequirements{
					Limits:   opts.ResourceRequirements.Compactor.Limits,
					Requests: opts.ResourceRequirements.Compactor.Requests,
				},
				Args: []string{
					"-target=compactor",
					fmt.Sprintf("-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)),
					fmt.Sprintf("-runtime-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)),
				},
				ReadinessProbe: &corev1.Probe{
					Handler: corev1.Handler{
						HTTPGet: &corev1.HTTPGetAction{
							Path:   "/ready",
							Port:   intstr.FromInt(httpPort),
							Scheme: corev1.URISchemeHTTP,
						},
					},
					PeriodSeconds:       10,
					InitialDelaySeconds: 15,
					TimeoutSeconds:      1,
					SuccessThreshold:    1,
					FailureThreshold:    3,
				},
				LivenessProbe: &corev1.Probe{
					Handler: corev1.Handler{
						HTTPGet: &corev1.HTTPGetAction{
							Path:   "/metrics",
							Port:   intstr.FromInt(httpPort),
							Scheme: corev1.URISchemeHTTP,
						},
					},
					TimeoutSeconds:   2,
					PeriodSeconds:    30,
					FailureThreshold: 10,
					SuccessThreshold: 1,
				},
				Ports: []corev1.ContainerPort{
					{
						Name:          lokiHTTPPortName,
						ContainerPort: httpPort,
						Protocol:      protocolTCP,
					},
					{
						Name:          lokiGRPCPortName,
						ContainerPort: grpcPort,
						Protocol:      protocolTCP,
					},
				},
				VolumeMounts: []corev1.VolumeMount{
					{
						Name:      configVolumeName,
						ReadOnly:  false,
						MountPath: config.LokiConfigMountDir,
					},
					{
						Name:      storageVolumeName,
						ReadOnly:  false,
						MountPath: dataDirectory,
					},
				},
				TerminationMessagePath:   "/dev/termination-log",
				TerminationMessagePolicy: "File",
				ImagePullPolicy:          "IfNotPresent",
			},
		},
	}

	if opts.Stack.Template != nil && opts.Stack.Template.Compactor != nil {
		podSpec.Tolerations = opts.Stack.Template.Compactor.Tolerations
		podSpec.NodeSelector = opts.Stack.Template.Compactor.NodeSelector
	}

	l := ComponentLabels(LabelCompactorComponent, opts.Name)
	a := commonAnnotations(opts.ConfigSHA1)
	return &appsv1.StatefulSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "StatefulSet",
			APIVersion: appsv1.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:   CompactorName(opts.Name),
			Labels: l,
		},
		Spec: appsv1.StatefulSetSpec{
			PodManagementPolicy:  appsv1.OrderedReadyPodManagement,
			RevisionHistoryLimit: pointer.Int32Ptr(10),
			// NOTE(review): unlike the nil-guarded Tolerations/NodeSelector
			// assignment above, this dereferences Template.Compactor
			// unconditionally and panics if either is nil — presumably
			// ApplyDefaultSettings guarantees both; confirm for all callers.
			Replicas: pointer.Int32Ptr(opts.Stack.Template.Compactor.Replicas),
			Selector: &metav1.LabelSelector{
				MatchLabels: labels.Merge(l, GossipLabels()),
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Name:        fmt.Sprintf("loki-compactor-%s", opts.Name),
					Labels:      labels.Merge(l, GossipLabels()),
					Annotations: a,
				},
				Spec: podSpec,
			},
			VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{
						Labels: l,
						Name:   storageVolumeName,
					},
					Spec: corev1.PersistentVolumeClaimSpec{
						AccessModes: []corev1.PersistentVolumeAccessMode{
							// TODO: should we verify that this is possible with the given storage class first?
							corev1.ReadWriteOnce,
						},
						Resources: corev1.ResourceRequirements{
							Requests: map[corev1.ResourceName]resource.Quantity{
								corev1.ResourceStorage: opts.ResourceRequirements.Compactor.PVCSize,
							},
						},
						VolumeMode:       &volumeFileSystemMode,
						StorageClassName: pointer.StringPtr(opts.Stack.StorageClassName),
					},
				},
			},
		},
	}
}

// NewCompactorGRPCService creates a k8s service for the compactor GRPC endpoint
func NewCompactorGRPCService(opts Options) *corev1.Service {
	l := ComponentLabels(LabelCompactorComponent, opts.Name)

	return &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: corev1.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:   serviceNameCompactorGRPC(opts.Name),
			Labels: l,
		},
		Spec: corev1.ServiceSpec{
			// Headless service: peers are discovered by DNS, not a VIP.
			ClusterIP: "None",
			Ports: []corev1.ServicePort{
				{
					Name:       lokiGRPCPortName,
					Port:       grpcPort,
					Protocol:   protocolTCP,
					TargetPort: intstr.IntOrString{IntVal: grpcPort},
				},
			},
			Selector: l,
		},
	}
}

// NewCompactorHTTPService creates a k8s service for the compactor HTTP
// endpoint, annotated for certificate signing when that flag is enabled.
func NewCompactorHTTPService(opts Options) *corev1.Service {
	serviceName := serviceNameCompactorHTTP(opts.Name)
	l := ComponentLabels(LabelCompactorComponent, opts.Name)
	a := serviceAnnotations(serviceName, opts.Flags.EnableCertificateSigningService)

	return &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: corev1.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        serviceName,
			Labels:      l,
			Annotations: a,
		},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{
				{
					Name:       lokiHTTPPortName,
					Port:       httpPort,
					Protocol:   protocolTCP,
					TargetPort: intstr.IntOrString{IntVal: httpPort},
				},
			},
			Selector: l,
		},
	}
}
func TestNewCompactorStatefulSet_SelectorMatchesLabels(t *testing.T) {
	// You must set the .spec.selector field of a StatefulSet to match the labels of
	// its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the
	// .spec.selector field was defaulted when omitted. In 1.8 and later versions,
	// failing to specify a matching Pod Selector will result in a validation error
	// during StatefulSet creation.
	// See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-selector
	sts := manifests.NewCompactorStatefulSet(manifests.Options{
		Name:      "abcd",
		Namespace: "efgh",
		Stack: lokiv1beta1.LokiStackSpec{
			StorageClassName: "standard",
			Template: &lokiv1beta1.LokiTemplateSpec{
				Compactor: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
			},
		},
	})

	// Every selector key/value pair must appear in the pod template labels.
	l := sts.Spec.Template.GetObjectMeta().GetLabels()
	for key, value := range sts.Spec.Selector.MatchLabels {
		require.Contains(t, l, key)
		require.Equal(t, l[key], value)
	}
}
ss.Spec.Template.Annotations + require.Contains(t, annotations, expected) + require.Equal(t, annotations[expected], "deadbeef") +} diff --git a/operator/internal/manifests/config.go b/operator/internal/manifests/config.go new file mode 100644 index 0000000000..be7b9bff17 --- /dev/null +++ b/operator/internal/manifests/config.go @@ -0,0 +1,86 @@ +package manifests + +import ( + "crypto/sha1" + "fmt" + + "github.com/grafana/loki-operator/internal/manifests/internal/config" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LokiConfigMap creates the single configmap containing the loki configuration for the whole cluster +func LokiConfigMap(opt Options) (*corev1.ConfigMap, string, error) { + cfg := ConfigOptions(opt) + c, rc, err := config.Build(cfg) + if err != nil { + return nil, "", err + } + + s := sha1.New() + _, err = s.Write(c) + if err != nil { + return nil, "", err + } + sha1C := fmt.Sprintf("%x", s.Sum(nil)) + + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: lokiConfigMapName(opt.Name), + Labels: commonLabels(opt.Name), + }, + BinaryData: map[string][]byte{ + config.LokiConfigFileName: c, + config.LokiRuntimeConfigFileName: rc, + }, + }, sha1C, nil +} + +// ConfigOptions converts Options to config.Options +func ConfigOptions(opt Options) config.Options { + return config.Options{ + Stack: opt.Stack, + Namespace: opt.Namespace, + Name: opt.Name, + FrontendWorker: config.Address{ + FQDN: fqdn(NewQueryFrontendGRPCService(opt).GetName(), opt.Namespace), + Port: grpcPort, + }, + GossipRing: config.Address{ + FQDN: fqdn(BuildLokiGossipRingService(opt.Name).GetName(), opt.Namespace), + Port: gossipPort, + }, + Querier: config.Address{ + FQDN: fqdn(NewQuerierHTTPService(opt).GetName(), opt.Namespace), + Port: httpPort, + }, + IndexGateway: config.Address{ + FQDN: 
fqdn(NewIndexGatewayGRPCService(opt).GetName(), opt.Namespace), + Port: grpcPort, + }, + StorageDirectory: dataDirectory, + ObjectStorage: config.ObjectStorage{ + Endpoint: opt.ObjectStorage.Endpoint, + Buckets: opt.ObjectStorage.Buckets, + Region: opt.ObjectStorage.Region, + AccessKeyID: opt.ObjectStorage.AccessKeyID, + AccessKeySecret: opt.ObjectStorage.AccessKeySecret, + }, + QueryParallelism: config.Parallelism{ + QuerierCPULimits: opt.ResourceRequirements.Querier.Requests.Cpu().Value(), + QueryFrontendReplicas: opt.Stack.Template.QueryFrontend.Replicas, + }, + WriteAheadLog: config.WriteAheadLog{ + Directory: walDirectory, + IngesterMemoryRequest: opt.ResourceRequirements.Ingester.Requests.Memory().Value(), + }, + } +} + +func lokiConfigMapName(stackName string) string { + return fmt.Sprintf("loki-config-%s", stackName) +} diff --git a/operator/internal/manifests/config_test.go b/operator/internal/manifests/config_test.go new file mode 100644 index 0000000000..979f63e896 --- /dev/null +++ b/operator/internal/manifests/config_test.go @@ -0,0 +1,182 @@ +package manifests_test + +import ( + "encoding/json" + "math/rand" + "testing" + + "github.com/google/uuid" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" +) + +func TestConfigMap_ReturnsSHA1OfBinaryContents(t *testing.T) { + opts := randomConfigOptions() + + _, sha1C, err := manifests.LokiConfigMap(opts) + require.NoError(t, err) + require.NotEmpty(t, sha1C) +} + +func TestConfigOptions_UserOptionsTakePrecedence(t *testing.T) { + // regardless of what is provided by the default sizing parameters we should always prefer + // the user-defined values. 
// randomConfigOptions builds a manifests.Options with every field populated
// from random/unique values. Used by the config tests to prove that
// user-supplied settings survive the conversion to config.Options intact —
// any dropped or defaulted field would break the round-trip comparison.
func randomConfigOptions() manifests.Options {
	return manifests.Options{
		Name:      uuid.New().String(),
		Namespace: uuid.New().String(),
		Image:     uuid.New().String(),
		Stack: lokiv1beta1.LokiStackSpec{
			Size:              lokiv1beta1.SizeOneXExtraSmall,
			Storage:           lokiv1beta1.ObjectStorageSpec{},
			StorageClassName:  uuid.New().String(),
			ReplicationFactor: rand.Int31(),
			Limits: &lokiv1beta1.LimitsSpec{
				Global: &lokiv1beta1.LimitsTemplateSpec{
					IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
						IngestionRate:             rand.Int31(),
						IngestionBurstSize:        rand.Int31(),
						MaxLabelNameLength:        rand.Int31(),
						MaxLabelValueLength:       rand.Int31(),
						MaxLabelNamesPerSeries:    rand.Int31(),
						MaxGlobalStreamsPerTenant: rand.Int31(),
						MaxLineSize:               rand.Int31(),
					},
					QueryLimits: &lokiv1beta1.QueryLimitSpec{
						MaxEntriesLimitPerQuery: rand.Int31(),
						MaxChunksPerQuery:       rand.Int31(),
						MaxQuerySeries:          rand.Int31(),
					},
				},
				Tenants: map[string]lokiv1beta1.LimitsTemplateSpec{
					uuid.New().String(): {
						IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
							IngestionRate:             rand.Int31(),
							IngestionBurstSize:        rand.Int31(),
							MaxLabelNameLength:        rand.Int31(),
							MaxLabelValueLength:       rand.Int31(),
							MaxLabelNamesPerSeries:    rand.Int31(),
							MaxGlobalStreamsPerTenant: rand.Int31(),
							MaxLineSize:               rand.Int31(),
						},
						QueryLimits: &lokiv1beta1.QueryLimitSpec{
							MaxEntriesLimitPerQuery: rand.Int31(),
							MaxChunksPerQuery:       rand.Int31(),
							MaxQuerySeries:          rand.Int31(),
						},
					},
				},
			},
			Template: &lokiv1beta1.LokiTemplateSpec{
				// The compactor keeps Replicas at 1 (the enforced singleton
				// value); all other components use random replica counts.
				Compactor: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
					NodeSelector: map[string]string{
						uuid.New().String(): uuid.New().String(),
					},
					Tolerations: []corev1.Toleration{
						{
							Key:               uuid.New().String(),
							Operator:          corev1.TolerationOpEqual,
							Value:             uuid.New().String(),
							Effect:            corev1.TaintEffectNoExecute,
							TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
						},
					},
				},
				Distributor: &lokiv1beta1.LokiComponentSpec{
					Replicas: rand.Int31(),
					NodeSelector: map[string]string{
						uuid.New().String(): uuid.New().String(),
					},
					Tolerations: []corev1.Toleration{
						{
							Key:               uuid.New().String(),
							Operator:          corev1.TolerationOpEqual,
							Value:             uuid.New().String(),
							Effect:            corev1.TaintEffectNoExecute,
							TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
						},
					},
				},
				Ingester: &lokiv1beta1.LokiComponentSpec{
					Replicas: rand.Int31(),
					NodeSelector: map[string]string{
						uuid.New().String(): uuid.New().String(),
					},
					Tolerations: []corev1.Toleration{
						{
							Key:               uuid.New().String(),
							Operator:          corev1.TolerationOpEqual,
							Value:             uuid.New().String(),
							Effect:            corev1.TaintEffectNoExecute,
							TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
						},
					},
				},
				Querier: &lokiv1beta1.LokiComponentSpec{
					Replicas: rand.Int31(),
					NodeSelector: map[string]string{
						uuid.New().String(): uuid.New().String(),
					},
					Tolerations: []corev1.Toleration{
						{
							Key:               uuid.New().String(),
							Operator:          corev1.TolerationOpEqual,
							Value:             uuid.New().String(),
							Effect:            corev1.TaintEffectNoExecute,
							TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
						},
					},
				},
				QueryFrontend: &lokiv1beta1.LokiComponentSpec{
					Replicas: rand.Int31(),
					NodeSelector: map[string]string{
						uuid.New().String(): uuid.New().String(),
					},
					Tolerations: []corev1.Toleration{
						{
							Key:               uuid.New().String(),
							Operator:          corev1.TolerationOpEqual,
							Value:             uuid.New().String(),
							Effect:            corev1.TaintEffectNoExecute,
							TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
						},
					},
				},
				IndexGateway: &lokiv1beta1.LokiComponentSpec{
					Replicas: rand.Int31(),
					NodeSelector: map[string]string{
						uuid.New().String(): uuid.New().String(),
					},
					Tolerations: []corev1.Toleration{
						{
							Key:               uuid.New().String(),
							Operator:          corev1.TolerationOpEqual,
							Value:             uuid.New().String(),
							Effect:            corev1.TaintEffectNoExecute,
							TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
						},
					},
				},
			},
		},
	}
}
lokiConfigMapName(opts.Name), + }, + }, + }, + }, + { + Name: storageVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + Containers: []corev1.Container{ + { + Image: opts.Image, + Name: "loki-distributor", + Resources: corev1.ResourceRequirements{ + Limits: opts.ResourceRequirements.Distributor.Limits, + Requests: opts.ResourceRequirements.Distributor.Requests, + }, + Args: []string{ + "-target=distributor", + fmt.Sprintf("-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), + fmt.Sprintf("-runtime-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.FromInt(httpPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + PeriodSeconds: 10, + InitialDelaySeconds: 15, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/metrics", + Port: intstr.FromInt(httpPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + TimeoutSeconds: 2, + PeriodSeconds: 30, + FailureThreshold: 10, + SuccessThreshold: 1, + }, + Ports: []corev1.ContainerPort{ + { + Name: lokiHTTPPortName, + ContainerPort: httpPort, + Protocol: protocolTCP, + }, + { + Name: lokiGRPCPortName, + ContainerPort: grpcPort, + Protocol: protocolTCP, + }, + { + Name: lokiGossipPortName, + ContainerPort: gossipPort, + Protocol: protocolTCP, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: configVolumeName, + ReadOnly: false, + MountPath: config.LokiConfigMountDir, + }, + { + Name: storageVolumeName, + ReadOnly: false, + MountPath: dataDirectory, + }, + }, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: "File", + ImagePullPolicy: "IfNotPresent", + }, + }, + } + + if opts.Stack.Template != nil && 
opts.Stack.Template.Distributor != nil { + podSpec.Tolerations = opts.Stack.Template.Distributor.Tolerations + podSpec.NodeSelector = opts.Stack.Template.Distributor.NodeSelector + } + + l := ComponentLabels(LabelDistributorComponent, opts.Name) + a := commonAnnotations(opts.ConfigSHA1) + + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: DistributorName(opts.Name), + Labels: l, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32Ptr(opts.Stack.Template.Distributor.Replicas), + Selector: &metav1.LabelSelector{ + MatchLabels: labels.Merge(l, GossipLabels()), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("loki-distributor-%s", opts.Name), + Labels: labels.Merge(l, GossipLabels()), + Annotations: a, + }, + Spec: podSpec, + }, + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + }, + }, + } +} + +// NewDistributorGRPCService creates a k8s service for the distributor GRPC endpoint +func NewDistributorGRPCService(opts Options) *corev1.Service { + l := ComponentLabels(LabelDistributorComponent, opts.Name) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceNameDistributorGRPC(opts.Name), + Labels: l, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: lokiGRPCPortName, + Port: grpcPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: grpcPort}, + }, + }, + Selector: l, + }, + } +} + +// NewDistributorHTTPService creates a k8s service for the distributor HTTP endpoint +func NewDistributorHTTPService(opts Options) *corev1.Service { + serviceName := serviceNameDistributorHTTP(opts.Name) + l := ComponentLabels(LabelDistributorComponent, opts.Name) + a := 
serviceAnnotations(serviceName, opts.Flags.EnableCertificateSigningService) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Labels: l, + Annotations: a, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: lokiHTTPPortName, + Port: httpPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: httpPort}, + }, + }, + Selector: l, + }, + } +} + +func configureDistributorServiceMonitorPKI(deployment *appsv1.Deployment, stackName string) error { + serviceName := serviceNameDistributorHTTP(stackName) + return configureServiceMonitorPKI(&deployment.Spec.Template.Spec, serviceName) +} diff --git a/operator/internal/manifests/distributor_test.go b/operator/internal/manifests/distributor_test.go new file mode 100644 index 0000000000..3367065dab --- /dev/null +++ b/operator/internal/manifests/distributor_test.go @@ -0,0 +1,49 @@ +package manifests_test + +import ( + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests" + "github.com/stretchr/testify/require" +) + +func TestNewDistributorDeployment_SelectorMatchesLabels(t *testing.T) { + dpl := manifests.NewDistributorDeployment(manifests.Options{ + Name: "abcd", + Namespace: "efgh", + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + }) + + l := dpl.Spec.Template.GetObjectMeta().GetLabels() + for key, value := range dpl.Spec.Selector.MatchLabels { + require.Contains(t, l, key) + require.Equal(t, l[key], value) + } +} + +func TestNewDistributorDeployme_HasTemplateConfigHashAnnotation(t *testing.T) { + ss := manifests.NewDistributorDeployment(manifests.Options{ + Name: "abcd", + Namespace: "efgh", + ConfigSHA1: "deadbeef", + Stack: lokiv1beta1.LokiStackSpec{ + Template: 
&lokiv1beta1.LokiTemplateSpec{ + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + }) + + expected := "loki.grafana.com/config-hash" + annotations := ss.Spec.Template.Annotations + require.Contains(t, annotations, expected) + require.Equal(t, annotations[expected], "deadbeef") +} diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go new file mode 100644 index 0000000000..3469da45a3 --- /dev/null +++ b/operator/internal/manifests/gateway.go @@ -0,0 +1,420 @@ +package manifests + +import ( + "crypto/sha1" + "fmt" + "path" + + "github.com/ViaQ/logerr/kverrors" + "github.com/imdario/mergo" + + "github.com/grafana/loki-operator/internal/manifests/internal/gateway" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + tlsMetricsSercetVolume = "tls-metrics-secret" +) + +// BuildGateway returns a list of k8s objects for Loki Stack Gateway +func BuildGateway(opts Options) ([]client.Object, error) { + cm, sha1C, err := gatewayConfigMap(opts) + if err != nil { + return nil, err + } + + dpl := NewGatewayDeployment(opts, sha1C) + svc := NewGatewayHTTPService(opts) + + ing, err := NewGatewayIngress(opts) + if err != nil { + return nil, err + } + + objs := []client.Object{cm, dpl, svc, ing} + + if opts.Flags.EnableTLSServiceMonitorConfig { + serviceName := serviceNameGatewayHTTP(opts.Name) + if err := configureGatewayMetricsPKI(&dpl.Spec.Template.Spec, serviceName); err != nil { + return nil, err + } + } + + if opts.Stack.Tenants != nil { + mode := opts.Stack.Tenants.Mode + if err := configureDeploymentForMode(dpl, mode, opts.Flags); err != nil { + return nil, err + } + + if err := configureServiceForMode(&svc.Spec, mode); err != nil { + return nil, err + } + + objs = 
configureGatewayObjsForMode(objs, opts) + } + + return objs, nil +} + +// NewGatewayDeployment creates a deployment object for a lokiStack-gateway +func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment { + podSpec := corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "rbac", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: LabelGatewayComponent, + }, + }, + }, + }, + { + Name: "tenants", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: LabelGatewayComponent, + }, + }, + }, + }, + { + Name: "lokistack-gateway", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: LabelGatewayComponent, + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: gatewayContainerName, + Image: opts.GatewayImage, + Resources: corev1.ResourceRequirements{ + Limits: opts.ResourceRequirements.Gateway.Limits, + Requests: opts.ResourceRequirements.Gateway.Requests, + }, + Args: []string{ + fmt.Sprintf("--debug.name=%s", LabelGatewayComponent), + fmt.Sprintf("--web.listen=0.0.0.0:%d", gatewayHTTPPort), + fmt.Sprintf("--web.internal.listen=0.0.0.0:%d", gatewayInternalPort), + fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", gatewayHTTPPort), + "--log.level=warn", + fmt.Sprintf("--logs.read.endpoint=http://%s:%d", fqdn(serviceNameQueryFrontendHTTP(opts.Name), opts.Namespace), httpPort), + fmt.Sprintf("--logs.tail.endpoint=http://%s:%d", fqdn(serviceNameQueryFrontendHTTP(opts.Name), opts.Namespace), httpPort), + fmt.Sprintf("--logs.write.endpoint=http://%s:%d", fqdn(serviceNameDistributorHTTP(opts.Name), opts.Namespace), httpPort), + fmt.Sprintf("--rbac.config=%s", path.Join(gateway.LokiGatewayMountDir, gateway.LokiGatewayRbacFileName)), + fmt.Sprintf("--tenants.config=%s", 
path.Join(gateway.LokiGatewayMountDir, gateway.LokiGatewayTenantFileName)), + }, + Ports: []corev1.ContainerPort{ + { + Name: gatewayInternalPortName, + ContainerPort: gatewayInternalPort, + }, + { + Name: gatewayHTTPPortName, + ContainerPort: gatewayHTTPPort, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "rbac", + ReadOnly: true, + MountPath: path.Join(gateway.LokiGatewayMountDir, gateway.LokiGatewayRbacFileName), + SubPath: "rbac.yaml", + }, + { + Name: "tenants", + ReadOnly: true, + MountPath: path.Join(gateway.LokiGatewayMountDir, gateway.LokiGatewayTenantFileName), + SubPath: "tenants.yaml", + }, + { + Name: "lokistack-gateway", + ReadOnly: true, + MountPath: path.Join(gateway.LokiGatewayMountDir, gateway.LokiGatewayRegoFileName), + SubPath: "lokistack-gateway.rego", + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/live", + Port: intstr.FromInt(gatewayInternalPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + TimeoutSeconds: 2, + PeriodSeconds: 30, + FailureThreshold: 10, + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.FromInt(gatewayInternalPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + TimeoutSeconds: 1, + PeriodSeconds: 5, + FailureThreshold: 12, + }, + }, + }, + } + + l := ComponentLabels(LabelGatewayComponent, opts.Name) + a := commonAnnotations(sha1C) + + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: GatewayName(opts.Name), + Labels: l, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32Ptr(1), + Selector: &metav1.LabelSelector{ + MatchLabels: l, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: GatewayName(opts.Name), + Labels: l, + Annotations: a, + }, + Spec: podSpec, + }, + Strategy: appsv1.DeploymentStrategy{ + Type: 
appsv1.RollingUpdateDeploymentStrategyType, + }, + }, + } +} + +// NewGatewayHTTPService creates a k8s service for the lokistack-gateway HTTP endpoint +func NewGatewayHTTPService(opts Options) *corev1.Service { + serviceName := serviceNameGatewayHTTP(opts.Name) + l := ComponentLabels(LabelGatewayComponent, opts.Name) + a := serviceAnnotations(serviceName, opts.Flags.EnableCertificateSigningService) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Labels: l, + Annotations: a, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: gatewayHTTPPortName, + Port: gatewayHTTPPort, + }, + { + Name: gatewayInternalPortName, + Port: gatewayInternalPort, + }, + }, + Selector: l, + }, + } +} + +// NewGatewayIngress creates a k8s Ingress object for accessing +// the lokistack-gateway from public. +func NewGatewayIngress(opts Options) (*networkingv1.Ingress, error) { + pt := networkingv1.PathTypePrefix + l := ComponentLabels(LabelGatewayComponent, opts.Name) + + ingBackend := networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: serviceNameGatewayHTTP(opts.Name), + Port: networkingv1.ServiceBackendPort{ + Name: gatewayHTTPPortName, + }, + }, + } + + return &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{ + Kind: "Ingress", + APIVersion: networkingv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Labels: l, + Name: opts.Name, + Namespace: opts.Namespace, + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: &ingBackend, + Rules: []networkingv1.IngressRule{ + { + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/api/logs/v1", + PathType: &pt, + Backend: ingBackend, + }, + }, + }, + }, + }, + }, + }, + }, nil +} + +// gatewayConfigMap creates a configMap for rbac.yaml and 
tenants.yaml +func gatewayConfigMap(opt Options) (*corev1.ConfigMap, string, error) { + cfg := gatewayConfigOptions(opt) + rbacConfig, tenantsConfig, regoConfig, err := gateway.Build(cfg) + if err != nil { + return nil, "", err + } + + s := sha1.New() + _, err = s.Write(tenantsConfig) + if err != nil { + return nil, "", err + } + sha1C := fmt.Sprintf("%x", s.Sum(nil)) + + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: LabelGatewayComponent, + Labels: commonLabels(opt.Name), + }, + BinaryData: map[string][]byte{ + gateway.LokiGatewayRbacFileName: rbacConfig, + gateway.LokiGatewayTenantFileName: tenantsConfig, + gateway.LokiGatewayRegoFileName: regoConfig, + }, + }, sha1C, nil +} + +// gatewayConfigOptions converts Options to gateway.Options +func gatewayConfigOptions(opt Options) gateway.Options { + var gatewaySecrets []*gateway.Secret + for _, secret := range opt.TenantSecrets { + gatewaySecret := &gateway.Secret{ + TenantName: secret.TenantName, + ClientID: secret.ClientID, + ClientSecret: secret.ClientSecret, + IssuerCAPath: secret.IssuerCAPath, + } + gatewaySecrets = append(gatewaySecrets, gatewaySecret) + } + + tenantConfigMap := make(map[string]gateway.TenantData) + for tenant, tenantData := range opt.TenantConfigMap { + tenantConfigMap[tenant] = gateway.TenantData{ + TenantID: tenantData.TenantID, + CookieSecret: tenantData.CookieSecret, + } + } + + return gateway.Options{ + Stack: opt.Stack, + Namespace: opt.Namespace, + Name: opt.Name, + OpenShiftOptions: opt.OpenShiftOptions, + TenantSecrets: gatewaySecrets, + TenantConfigMap: tenantConfigMap, + } +} + +func configureGatewayMetricsPKI(podSpec *corev1.PodSpec, serviceName string) error { + var gwIndex int + for i, c := range podSpec.Containers { + if c.Name == gatewayContainerName { + gwIndex = i + break + } + } + + secretName := signingServiceSecretName(serviceName) + certFile := 
path.Join(gateway.LokiGatewayTLSDir, gateway.LokiGatewayCertFile) + keyFile := path.Join(gateway.LokiGatewayTLSDir, gateway.LokiGatewayKeyFile) + + secretVolumeSpec := corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: tlsMetricsSercetVolume, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }, + }, + } + secretContainerSpec := corev1.Container{ + VolumeMounts: []corev1.VolumeMount{ + { + Name: tlsMetricsSercetVolume, + ReadOnly: true, + MountPath: gateway.LokiGatewayTLSDir, + }, + }, + Args: []string{ + fmt.Sprintf("--tls.internal.server.cert-file=%s", certFile), + fmt.Sprintf("--tls.internal.server.key-file=%s", keyFile), + }, + } + uriSchemeContainerSpec := corev1.Container{ + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Scheme: corev1.URISchemeHTTPS, + }, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Scheme: corev1.URISchemeHTTPS, + }, + }, + }, + } + + if err := mergo.Merge(podSpec, secretVolumeSpec, mergo.WithAppendSlice); err != nil { + return kverrors.Wrap(err, "failed to merge volumes") + } + + if err := mergo.Merge(&podSpec.Containers[gwIndex], secretContainerSpec, mergo.WithAppendSlice); err != nil { + return kverrors.Wrap(err, "failed to merge container") + } + + if err := mergo.Merge(&podSpec.Containers[gwIndex], uriSchemeContainerSpec, mergo.WithOverride); err != nil { + return kverrors.Wrap(err, "failed to merge container") + } + + return nil +} diff --git a/operator/internal/manifests/gateway_tenants.go b/operator/internal/manifests/gateway_tenants.go new file mode 100644 index 0000000000..a4789225de --- /dev/null +++ b/operator/internal/manifests/gateway_tenants.go @@ -0,0 +1,119 @@ +package manifests + +import ( + "github.com/ViaQ/logerr/kverrors" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + 
"github.com/grafana/loki-operator/internal/manifests/internal/gateway" + "github.com/grafana/loki-operator/internal/manifests/openshift" + "github.com/imdario/mergo" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ApplyGatewayDefaultOptions applies defaults on the LokiStackSpec depending on selected +// tenant mode. Currently nothing is applied for modes static and dynamic. For mode openshift-logging +// the tenant spec is filled with defaults for authentication and authorization. +func ApplyGatewayDefaultOptions(opts *Options) error { + if opts.Stack.Tenants == nil { + return nil + } + + switch opts.Stack.Tenants.Mode { + case lokiv1beta1.Static, lokiv1beta1.Dynamic: + return nil // continue using user input + + case lokiv1beta1.OpenshiftLogging: + defaults := openshift.NewOptions( + opts.Name, + GatewayName(opts.Name), + opts.Namespace, + opts.GatewayBaseDomain, + serviceNameGatewayHTTP(opts.Name), + gatewayHTTPPortName, + ComponentLabels(LabelGatewayComponent, opts.Name), + opts.Flags.EnableCertificateSigningService, + opts.TenantConfigMap, + ) + + if err := mergo.Merge(&opts.OpenShiftOptions, &defaults, mergo.WithOverride); err != nil { + return kverrors.Wrap(err, "failed to merge defaults for mode openshift logging") + } + + } + + return nil +} + +func configureDeploymentForMode(d *appsv1.Deployment, mode lokiv1beta1.ModeType, flags FeatureFlags) error { + switch mode { + case lokiv1beta1.Static, lokiv1beta1.Dynamic: + return nil // nothing to configure + case lokiv1beta1.OpenshiftLogging: + return openshift.ConfigureGatewayDeployment( + d, + gatewayContainerName, + tlsMetricsSercetVolume, + gateway.LokiGatewayTLSDir, + gateway.LokiGatewayCertFile, + gateway.LokiGatewayKeyFile, + gateway.LokiGatewayCABundleDir, + gateway.LokiGatewayCAFile, + 
flags.EnableTLSServiceMonitorConfig, + flags.EnableCertificateSigningService, + ) + } + + return nil +} + +func configureServiceForMode(s *corev1.ServiceSpec, mode lokiv1beta1.ModeType) error { + switch mode { + case lokiv1beta1.Static, lokiv1beta1.Dynamic: + return nil // nothing to configure + case lokiv1beta1.OpenshiftLogging: + return openshift.ConfigureGatewayService(s) + } + + return nil +} + +func configureGatewayObjsForMode(objs []client.Object, opts Options) []client.Object { + switch opts.Stack.Tenants.Mode { + case lokiv1beta1.Static, lokiv1beta1.Dynamic: + // nothing to configure + case lokiv1beta1.OpenshiftLogging: + openShiftObjs := openshift.Build(opts.OpenShiftOptions) + + var cObjs []client.Object + for _, o := range objs { + switch o.(type) { + // Drop Ingress in favor of Route in OpenShift. + // Ingress is not supported as OAuthRedirectReference + // in ServiceAccounts used as OAuthClient in OpenShift. + case *networkingv1.Ingress: + continue + } + + cObjs = append(cObjs, o) + } + + objs = append(cObjs, openShiftObjs...) 
+ } + + return objs +} + +func configureServiceMonitorForMode(sm *monitoringv1.ServiceMonitor, mode lokiv1beta1.ModeType, flags FeatureFlags) error { + switch mode { + case lokiv1beta1.Static, lokiv1beta1.Dynamic: + return nil // nothing to configure + case lokiv1beta1.OpenshiftLogging: + return openshift.ConfigureGatewayServiceMonitor(sm, flags.EnableTLSServiceMonitorConfig) + } + + return nil +} diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go new file mode 100644 index 0000000000..56ad20d1db --- /dev/null +++ b/operator/internal/manifests/gateway_tenants_test.go @@ -0,0 +1,658 @@ +package manifests + +import ( + "testing" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "github.com/stretchr/testify/require" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests/internal/gateway" + "github.com/grafana/loki-operator/internal/manifests/openshift" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func TestApplyGatewayDefaultsOptions(t *testing.T) { + type tt struct { + desc string + opts *Options + want *Options + } + + tc := []tt{ + { + desc: "static mode", + opts: &Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.Static, + }, + }, + }, + want: &Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.Static, + }, + }, + }, + }, + { + desc: "dynamic mode", + opts: &Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.Dynamic, + }, + }, + }, + want: &Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.Dynamic, + }, + }, + }, + }, + { + desc: "openshift-logging mode", + opts: &Options{ + Name: 
"lokistack-ocp", + Namespace: "stack-ns", + GatewayBaseDomain: "example.com", + Stack: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.OpenshiftLogging, + }, + }, + }, + want: &Options{ + Name: "lokistack-ocp", + Namespace: "stack-ns", + GatewayBaseDomain: "example.com", + Stack: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.OpenshiftLogging, + }, + }, + OpenShiftOptions: openshift.Options{ + BuildOpts: openshift.BuildOptions{ + LokiStackName: "lokistack-ocp", + GatewayName: "lokistack-gateway-lokistack-ocp", + GatewayNamespace: "stack-ns", + GatewaySvcName: "lokistack-gateway-http-lokistack-ocp", + GatewaySvcTargetPort: "public", + Labels: ComponentLabels(LabelGatewayComponent, "lokistack-ocp"), + }, + Authentication: []openshift.AuthenticationSpec{ + { + TenantName: "application", + TenantID: "", + ServiceAccount: "lokistack-gateway-lokistack-ocp", + RedirectURL: "http://lokistack-ocp-stack-ns.apps.example.com/openshift/application/callback", + }, + { + TenantName: "infrastructure", + TenantID: "", + ServiceAccount: "lokistack-gateway-lokistack-ocp", + RedirectURL: "http://lokistack-ocp-stack-ns.apps.example.com/openshift/infrastructure/callback", + }, + { + TenantName: "audit", + TenantID: "", + ServiceAccount: "lokistack-gateway-lokistack-ocp", + RedirectURL: "http://lokistack-ocp-stack-ns.apps.example.com/openshift/audit/callback", + }, + }, + Authorization: openshift.AuthorizationSpec{ + OPAUrl: "http://localhost:8082/v1/data/lokistack/allow", + }, + }, + }, + }, + } + for _, tc := range tc { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + t.Parallel() + err := ApplyGatewayDefaultOptions(tc.opts) + require.NoError(t, err) + + for i, a := range tc.opts.OpenShiftOptions.Authentication { + require.NotEmpty(t, a.TenantID) + require.NotEmpty(t, a.CookieSecret) + require.Len(t, a.CookieSecret, 32) + + a.TenantID = "" + a.CookieSecret = "" + tc.opts.OpenShiftOptions.Authentication[i] = a + 
tc.opts.OpenShiftOptions.Authentication[i] = a + } + + require.Equal(t, tc.want, tc.opts) + }) + } +} + +func TestConfigureDeploymentForMode(t *testing.T) { + type tt struct { + desc string + mode lokiv1beta1.ModeType + flags FeatureFlags + dpl *appsv1.Deployment + want *appsv1.Deployment + } + + tc := []tt{ + { + desc: "static mode", + mode: lokiv1beta1.Static, + dpl: &appsv1.Deployment{}, + want: &appsv1.Deployment{}, + }, + { + desc: "dynamic mode", + mode: lokiv1beta1.Dynamic, + dpl: &appsv1.Deployment{}, + want: &appsv1.Deployment{}, + }, + { + desc: "openshift-logging mode", + mode: lokiv1beta1.OpenshiftLogging, + dpl: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: gatewayContainerName, + Args: []string{ + "--logs.read.endpoint=http://example.com", + "--logs.tail.endpoint=http://example.com", + "--logs.write.endpoint=http://example.com", + }, + }, + }, + }, + }, + }, + }, + want: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: gatewayContainerName, + Args: []string{ + "--logs.read.endpoint=http://example.com", + "--logs.tail.endpoint=http://example.com", + "--logs.write.endpoint=http://example.com", + }, + }, + { + Name: "opa", + Image: "quay.io/observatorium/opa-openshift:latest", + Args: []string{ + "--log.level=warn", + "--opa.package=lokistack", + "--web.listen=:8082", + "--web.internal.listen=:8083", + "--web.healthchecks.url=http://localhost:8082", + `--openshift.mappings=application=loki.openshift.io`, + `--openshift.mappings=infrastructure=loki.openshift.io`, + `--openshift.mappings=audit=loki.openshift.io`, + }, + Ports: []corev1.ContainerPort{ + { + Name: openshift.GatewayOPAHTTPPortName, + ContainerPort: openshift.GatewayOPAHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: openshift.GatewayOPAInternalPortName, + 
ContainerPort: openshift.GatewayOPAInternalPort, + Protocol: corev1.ProtocolTCP, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/live", + Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)), + Scheme: corev1.URISchemeHTTP, + }, + }, + TimeoutSeconds: 2, + PeriodSeconds: 30, + FailureThreshold: 10, + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)), + Scheme: corev1.URISchemeHTTP, + }, + }, + TimeoutSeconds: 1, + PeriodSeconds: 5, + FailureThreshold: 12, + }, + }, + }, + }, + }, + }, + }, + }, + { + desc: "openshift-logging mode with-tls-service-monitor-config", + mode: lokiv1beta1.OpenshiftLogging, + flags: FeatureFlags{ + EnableTLSServiceMonitorConfig: true, + }, + dpl: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: gatewayContainerName, + Args: []string{ + "--logs.read.endpoint=http://example.com", + "--logs.tail.endpoint=http://example.com", + "--logs.write.endpoint=http://example.com", + }, + }, + }, + }, + }, + }, + }, + want: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: gatewayContainerName, + Args: []string{ + "--logs.read.endpoint=http://example.com", + "--logs.tail.endpoint=http://example.com", + "--logs.write.endpoint=http://example.com", + }, + }, + { + Name: "opa", + Image: "quay.io/observatorium/opa-openshift:latest", + Args: []string{ + "--log.level=warn", + "--opa.package=lokistack", + "--web.listen=:8082", + "--web.internal.listen=:8083", + "--web.healthchecks.url=http://localhost:8082", + "--tls.internal.server.cert-file=/var/run/tls/tls.crt", + "--tls.internal.server.key-file=/var/run/tls/tls.key", + 
`--openshift.mappings=application=loki.openshift.io`, + `--openshift.mappings=infrastructure=loki.openshift.io`, + `--openshift.mappings=audit=loki.openshift.io`, + }, + Ports: []corev1.ContainerPort{ + { + Name: openshift.GatewayOPAHTTPPortName, + ContainerPort: openshift.GatewayOPAHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: openshift.GatewayOPAInternalPortName, + ContainerPort: openshift.GatewayOPAInternalPort, + Protocol: corev1.ProtocolTCP, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/live", + Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)), + Scheme: corev1.URISchemeHTTPS, + }, + }, + TimeoutSeconds: 2, + PeriodSeconds: 30, + FailureThreshold: 10, + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)), + Scheme: corev1.URISchemeHTTPS, + }, + }, + TimeoutSeconds: 1, + PeriodSeconds: 5, + FailureThreshold: 12, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: tlsMetricsSercetVolume, + ReadOnly: true, + MountPath: gateway.LokiGatewayTLSDir, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + desc: "openshift-logging mode with-cert-signing-service", + mode: lokiv1beta1.OpenshiftLogging, + flags: FeatureFlags{ + EnableTLSServiceMonitorConfig: true, + EnableCertificateSigningService: true, + }, + dpl: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: gatewayContainerName, + Args: []string{ + "--other.args=foo-bar", + "--logs.read.endpoint=http://example.com", + "--logs.tail.endpoint=http://example.com", + "--logs.write.endpoint=http://example.com", + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "tls-secret", + ReadOnly: true, + MountPath: "/var/run/tls", + }, + }, + }, + }, + 
Volumes: []corev1.Volume{ + { + Name: "tls-secret-volume", + }, + }, + }, + }, + }, + }, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + ServiceAccountName: "gateway", + Containers: []corev1.Container{ + { + Name: gatewayContainerName, + Args: []string{ + "--other.args=foo-bar", + "--logs.read.endpoint=https://example.com", + "--logs.tail.endpoint=https://example.com", + "--logs.write.endpoint=https://example.com", + "--logs.tls.ca-file=/var/run/ca/service-ca.crt", + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "tls-secret", + ReadOnly: true, + MountPath: "/var/run/tls", + }, + { + Name: "gateway-ca-bundle", + ReadOnly: true, + MountPath: "/var/run/ca", + }, + }, + }, + { + Name: "opa", + Image: "quay.io/observatorium/opa-openshift:latest", + Args: []string{ + "--log.level=warn", + "--opa.package=lokistack", + "--web.listen=:8082", + "--web.internal.listen=:8083", + "--web.healthchecks.url=http://localhost:8082", + "--tls.internal.server.cert-file=/var/run/tls/tls.crt", + "--tls.internal.server.key-file=/var/run/tls/tls.key", + `--openshift.mappings=application=loki.openshift.io`, + `--openshift.mappings=infrastructure=loki.openshift.io`, + `--openshift.mappings=audit=loki.openshift.io`, + }, + Ports: []corev1.ContainerPort{ + { + Name: openshift.GatewayOPAHTTPPortName, + ContainerPort: openshift.GatewayOPAHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: openshift.GatewayOPAInternalPortName, + ContainerPort: openshift.GatewayOPAInternalPort, + Protocol: corev1.ProtocolTCP, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/live", + Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)), + Scheme: corev1.URISchemeHTTPS, + }, + }, + TimeoutSeconds: 2, + PeriodSeconds: 30, + FailureThreshold: 10, + }, + ReadinessProbe: &corev1.Probe{ + Handler: 
corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)), + Scheme: corev1.URISchemeHTTPS, + }, + }, + TimeoutSeconds: 1, + PeriodSeconds: 5, + FailureThreshold: 12, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: tlsMetricsSercetVolume, + ReadOnly: true, + MountPath: gateway.LokiGatewayTLSDir, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "tls-secret-volume", + }, + { + Name: "gateway-ca-bundle", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultConfigMapMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: "gateway-ca-bundle", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + for _, tc := range tc { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + t.Parallel() + err := configureDeploymentForMode(tc.dpl, tc.mode, tc.flags) + require.NoError(t, err) + require.Equal(t, tc.want, tc.dpl) + }) + } +} + +func TestConfigureServiceForMode(t *testing.T) { + type tt struct { + desc string + mode lokiv1beta1.ModeType + svc *corev1.ServiceSpec + want *corev1.ServiceSpec + } + + tc := []tt{ + { + desc: "static mode", + mode: lokiv1beta1.Static, + svc: &corev1.ServiceSpec{}, + want: &corev1.ServiceSpec{}, + }, + { + desc: "dynamic mode", + mode: lokiv1beta1.Dynamic, + svc: &corev1.ServiceSpec{}, + want: &corev1.ServiceSpec{}, + }, + { + desc: "openshift-logging mode", + mode: lokiv1beta1.OpenshiftLogging, + svc: &corev1.ServiceSpec{}, + want: &corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: openshift.GatewayOPAInternalPortName, + Port: openshift.GatewayOPAInternalPort, + }, + }, + }, + }, + } + for _, tc := range tc { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + t.Parallel() + err := configureServiceForMode(tc.svc, tc.mode) + require.NoError(t, err) + require.Equal(t, tc.want, tc.svc) + }) + } +} + +func TestConfigureServiceMonitorForMode(t *testing.T) { + type tt struct { + desc string + 
mode lokiv1beta1.ModeType + flags FeatureFlags + sm *monitoringv1.ServiceMonitor + want *monitoringv1.ServiceMonitor + } + + tc := []tt{ + { + desc: "static mode", + mode: lokiv1beta1.Static, + sm: &monitoringv1.ServiceMonitor{}, + want: &monitoringv1.ServiceMonitor{}, + }, + { + desc: "dynamic mode", + mode: lokiv1beta1.Dynamic, + sm: &monitoringv1.ServiceMonitor{}, + want: &monitoringv1.ServiceMonitor{}, + }, + { + desc: "openshift-logging mode", + mode: lokiv1beta1.OpenshiftLogging, + sm: &monitoringv1.ServiceMonitor{}, + want: &monitoringv1.ServiceMonitor{ + Spec: monitoringv1.ServiceMonitorSpec{ + Endpoints: []monitoringv1.Endpoint{ + { + Port: openshift.GatewayOPAInternalPortName, + Path: "/metrics", + Scheme: "http", + }, + }, + }, + }, + }, + { + desc: "openshift-logging mode with-tls-service-monitor-config", + mode: lokiv1beta1.OpenshiftLogging, + flags: FeatureFlags{ + EnableTLSServiceMonitorConfig: true, + }, + sm: &monitoringv1.ServiceMonitor{ + Spec: monitoringv1.ServiceMonitorSpec{ + Endpoints: []monitoringv1.Endpoint{ + { + TLSConfig: &monitoringv1.TLSConfig{ + CAFile: "/path/to/ca/file", + CertFile: "/path/to/cert/file", + KeyFile: "/path/to/key/file", + }, + }, + }, + }, + }, + want: &monitoringv1.ServiceMonitor{ + Spec: monitoringv1.ServiceMonitorSpec{ + Endpoints: []monitoringv1.Endpoint{ + { + TLSConfig: &monitoringv1.TLSConfig{ + CAFile: "/path/to/ca/file", + CertFile: "/path/to/cert/file", + KeyFile: "/path/to/key/file", + }, + }, + { + Port: openshift.GatewayOPAInternalPortName, + Path: "/metrics", + Scheme: "https", + BearerTokenFile: BearerTokenFile, + TLSConfig: &monitoringv1.TLSConfig{ + CAFile: "/path/to/ca/file", + CertFile: "/path/to/cert/file", + KeyFile: "/path/to/key/file", + }, + }, + }, + }, + }, + }, + } + for _, tc := range tc { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + t.Parallel() + err := configureServiceMonitorForMode(tc.sm, tc.mode, tc.flags) + require.NoError(t, err) + require.Equal(t, tc.want, tc.sm) + }) + } +} 
diff --git a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go new file mode 100644 index 0000000000..e7e83e2e40 --- /dev/null +++ b/operator/internal/manifests/gateway_test.go @@ -0,0 +1,278 @@ +package manifests + +import ( + "math/rand" + "reflect" + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests/openshift" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + routev1 "github.com/openshift/api/route/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +func TestNewGatewayDeployment_HasTemplateConfigHashAnnotation(t *testing.T) { + sha1C := "deadbeef" + ss := NewGatewayDeployment(Options{ + Name: "abcd", + Namespace: "efgh", + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + }, + }, + }, sha1C) + + expected := "loki.grafana.com/config-hash" + annotations := ss.Spec.Template.Annotations + require.Contains(t, annotations, expected) + require.Equal(t, annotations[expected], sha1C) +} + +func TestGatewayConfigMap_ReturnsSHA1OfBinaryContents(t *testing.T) { + opts := Options{ + Name: uuid.New().String(), + Namespace: uuid.New().String(), + Image: uuid.New().String(), + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + Querier: 
&lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + }, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.Dynamic, + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1beta1.OIDCSpec{ + Secret: &lokiv1beta1.TenantSecretSpec{ + Name: "test", + }, + IssuerURL: "https://127.0.0.1:5556/dex", + RedirectURL: "https://localhost:8443/oidc/test/callback", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: &lokiv1beta1.OPASpec{ + URL: "http://127.0.0.1:8181/v1/data/observatorium/allow", + }, + }, + }, + }, + TenantSecrets: []*TenantSecrets{ + { + TenantName: "test", + ClientID: "test", + ClientSecret: "test", + IssuerCAPath: "/tmp/test", + }, + }, + } + + _, sha1C, err := gatewayConfigMap(opts) + require.NoError(t, err) + require.NotEmpty(t, sha1C) +} + +func TestBuildGateway_HasConfigForTenantMode(t *testing.T) { + objs, err := BuildGateway(Options{ + Name: "abcd", + Namespace: "efgh", + Flags: FeatureFlags{ + EnableGateway: true, + }, + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + }, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.OpenshiftLogging, + }, + }, + }) + + require.NoError(t, err) + + d, ok := objs[1].(*appsv1.Deployment) + require.True(t, ok) + require.Len(t, d.Spec.Template.Spec.Containers, 2) +} + +func TestBuildGateway_HasExtraObjectsForTenantMode(t *testing.T) { + objs, err := BuildGateway(Options{ + Name: "abcd", + Namespace: "efgh", + Flags: FeatureFlags{ + EnableGateway: true, + }, + OpenShiftOptions: openshift.Options{ + BuildOpts: openshift.BuildOptions{ + GatewayName: "abc", + GatewayNamespace: "efgh", + LokiStackName: "abc", + }, + }, + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + 
Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + }, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.OpenshiftLogging, + }, + }, + }) + + require.NoError(t, err) + require.Len(t, objs, 7) +} + +func TestBuildGateway_WithExtraObjectsForTenantMode_RouteSvcMatches(t *testing.T) { + objs, err := BuildGateway(Options{ + Name: "abcd", + Namespace: "efgh", + Flags: FeatureFlags{ + EnableGateway: true, + }, + OpenShiftOptions: openshift.Options{ + BuildOpts: openshift.BuildOptions{ + GatewayName: "abc", + GatewayNamespace: "efgh", + GatewaySvcName: serviceNameGatewayHTTP("abcd"), + GatewaySvcTargetPort: gatewayHTTPPortName, + LokiStackName: "abc", + }, + }, + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + }, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.OpenshiftLogging, + }, + }, + }) + + require.NoError(t, err) + + svc := objs[2].(*corev1.Service) + rt := objs[3].(*routev1.Route) + require.Equal(t, svc.Kind, rt.Spec.To.Kind) + require.Equal(t, svc.Name, rt.Spec.To.Name) + require.Equal(t, svc.Spec.Ports[0].Name, rt.Spec.Port.TargetPort.StrVal) +} + +func TestBuildGateway_WithExtraObjectsForTenantMode_ServiceAccountNameMatches(t *testing.T) { + objs, err := BuildGateway(Options{ + Name: "abcd", + Namespace: "efgh", + Flags: FeatureFlags{ + EnableGateway: true, + }, + OpenShiftOptions: openshift.Options{ + BuildOpts: openshift.BuildOptions{ + GatewayName: GatewayName("abcd"), + GatewayNamespace: "efgh", + GatewaySvcName: serviceNameGatewayHTTP("abcd"), + GatewaySvcTargetPort: gatewayHTTPPortName, + LokiStackName: "abc", + }, + }, + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + }, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.OpenshiftLogging, + }, + }, + }) + + require.NoError(t, err) + + dpl := 
objs[1].(*appsv1.Deployment) + sa := objs[4].(*corev1.ServiceAccount) + require.Equal(t, dpl.Spec.Template.Spec.ServiceAccountName, sa.Name) +} + +func TestBuildGateway_WithExtraObjectsForTenantMode_ReplacesIngressWithRoute(t *testing.T) { + objs, err := BuildGateway(Options{ + Name: "abcd", + Namespace: "efgh", + Flags: FeatureFlags{ + EnableGateway: true, + }, + OpenShiftOptions: openshift.Options{ + BuildOpts: openshift.BuildOptions{ + GatewayName: GatewayName("abcd"), + GatewayNamespace: "efgh", + GatewaySvcName: serviceNameGatewayHTTP("abcd"), + GatewaySvcTargetPort: gatewayHTTPPortName, + LokiStackName: "abc", + }, + }, + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: rand.Int31(), + }, + }, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.OpenshiftLogging, + }, + }, + }) + + require.NoError(t, err) + + var kinds []string + for _, o := range objs { + kinds = append(kinds, reflect.TypeOf(o).String()) + } + + require.NotContains(t, kinds, "*v1.Ingress") + require.Contains(t, kinds, "*v1.Route") +} diff --git a/operator/internal/manifests/indexgateway.go b/operator/internal/manifests/indexgateway.go new file mode 100644 index 0000000000..67a44eda98 --- /dev/null +++ b/operator/internal/manifests/indexgateway.go @@ -0,0 +1,239 @@ +package manifests + +import ( + "fmt" + "path" + + "github.com/grafana/loki-operator/internal/manifests/internal/config" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// BuildIndexGateway returns a list of k8s objects for Loki IndexGateway +func BuildIndexGateway(opts Options) ([]client.Object, error) { + statefulSet := NewIndexGatewayStatefulSet(opts) + if 
opts.Flags.EnableTLSServiceMonitorConfig { + if err := configureIndexGatewayServiceMonitorPKI(statefulSet, opts.Name); err != nil { + return nil, err + } + } + + return []client.Object{ + statefulSet, + NewIndexGatewayGRPCService(opts), + NewIndexGatewayHTTPService(opts), + }, nil +} + +// NewIndexGatewayStatefulSet creates a statefulset object for an index-gateway +func NewIndexGatewayStatefulSet(opts Options) *appsv1.StatefulSet { + podSpec := corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: configVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultConfigMapMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: lokiConfigMapName(opts.Name), + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Image: opts.Image, + Name: "loki-index-gateway", + Resources: corev1.ResourceRequirements{ + Limits: opts.ResourceRequirements.IndexGateway.Limits, + Requests: opts.ResourceRequirements.IndexGateway.Requests, + }, + Args: []string{ + "-target=index-gateway", + fmt.Sprintf("-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), + fmt.Sprintf("-runtime-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.FromInt(httpPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + PeriodSeconds: 10, + InitialDelaySeconds: 15, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/metrics", + Port: intstr.FromInt(httpPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + TimeoutSeconds: 2, + PeriodSeconds: 30, + FailureThreshold: 10, + SuccessThreshold: 1, + }, + Ports: []corev1.ContainerPort{ + { + Name: lokiHTTPPortName, + ContainerPort: httpPort, + Protocol: protocolTCP, + }, + { + Name: 
lokiGRPCPortName, + ContainerPort: grpcPort, + Protocol: protocolTCP, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: configVolumeName, + ReadOnly: false, + MountPath: config.LokiConfigMountDir, + }, + { + Name: storageVolumeName, + ReadOnly: false, + MountPath: dataDirectory, + }, + }, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: "File", + ImagePullPolicy: "IfNotPresent", + }, + }, + } + + if opts.Stack.Template != nil && opts.Stack.Template.IndexGateway != nil { + podSpec.Tolerations = opts.Stack.Template.IndexGateway.Tolerations + podSpec.NodeSelector = opts.Stack.Template.IndexGateway.NodeSelector + } + + l := ComponentLabels(LabelIndexGatewayComponent, opts.Name) + a := commonAnnotations(opts.ConfigSHA1) + + return &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: IndexGatewayName(opts.Name), + Labels: l, + }, + Spec: appsv1.StatefulSetSpec{ + PodManagementPolicy: appsv1.OrderedReadyPodManagement, + RevisionHistoryLimit: pointer.Int32Ptr(10), + Replicas: pointer.Int32Ptr(opts.Stack.Template.IndexGateway.Replicas), + Selector: &metav1.LabelSelector{ + MatchLabels: labels.Merge(l, GossipLabels()), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("loki-index-gateway-%s", opts.Name), + Labels: labels.Merge(l, GossipLabels()), + Annotations: a, + }, + Spec: podSpec, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Labels: l, + Name: storageVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + // TODO: should we verify that this is possible with the given storage class first? 
+ corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: opts.ResourceRequirements.IndexGateway.PVCSize, + }, + }, + StorageClassName: pointer.StringPtr(opts.Stack.StorageClassName), + VolumeMode: &volumeFileSystemMode, + }, + }, + }, + }, + } +} + +// NewIndexGatewayGRPCService creates a k8s service for the index-gateway GRPC endpoint +func NewIndexGatewayGRPCService(opts Options) *corev1.Service { + l := ComponentLabels(LabelIndexGatewayComponent, opts.Name) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceNameIndexGatewayGRPC(opts.Name), + Labels: l, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: lokiGRPCPortName, + Port: grpcPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: grpcPort}, + }, + }, + Selector: l, + }, + } +} + +// NewIndexGatewayHTTPService creates a k8s service for the index-gateway HTTP endpoint +func NewIndexGatewayHTTPService(opts Options) *corev1.Service { + serviceName := serviceNameIndexGatewayHTTP(opts.Name) + l := ComponentLabels(LabelIndexGatewayComponent, opts.Name) + a := serviceAnnotations(serviceName, opts.Flags.EnableCertificateSigningService) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Labels: l, + Annotations: a, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: lokiHTTPPortName, + Port: httpPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: httpPort}, + }, + }, + Selector: l, + }, + } +} + +func configureIndexGatewayServiceMonitorPKI(statefulSet *appsv1.StatefulSet, stackName string) error { + serviceName := serviceNameIndexGatewayHTTP(stackName) + 
return configureServiceMonitorPKI(&statefulSet.Spec.Template.Spec, serviceName) +} diff --git a/operator/internal/manifests/indexgateway_test.go b/operator/internal/manifests/indexgateway_test.go new file mode 100644 index 0000000000..6afb748a5d --- /dev/null +++ b/operator/internal/manifests/indexgateway_test.go @@ -0,0 +1,57 @@ +package manifests_test + +import ( + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests" + "github.com/stretchr/testify/require" +) + +func TestNewIndexGatewayStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) { + ss := manifests.NewIndexGatewayStatefulSet(manifests.Options{ + Name: "abcd", + Namespace: "efgh", + ConfigSHA1: "deadbeef", + Stack: lokiv1beta1.LokiStackSpec{ + StorageClassName: "standard", + Template: &lokiv1beta1.LokiTemplateSpec{ + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + }) + + expected := "loki.grafana.com/config-hash" + annotations := ss.Spec.Template.Annotations + require.Contains(t, annotations, expected) + require.Equal(t, annotations[expected], "deadbeef") +} + +func TestNewIndexGatewayStatefulSet_SelectorMatchesLabels(t *testing.T) { + // You must set the .spec.selector field of a StatefulSet to match the labels of + // its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the + // .spec.selector field was defaulted when omitted. In 1.8 and later versions, + // failing to specify a matching Pod Selector will result in a validation error + // during StatefulSet creation. 
+ // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-selector + ss := manifests.NewIndexGatewayStatefulSet(manifests.Options{ + Name: "abcd", + Namespace: "efgh", + Stack: lokiv1beta1.LokiStackSpec{ + StorageClassName: "standard", + Template: &lokiv1beta1.LokiTemplateSpec{ + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + }) + + l := ss.Spec.Template.GetObjectMeta().GetLabels() + for key, value := range ss.Spec.Selector.MatchLabels { + require.Contains(t, l, key) + require.Equal(t, l[key], value) + } +} diff --git a/operator/internal/manifests/ingester.go b/operator/internal/manifests/ingester.go new file mode 100644 index 0000000000..4d34c0ba9e --- /dev/null +++ b/operator/internal/manifests/ingester.go @@ -0,0 +1,267 @@ +package manifests + +import ( + "fmt" + "path" + + "github.com/grafana/loki-operator/internal/manifests/internal/config" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// BuildIngester builds the k8s objects required to run Loki Ingester +func BuildIngester(opts Options) ([]client.Object, error) { + statefulSet := NewIngesterStatefulSet(opts) + if opts.Flags.EnableTLSServiceMonitorConfig { + if err := configureIngesterServiceMonitorPKI(statefulSet, opts.Name); err != nil { + return nil, err + } + } + + return []client.Object{ + statefulSet, + NewIngesterGRPCService(opts), + NewIngesterHTTPService(opts), + }, nil +} + +// NewIngesterStatefulSet creates a deployment object for an ingester +func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet { + podSpec := corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: configVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: 
&defaultConfigMapMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: lokiConfigMapName(opts.Name), + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Image: opts.Image, + Name: "loki-ingester", + Resources: corev1.ResourceRequirements{ + Limits: opts.ResourceRequirements.Ingester.Limits, + Requests: opts.ResourceRequirements.Ingester.Requests, + }, + Args: []string{ + "-target=ingester", + fmt.Sprintf("-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), + fmt.Sprintf("-runtime-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.FromInt(httpPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + PeriodSeconds: 10, + InitialDelaySeconds: 15, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/metrics", + Port: intstr.FromInt(httpPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + TimeoutSeconds: 2, + PeriodSeconds: 30, + FailureThreshold: 10, + SuccessThreshold: 1, + }, + Ports: []corev1.ContainerPort{ + { + Name: lokiHTTPPortName, + ContainerPort: httpPort, + Protocol: protocolTCP, + }, + { + Name: lokiGRPCPortName, + ContainerPort: grpcPort, + Protocol: protocolTCP, + }, + { + Name: lokiGossipPortName, + ContainerPort: gossipPort, + Protocol: protocolTCP, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: configVolumeName, + ReadOnly: false, + MountPath: config.LokiConfigMountDir, + }, + { + Name: storageVolumeName, + ReadOnly: false, + MountPath: dataDirectory, + }, + { + Name: walVolumeName, + ReadOnly: false, + MountPath: walDirectory, + }, + }, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: "File", + ImagePullPolicy: "IfNotPresent", + }, + }, + } + + if opts.Stack.Template != nil 
&& opts.Stack.Template.Ingester != nil { + podSpec.Tolerations = opts.Stack.Template.Ingester.Tolerations + podSpec.NodeSelector = opts.Stack.Template.Ingester.NodeSelector + } + + l := ComponentLabels(LabelIngesterComponent, opts.Name) + a := commonAnnotations(opts.ConfigSHA1) + return &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: IngesterName(opts.Name), + Labels: l, + }, + Spec: appsv1.StatefulSetSpec{ + PodManagementPolicy: appsv1.OrderedReadyPodManagement, + RevisionHistoryLimit: pointer.Int32Ptr(10), + Replicas: pointer.Int32Ptr(opts.Stack.Template.Ingester.Replicas), + Selector: &metav1.LabelSelector{ + MatchLabels: labels.Merge(l, GossipLabels()), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("loki-ingester-%s", opts.Name), + Labels: labels.Merge(l, GossipLabels()), + Annotations: a, + }, + Spec: podSpec, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Labels: l, + Name: storageVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + // TODO: should we verify that this is possible with the given storage class first? + corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: opts.ResourceRequirements.Ingester.PVCSize, + }, + }, + StorageClassName: pointer.StringPtr(opts.Stack.StorageClassName), + VolumeMode: &volumeFileSystemMode, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Labels: l, + Name: walVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + // TODO: should we verify that this is possible with the given storage class first? 
+ corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: opts.ResourceRequirements.WALStorage.PVCSize, + }, + }, + StorageClassName: pointer.StringPtr(opts.Stack.StorageClassName), + VolumeMode: &volumeFileSystemMode, + }, + }, + }, + }, + } +} + +// NewIngesterGRPCService creates a k8s service for the ingester GRPC endpoint +func NewIngesterGRPCService(opts Options) *corev1.Service { + l := ComponentLabels(LabelIngesterComponent, opts.Name) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceNameIngesterGRPC(opts.Name), + Labels: l, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: lokiGRPCPortName, + Port: grpcPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: grpcPort}, + }, + }, + Selector: l, + }, + } +} + +// NewIngesterHTTPService creates a k8s service for the ingester HTTP endpoint +func NewIngesterHTTPService(opts Options) *corev1.Service { + serviceName := serviceNameIngesterHTTP(opts.Name) + l := ComponentLabels(LabelIngesterComponent, opts.Name) + a := serviceAnnotations(serviceName, opts.Flags.EnableCertificateSigningService) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Labels: l, + Annotations: a, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: lokiHTTPPortName, + Port: httpPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: httpPort}, + }, + }, + Selector: l, + }, + } +} + +func configureIngesterServiceMonitorPKI(statefulSet *appsv1.StatefulSet, stackName string) error { + serviceName := serviceNameIngesterHTTP(stackName) + return 
configureServiceMonitorPKI(&statefulSet.Spec.Template.Spec, serviceName) +} diff --git a/operator/internal/manifests/ingester_test.go b/operator/internal/manifests/ingester_test.go new file mode 100644 index 0000000000..52852d6444 --- /dev/null +++ b/operator/internal/manifests/ingester_test.go @@ -0,0 +1,57 @@ +package manifests_test + +import ( + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests" + "github.com/stretchr/testify/require" +) + +func TestNewIngesterStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) { + ss := manifests.NewIngesterStatefulSet(manifests.Options{ + Name: "abcd", + Namespace: "efgh", + ConfigSHA1: "deadbeef", + Stack: lokiv1beta1.LokiStackSpec{ + StorageClassName: "standard", + Template: &lokiv1beta1.LokiTemplateSpec{ + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + }) + + expected := "loki.grafana.com/config-hash" + annotations := ss.Spec.Template.Annotations + require.Contains(t, annotations, expected) + require.Equal(t, annotations[expected], "deadbeef") +} + +func TestNewIngesterStatefulSet_SelectorMatchesLabels(t *testing.T) { + // You must set the .spec.selector field of a StatefulSet to match the labels of + // its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the + // .spec.selector field was defaulted when omitted. In 1.8 and later versions, + // failing to specify a matching Pod Selector will result in a validation error + // during StatefulSet creation. 
+ // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-selector + sts := manifests.NewIngesterStatefulSet(manifests.Options{ + Name: "abcd", + Namespace: "efgh", + Stack: lokiv1beta1.LokiStackSpec{ + StorageClassName: "standard", + Template: &lokiv1beta1.LokiTemplateSpec{ + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + }) + + l := sts.Spec.Template.GetObjectMeta().GetLabels() + for key, value := range sts.Spec.Selector.MatchLabels { + require.Contains(t, l, key) + require.Equal(t, l[key], value) + } +} diff --git a/operator/internal/manifests/internal/config/build.go b/operator/internal/manifests/internal/config/build.go new file mode 100644 index 0000000000..3bb49021cf --- /dev/null +++ b/operator/internal/manifests/internal/config/build.go @@ -0,0 +1,56 @@ +package config + +import ( + "bytes" + "embed" + "io/ioutil" + "text/template" + + "github.com/ViaQ/logerr/kverrors" +) + +const ( + // LokiConfigFileName is the name of the config file in the configmap + LokiConfigFileName = "config.yaml" + // LokiRuntimeConfigFileName is the name of the runtime config file in the configmap + LokiRuntimeConfigFileName = "runtime-config.yaml" + // LokiConfigMountDir is the path that is mounted from the configmap + LokiConfigMountDir = "/etc/loki/config" +) + +var ( + //go:embed loki-config.yaml + lokiConfigYAMLTmplFile embed.FS + + //go:embed loki-runtime-config.yaml + lokiRuntimeConfigYAMLTmplFile embed.FS + + lokiConfigYAMLTmpl = template.Must(template.ParseFS(lokiConfigYAMLTmplFile, "loki-config.yaml")) + + lokiRuntimeConfigYAMLTmpl = template.Must(template.ParseFS(lokiRuntimeConfigYAMLTmplFile, "loki-runtime-config.yaml")) +) + +// Build builds a loki stack configuration files +func Build(opts Options) ([]byte, []byte, error) { + // Build loki config yaml + w := bytes.NewBuffer(nil) + err := lokiConfigYAMLTmpl.Execute(w, opts) + if err != nil { + return nil, nil, kverrors.Wrap(err, "failed to create loki 
configuration") + } + cfg, err := ioutil.ReadAll(w) + if err != nil { + return nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer") + } + // Build loki runtime config yaml + w = bytes.NewBuffer(nil) + err = lokiRuntimeConfigYAMLTmpl.Execute(w, opts) + if err != nil { + return nil, nil, kverrors.Wrap(err, "failed to create loki runtime configuration") + } + rcfg, err := ioutil.ReadAll(w) + if err != nil { + return nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer") + } + return cfg, rcfg, nil +} diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go new file mode 100644 index 0000000000..eb0b26d9d2 --- /dev/null +++ b/operator/internal/manifests/internal/config/build_test.go @@ -0,0 +1,518 @@ +package config + +import ( + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/stretchr/testify/require" +) + +func TestBuild_ConfigAndRuntimeConfig_NoRuntimeConfigGenerated(t *testing.T) { + expCfg := ` +--- +auth_enabled: true +chunk_store_config: + chunk_cache_config: + enable_fifocache: true + fifocache: + max_size_bytes: 500MB +compactor: + compaction_interval: 2h + shared_store: s3 + working_directory: /tmp/loki/compactor +frontend: + tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 + compress_responses: true + max_outstanding_per_tenant: 256 + log_queries_longer_than: 5s +frontend_worker: + frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 + grpc_client_config: + max_send_msg_size: 104857600 + parallelism: 1 +ingester: + chunk_block_size: 262144 + chunk_encoding: snappy + chunk_idle_period: 1h + chunk_retain_period: 30s + chunk_target_size: 1048576 + lifecycler: + final_sleep: 0s + heartbeat_period: 5s + interface_names: + - eth0 + join_after: 30s + num_tokens: 512 + ring: + replication_factor: 1 + heartbeat_timeout: 1m + max_transfer_retries: 0 
+ wal: + enabled: true + dir: /tmp/wal + replay_memory_ceiling: 2500 +ingester_client: + grpc_client_config: + max_recv_msg_size: 67108864 + remote_timeout: 1s +# NOTE: Keep the order of keys as in Loki docs +# to enable easy diffs when vendoring newer +# Loki releases. +# (See https://grafana.com/docs/loki/latest/configuration/#limits_config) +# +# Values for not exposed fields are taken from the grafana/loki production +# configuration manifests. +# (See https://github.com/grafana/loki/blob/main/production/ksonnet/loki/config.libsonnet) +limits_config: + ingestion_rate_strategy: global + ingestion_rate_mb: 4 + ingestion_burst_size_mb: 6 + max_label_name_length: 1024 + max_label_value_length: 2048 + max_label_names_per_series: 30 + reject_old_samples: true + reject_old_samples_max_age: 168h + creation_grace_period: 10m + enforce_metric_name: false + # Keep max_streams_per_user always to 0 to default + # using max_global_streams_per_user always. + # (See https://github.com/grafana/loki/blob/main/pkg/ingester/limiter.go#L73) + max_streams_per_user: 0 + max_line_size: 256000 + max_entries_limit_per_query: 5000 + max_global_streams_per_user: 0 + max_chunks_per_query: 2000000 + max_query_length: 721h + max_query_parallelism: 32 + max_query_series: 500 + cardinality_limit: 100000 + max_streams_matchers_per_query: 1000 + max_cache_freshness_per_query: 10m + per_stream_rate_limit: 3MB + per_stream_rate_limit_burst: 15MB +memberlist: + abort_if_cluster_join_fails: true + bind_port: 7946 + join_members: + - loki-gossip-ring-lokistack-dev.default.svc.cluster.local:7946 + max_join_backoff: 1m + max_join_retries: 10 + min_join_backoff: 1s +querier: + engine: + max_look_back_period: 30s + timeout: 3m + extra_query_delay: 0s + query_ingesters_within: 2h + query_timeout: 1m + tail_max_duration: 1h +query_range: + align_queries_with_step: true + cache_results: true + max_retries: 5 + results_cache: + cache: + enable_fifocache: true + fifocache: + max_size_bytes: 500MB + 
split_queries_by_interval: 30m + parallelise_shardable_queries: false +schema_config: + configs: + - from: "2020-10-01" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v11 + store: boltdb-shipper +server: + graceful_shutdown_timeout: 5s + grpc_server_min_time_between_pings: '10s' + grpc_server_ping_without_stream_allowed: true + grpc_server_max_concurrent_streams: 1000 + grpc_server_max_recv_msg_size: 104857600 + grpc_server_max_send_msg_size: 104857600 + http_listen_port: 3100 + http_server_idle_timeout: 120s + http_server_write_timeout: 1m + log_level: info +storage_config: + boltdb_shipper: + active_index_directory: /tmp/loki/index + cache_location: /tmp/loki/index_cache + cache_ttl: 24h + resync_interval: 5m + shared_store: s3 + index_gateway_client: + server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095 + aws: + s3: http://test.default.svc.cluster.local.:9000 + bucketnames: loki + region: us-east + access_key_id: test + secret_access_key: test123 + s3forcepathstyle: true +tracing: + enabled: false +` + expRCfg := ` +--- +overrides: +` + opts := Options{ + Stack: lokiv1beta1.LokiStackSpec{ + ReplicationFactor: 1, + Limits: &lokiv1beta1.LimitsSpec{ + Global: &lokiv1beta1.LimitsTemplateSpec{ + IngestionLimits: &lokiv1beta1.IngestionLimitSpec{ + IngestionRate: 4, + IngestionBurstSize: 6, + MaxLabelNameLength: 1024, + MaxLabelValueLength: 2048, + MaxLabelNamesPerSeries: 30, + MaxGlobalStreamsPerTenant: 0, + MaxLineSize: 256000, + }, + QueryLimits: &lokiv1beta1.QueryLimitSpec{ + MaxEntriesLimitPerQuery: 5000, + MaxChunksPerQuery: 2000000, + MaxQuerySeries: 500, + }, + }, + }, + }, + Namespace: "test-ns", + Name: "test", + FrontendWorker: Address{ + FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + }, + GossipRing: Address{ + FQDN: "loki-gossip-ring-lokistack-dev.default.svc.cluster.local", + Port: 7946, + }, + Querier: Address{ + FQDN: 
"loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, + }, + IndexGateway: Address{ + FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + }, + StorageDirectory: "/tmp/loki", + ObjectStorage: ObjectStorage{ + Endpoint: "http://test.default.svc.cluster.local.:9000", + Region: "us-east", + Buckets: "loki", + AccessKeyID: "test", + AccessKeySecret: "test123", + }, + QueryParallelism: Parallelism{ + QuerierCPULimits: 2, + QueryFrontendReplicas: 2, + }, + WriteAheadLog: WriteAheadLog{ + Directory: "/tmp/wal", + IngesterMemoryRequest: 5000, + }, + } + cfg, rCfg, err := Build(opts) + require.NoError(t, err) + require.YAMLEq(t, expCfg, string(cfg)) + require.YAMLEq(t, expRCfg, string(rCfg)) +} + +func TestBuild_ConfigAndRuntimeConfig_BothGenerated(t *testing.T) { + expCfg := ` +--- +auth_enabled: true +chunk_store_config: + chunk_cache_config: + enable_fifocache: true + fifocache: + max_size_bytes: 500MB +compactor: + compaction_interval: 2h + shared_store: s3 + working_directory: /tmp/loki/compactor +frontend: + tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 + compress_responses: true + max_outstanding_per_tenant: 256 + log_queries_longer_than: 5s +frontend_worker: + frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 + grpc_client_config: + max_send_msg_size: 104857600 + parallelism: 1 +ingester: + chunk_block_size: 262144 + chunk_encoding: snappy + chunk_idle_period: 1h + chunk_retain_period: 30s + chunk_target_size: 1048576 + lifecycler: + final_sleep: 0s + heartbeat_period: 5s + interface_names: + - eth0 + join_after: 30s + num_tokens: 512 + ring: + replication_factor: 1 + heartbeat_timeout: 1m + max_transfer_retries: 0 + wal: + enabled: true + dir: /tmp/wal + replay_memory_ceiling: 2500 +ingester_client: + grpc_client_config: + max_recv_msg_size: 67108864 + remote_timeout: 1s +# NOTE: Keep the order of keys as in Loki docs +# to enable 
easy diffs when vendoring newer +# Loki releases. +# (See https://grafana.com/docs/loki/latest/configuration/#limits_config) +# +# Values for not exposed fields are taken from the grafana/loki production +# configuration manifests. +# (See https://github.com/grafana/loki/blob/main/production/ksonnet/loki/config.libsonnet) +limits_config: + ingestion_rate_strategy: global + ingestion_rate_mb: 4 + ingestion_burst_size_mb: 6 + max_label_name_length: 1024 + max_label_value_length: 2048 + max_label_names_per_series: 30 + reject_old_samples: true + reject_old_samples_max_age: 168h + creation_grace_period: 10m + enforce_metric_name: false + # Keep max_streams_per_user always to 0 to default + # using max_global_streams_per_user always. + # (See https://github.com/grafana/loki/blob/main/pkg/ingester/limiter.go#L73) + max_streams_per_user: 0 + max_line_size: 256000 + max_entries_limit_per_query: 5000 + max_global_streams_per_user: 0 + max_chunks_per_query: 2000000 + max_query_length: 721h + max_query_parallelism: 32 + max_query_series: 500 + cardinality_limit: 100000 + max_streams_matchers_per_query: 1000 + max_cache_freshness_per_query: 10m + per_stream_rate_limit: 3MB + per_stream_rate_limit_burst: 15MB +memberlist: + abort_if_cluster_join_fails: true + bind_port: 7946 + join_members: + - loki-gossip-ring-lokistack-dev.default.svc.cluster.local:7946 + max_join_backoff: 1m + max_join_retries: 10 + min_join_backoff: 1s +querier: + engine: + max_look_back_period: 30s + timeout: 3m + extra_query_delay: 0s + query_ingesters_within: 2h + query_timeout: 1m + tail_max_duration: 1h +query_range: + align_queries_with_step: true + cache_results: true + max_retries: 5 + results_cache: + cache: + enable_fifocache: true + fifocache: + max_size_bytes: 500MB + split_queries_by_interval: 30m + parallelise_shardable_queries: false +schema_config: + configs: + - from: "2020-10-01" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v11 + store: boltdb-shipper +server: + 
graceful_shutdown_timeout: 5s + grpc_server_min_time_between_pings: '10s' + grpc_server_ping_without_stream_allowed: true + grpc_server_max_concurrent_streams: 1000 + grpc_server_max_recv_msg_size: 104857600 + grpc_server_max_send_msg_size: 104857600 + http_listen_port: 3100 + http_server_idle_timeout: 120s + http_server_write_timeout: 1m + log_level: info +storage_config: + boltdb_shipper: + active_index_directory: /tmp/loki/index + cache_location: /tmp/loki/index_cache + cache_ttl: 24h + resync_interval: 5m + shared_store: s3 + index_gateway_client: + server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095 + aws: + s3: http://test.default.svc.cluster.local.:9000 + bucketnames: loki + region: us-east + access_key_id: test + secret_access_key: test123 + s3forcepathstyle: true +tracing: + enabled: false +` + expRCfg := ` +--- +overrides: + test-a: + ingestion_rate_mb: 2 + ingestion_burst_size_mb: 5 + max_global_streams_per_user: 1 + max_chunks_per_query: 1000000 +` + opts := Options{ + Stack: lokiv1beta1.LokiStackSpec{ + ReplicationFactor: 1, + Limits: &lokiv1beta1.LimitsSpec{ + Global: &lokiv1beta1.LimitsTemplateSpec{ + IngestionLimits: &lokiv1beta1.IngestionLimitSpec{ + IngestionRate: 4, + IngestionBurstSize: 6, + MaxLabelNameLength: 1024, + MaxLabelValueLength: 2048, + MaxLabelNamesPerSeries: 30, + MaxGlobalStreamsPerTenant: 0, + MaxLineSize: 256000, + }, + QueryLimits: &lokiv1beta1.QueryLimitSpec{ + MaxEntriesLimitPerQuery: 5000, + MaxChunksPerQuery: 2000000, + MaxQuerySeries: 500, + }, + }, + Tenants: map[string]lokiv1beta1.LimitsTemplateSpec{ + "test-a": { + IngestionLimits: &lokiv1beta1.IngestionLimitSpec{ + IngestionRate: 2, + IngestionBurstSize: 5, + MaxGlobalStreamsPerTenant: 1, + }, + QueryLimits: &lokiv1beta1.QueryLimitSpec{ + MaxChunksPerQuery: 1000000, + }, + }, + }, + }, + }, + Namespace: "test-ns", + Name: "test", + FrontendWorker: Address{ + FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", 
+ Port: 9095, + }, + GossipRing: Address{ + FQDN: "loki-gossip-ring-lokistack-dev.default.svc.cluster.local", + Port: 7946, + }, + Querier: Address{ + FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, + }, + IndexGateway: Address{ + FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + }, + StorageDirectory: "/tmp/loki", + ObjectStorage: ObjectStorage{ + Endpoint: "http://test.default.svc.cluster.local.:9000", + Region: "us-east", + Buckets: "loki", + AccessKeyID: "test", + AccessKeySecret: "test123", + }, + QueryParallelism: Parallelism{ + QuerierCPULimits: 2, + QueryFrontendReplicas: 2, + }, + WriteAheadLog: WriteAheadLog{ + Directory: "/tmp/wal", + IngesterMemoryRequest: 5000, + }, + } + cfg, rCfg, err := Build(opts) + require.NoError(t, err) + require.YAMLEq(t, expCfg, string(cfg)) + require.YAMLEq(t, expRCfg, string(rCfg)) +} + +func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) { + opts := Options{ + Stack: lokiv1beta1.LokiStackSpec{ + ReplicationFactor: 1, + Limits: &lokiv1beta1.LimitsSpec{ + Global: &lokiv1beta1.LimitsTemplateSpec{ + IngestionLimits: &lokiv1beta1.IngestionLimitSpec{ + IngestionRate: 4, + IngestionBurstSize: 6, + MaxLabelNameLength: 1024, + MaxLabelValueLength: 2048, + MaxLabelNamesPerSeries: 30, + MaxGlobalStreamsPerTenant: 0, + MaxLineSize: 256000, + }, + // making it nil so that the template is not generated and error is returned + QueryLimits: nil, + }, + }, + }, + Namespace: "test-ns", + Name: "test", + FrontendWorker: Address{ + FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + }, + GossipRing: Address{ + FQDN: "loki-gossip-ring-lokistack-dev.default.svc.cluster.local", + Port: 7946, + }, + Querier: Address{ + FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, + }, + IndexGateway: Address{ + FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + 
}, + StorageDirectory: "/tmp/loki", + ObjectStorage: ObjectStorage{ + Endpoint: "http://test.default.svc.cluster.local.:9000", + Region: "us-east", + Buckets: "loki", + AccessKeyID: "test", + AccessKeySecret: "test123", + }, + QueryParallelism: Parallelism{ + QuerierCPULimits: 2, + QueryFrontendReplicas: 2, + }, + WriteAheadLog: WriteAheadLog{ + Directory: "/tmp/wal", + IngesterMemoryRequest: 5000, + }, + } + cfg, rCfg, err := Build(opts) + require.Error(t, err) + require.Empty(t, cfg) + require.Empty(t, rCfg) +} diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml new file mode 100644 index 0000000000..88c757ea7e --- /dev/null +++ b/operator/internal/manifests/internal/config/loki-config.yaml @@ -0,0 +1,146 @@ +--- +auth_enabled: true +chunk_store_config: + chunk_cache_config: + enable_fifocache: true + fifocache: + max_size_bytes: 500MB +compactor: + compaction_interval: 2h + shared_store: s3 + working_directory: {{ .StorageDirectory }}/compactor +frontend: + tail_proxy_url: http://{{ .Querier.FQDN }}:{{ .Querier.Port }} + compress_responses: true + max_outstanding_per_tenant: 256 + log_queries_longer_than: 5s +frontend_worker: + frontend_address: {{ .FrontendWorker.FQDN }}:{{ .FrontendWorker.Port }} + grpc_client_config: + max_send_msg_size: 104857600 + parallelism: {{ .QueryParallelism.Value }} +ingester: + chunk_block_size: 262144 + chunk_encoding: snappy + chunk_idle_period: 1h + chunk_retain_period: 30s + chunk_target_size: 1048576 + lifecycler: + final_sleep: 0s + heartbeat_period: 5s + interface_names: + - eth0 + join_after: 30s + num_tokens: 512 + ring: + replication_factor: {{ .Stack.ReplicationFactor }} + heartbeat_timeout: 1m + max_transfer_retries: 0 + wal: + enabled: true + dir: {{ .WriteAheadLog.Directory }} + replay_memory_ceiling: {{ .WriteAheadLog.ReplayMemoryCeiling }} +ingester_client: + grpc_client_config: + max_recv_msg_size: 67108864 + remote_timeout: 1s +# 
NOTE: Keep the order of keys as in Loki docs +# to enable easy diffs when vendoring newer +# Loki releases. +# (See https://grafana.com/docs/loki/latest/configuration/#limits_config) +# +# Values for not exposed fields are taken from the grafana/loki production +# configuration manifests. +# (See https://github.com/grafana/loki/blob/main/production/ksonnet/loki/config.libsonnet) +limits_config: + ingestion_rate_strategy: global + ingestion_rate_mb: {{ .Stack.Limits.Global.IngestionLimits.IngestionRate }} + ingestion_burst_size_mb: {{ .Stack.Limits.Global.IngestionLimits.IngestionBurstSize }} + max_label_name_length: {{ .Stack.Limits.Global.IngestionLimits.MaxLabelNameLength }} + max_label_value_length: {{ .Stack.Limits.Global.IngestionLimits.MaxLabelValueLength }} + max_label_names_per_series: {{ .Stack.Limits.Global.IngestionLimits.MaxLabelNamesPerSeries }} + reject_old_samples: true + reject_old_samples_max_age: 168h + creation_grace_period: 10m + enforce_metric_name: false + # Keep max_streams_per_user always to 0 to default + # using max_global_streams_per_user always. 
+ # (See https://github.com/grafana/loki/blob/main/pkg/ingester/limiter.go#L73) + max_streams_per_user: 0 + max_line_size: {{ .Stack.Limits.Global.IngestionLimits.MaxLineSize }} + max_entries_limit_per_query: {{ .Stack.Limits.Global.QueryLimits.MaxEntriesLimitPerQuery }} + max_global_streams_per_user: {{ .Stack.Limits.Global.IngestionLimits.MaxGlobalStreamsPerTenant }} + max_chunks_per_query: {{ .Stack.Limits.Global.QueryLimits.MaxChunksPerQuery }} + max_query_length: 721h + max_query_parallelism: 32 + max_query_series: {{ .Stack.Limits.Global.QueryLimits.MaxQuerySeries }} + cardinality_limit: 100000 + max_streams_matchers_per_query: 1000 + max_cache_freshness_per_query: 10m + per_stream_rate_limit: 3MB + per_stream_rate_limit_burst: 15MB +memberlist: + abort_if_cluster_join_fails: true + bind_port: {{ .GossipRing.Port }} + join_members: + - {{ .GossipRing.FQDN }}:{{ .GossipRing.Port }} + max_join_backoff: 1m + max_join_retries: 10 + min_join_backoff: 1s +querier: + engine: + max_look_back_period: 30s + timeout: 3m + extra_query_delay: 0s + query_ingesters_within: 2h + query_timeout: 1m + tail_max_duration: 1h +query_range: + align_queries_with_step: true + cache_results: true + max_retries: 5 + results_cache: + cache: + enable_fifocache: true + fifocache: + max_size_bytes: 500MB + split_queries_by_interval: 30m + parallelise_shardable_queries: false +schema_config: + configs: + - from: "2020-10-01" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v11 + store: boltdb-shipper +server: + graceful_shutdown_timeout: 5s + grpc_server_min_time_between_pings: '10s' + grpc_server_ping_without_stream_allowed: true + grpc_server_max_concurrent_streams: 1000 + grpc_server_max_recv_msg_size: 104857600 + grpc_server_max_send_msg_size: 104857600 + http_listen_port: 3100 + http_server_idle_timeout: 120s + http_server_write_timeout: 1m + log_level: info +storage_config: + boltdb_shipper: + active_index_directory: {{ .StorageDirectory }}/index + cache_location: 
{{ .StorageDirectory }}/index_cache + cache_ttl: 24h + resync_interval: 5m + shared_store: s3 + index_gateway_client: + server_address: dns:///{{ .IndexGateway.FQDN }}:{{ .IndexGateway.Port }} + aws: + s3: {{ .ObjectStorage.Endpoint }} + bucketnames: {{ .ObjectStorage.Buckets }} + region: {{ .ObjectStorage.Region }} + access_key_id: {{ .ObjectStorage.AccessKeyID }} + secret_access_key: {{ .ObjectStorage.AccessKeySecret }} + s3forcepathstyle: true +tracing: + enabled: false diff --git a/operator/internal/manifests/internal/config/loki-runtime-config.yaml b/operator/internal/manifests/internal/config/loki-runtime-config.yaml new file mode 100644 index 0000000000..a2d23ab7b3 --- /dev/null +++ b/operator/internal/manifests/internal/config/loki-runtime-config.yaml @@ -0,0 +1,39 @@ +--- +overrides: + {{- range $tenant, $spec := .Stack.Limits.Tenants }} + {{ $tenant }}: + {{- if $l := $spec.IngestionLimits -}} + {{ if $l.IngestionRate }} + ingestion_rate_mb: {{ $l.IngestionRate }} + {{- end -}} + {{ if $l.IngestionBurstSize }} + ingestion_burst_size_mb: {{ $l.IngestionBurstSize }} + {{- end -}} + {{ if $l.MaxLabelNameLength }} + max_label_name_length: {{ $l.MaxLabelNameLength }} + {{- end -}} + {{ if $l.MaxLabelValueLength }} + max_label_value_length: {{ $l.MaxLabelValueLength }} + {{- end -}} + {{ if $l.MaxLabelNamesPerSeries }} + max_label_names_per_series: {{ $l.MaxLabelNamesPerSeries }} + {{- end -}} + {{ if $l.MaxLineSize }} + max_line_size: {{ $l.MaxLineSize }} + {{- end -}} + {{ if $l.MaxGlobalStreamsPerTenant }} + max_global_streams_per_user: {{ $l.MaxGlobalStreamsPerTenant }} + {{- end -}} + {{- end -}} + {{- if $l := $spec.QueryLimits -}} + {{ if $l.MaxEntriesLimitPerQuery }} + max_entries_limit_per_query: {{ $spec.QueryLimits.MaxEntriesLimitPerQuery }} + {{- end -}} + {{ if $spec.QueryLimits.MaxChunksPerQuery }} + max_chunks_per_query: {{ $spec.QueryLimits.MaxChunksPerQuery }} + {{- end -}} + {{ if $spec.QueryLimits.MaxQuerySeries }} + max_query_series: {{ 
$spec.QueryLimits.MaxQuerySeries }} + {{- end -}} + {{- end -}} + {{- end -}} diff --git a/operator/internal/manifests/internal/config/options.go b/operator/internal/manifests/internal/config/options.go new file mode 100644 index 0000000000..75198de9b8 --- /dev/null +++ b/operator/internal/manifests/internal/config/options.go @@ -0,0 +1,68 @@ +package config + +import ( + "fmt" + "math" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" +) + +// Options is used to render the loki-config.yaml file template +type Options struct { + Stack lokiv1beta1.LokiStackSpec + + Namespace string + Name string + FrontendWorker Address + GossipRing Address + Querier Address + IndexGateway Address + StorageDirectory string + ObjectStorage ObjectStorage + QueryParallelism Parallelism + WriteAheadLog WriteAheadLog +} + +// Address FQDN and port for a k8s service. +type Address struct { + // FQDN is required + FQDN string + // Port is required + Port int +} + +// ObjectStorage for storage config. +type ObjectStorage struct { + Endpoint string + Region string + Buckets string + AccessKeyID string + AccessKeySecret string +} + +// Parallelism for query processing parallelism +// and rate limiting. +type Parallelism struct { + QuerierCPULimits int64 + QueryFrontendReplicas int32 +} + +// WriteAheadLog for ingester processing +type WriteAheadLog struct { + Directory string + IngesterMemoryRequest int64 +} + +// Value calculates the floor of the division of +// querier cpu limits to the query frontend replicas +// available. +func (p Parallelism) Value() int32 { + return int32(math.Floor(float64(p.QuerierCPULimits) / float64(p.QueryFrontendReplicas))) +} + +// ReplayMemoryCeiling calculates 50% of the ingester memory +// for the ingester to use for the write-ahead-log capability. 
+func (w WriteAheadLog) ReplayMemoryCeiling() string { + value := int64(math.Ceil(float64(w.IngesterMemoryRequest) * float64(0.5))) + return fmt.Sprintf("%d", value) +} diff --git a/operator/internal/manifests/internal/gateway/build.go b/operator/internal/manifests/internal/gateway/build.go new file mode 100644 index 0000000000..262f2997d0 --- /dev/null +++ b/operator/internal/manifests/internal/gateway/build.go @@ -0,0 +1,88 @@ +package gateway + +import ( + "bytes" + "embed" + "io/ioutil" + "text/template" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + + "github.com/ViaQ/logerr/kverrors" +) + +const ( + // LokiGatewayTenantFileName is the name of the tenant config file in the configmap + LokiGatewayTenantFileName = "tenants.yaml" + // LokiGatewayRbacFileName is the name of the rbac config file in the configmap + LokiGatewayRbacFileName = "rbac.yaml" + // LokiGatewayRegoFileName is the name of the lokistack-gateway rego config file in the configmap + LokiGatewayRegoFileName = "lokistack-gateway.rego" + // LokiGatewayMountDir is the path that is mounted from the configmap + LokiGatewayMountDir = "/etc/lokistack-gateway" + // LokiGatewayTLSDir is the path that is mounted from the configmap for TLS + LokiGatewayTLSDir = "/var/run/tls" + // LokiGatewayCABundleDir is the path that is mounted from the configmap for TLS + LokiGatewayCABundleDir = "/var/run/ca" + // LokiGatewayCAFile is the file name of the certificate authority file + LokiGatewayCAFile = "service-ca.crt" + // LokiGatewayCertFile is the file of the X509 server certificate file + LokiGatewayCertFile = "tls.crt" + // LokiGatewayKeyFile is the file name of the server private key + LokiGatewayKeyFile = "tls.key" +) + +var ( + //go:embed gateway-rbac.yaml + lokiGatewayRbacYAMLTmplFile embed.FS + + //go:embed gateway-tenants.yaml + lokiGatewayTenantsYAMLTmplFile embed.FS + + //go:embed lokistack-gateway.rego + lokiStackGatewayRegoTmplFile embed.FS + + lokiGatewayRbacYAMLTmpl = 
template.Must(template.ParseFS(lokiGatewayRbacYAMLTmplFile, "gateway-rbac.yaml")) + + lokiGatewayTenantsYAMLTmpl = template.Must(template.ParseFS(lokiGatewayTenantsYAMLTmplFile, "gateway-tenants.yaml")) + + lokiStackGatewayRegoTmpl = template.Must(template.ParseFS(lokiStackGatewayRegoTmplFile, "lokistack-gateway.rego")) +) + +// Build builds a loki gateway configuration files +func Build(opts Options) (rbacCfg []byte, tenantsCfg []byte, regoCfg []byte, err error) { + // Build loki gateway rbac yaml + w := bytes.NewBuffer(nil) + err = lokiGatewayRbacYAMLTmpl.Execute(w, opts) + if err != nil { + return nil, nil, nil, kverrors.Wrap(err, "failed to create loki gateway rbac configuration") + } + rbacCfg, err = ioutil.ReadAll(w) + if err != nil { + return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer") + } + // Build loki gateway tenants yaml + w = bytes.NewBuffer(nil) + err = lokiGatewayTenantsYAMLTmpl.Execute(w, opts) + if err != nil { + return nil, nil, nil, kverrors.Wrap(err, "failed to create loki gateway tenants configuration") + } + tenantsCfg, err = ioutil.ReadAll(w) + if err != nil { + return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer") + } + // Build loki gateway observatorium rego for static mode + if opts.Stack.Tenants.Mode == lokiv1beta1.Static { + w = bytes.NewBuffer(nil) + err = lokiStackGatewayRegoTmpl.Execute(w, opts) + if err != nil { + return nil, nil, nil, kverrors.Wrap(err, "failed to create lokistack gateway rego configuration") + } + regoCfg, err = ioutil.ReadAll(w) + if err != nil { + return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer") + } + return rbacCfg, tenantsCfg, regoCfg, nil + } + return rbacCfg, tenantsCfg, nil, nil +} diff --git a/operator/internal/manifests/internal/gateway/build_test.go b/operator/internal/manifests/internal/gateway/build_test.go new file mode 100644 index 0000000000..49fbc527b3 --- /dev/null +++ 
b/operator/internal/manifests/internal/gateway/build_test.go @@ -0,0 +1,262 @@ +package gateway + +import ( + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests/openshift" + "github.com/stretchr/testify/require" +) + +func TestBuild_StaticMode(t *testing.T) { + expTntCfg := ` +tenants: +- name: test-a + id: test + oidc: + clientID: test + clientSecret: test123 + issuerCAPath: /tmp/ca/path + issuerURL: https://127.0.0.1:5556/dex + redirectURL: https://localhost:8443/oidc/test-a/callback + usernameClaim: test + groupClaim: test + opa: + query: data.lokistack.allow + paths: + - /etc/lokistack-gateway/rbac.yaml + - /etc/lokistack-gateway/lokistack-gateway.rego +` + expRbacCfg := ` +roleBindings: +- name: test-a + roles: + - read-write + subjects: + - kind: user + name: test@example.com +roles: +- name: some-name + permissions: + - read + resources: + - metrics + tenants: + - test-a +` + opts := Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.Static, + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test-a", + TenantID: "test", + OIDC: &lokiv1beta1.OIDCSpec{ + Secret: &lokiv1beta1.TenantSecretSpec{ + Name: "test", + }, + IssuerURL: "https://127.0.0.1:5556/dex", + RedirectURL: "https://localhost:8443/oidc/test-a/callback", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + Roles: []lokiv1beta1.RoleSpec{ + { + Name: "some-name", + Resources: []string{"metrics"}, + Tenants: []string{"test-a"}, + Permissions: []lokiv1beta1.PermissionType{"read"}, + }, + }, + RoleBindings: []lokiv1beta1.RoleBindingsSpec{ + { + Name: "test-a", + Subjects: []lokiv1beta1.Subject{ + { + Name: "test@example.com", + Kind: "user", + }, + }, + Roles: []string{"read-write"}, + }, + }, + }, + }, + }, + Namespace: "test-ns", + Name: "test", + TenantSecrets: []*Secret{ + { + TenantName: 
"test-a", + ClientID: "test", + ClientSecret: "test123", + IssuerCAPath: "/tmp/ca/path", + }, + }, + } + rbacConfig, tenantsConfig, regoCfg, err := Build(opts) + require.NoError(t, err) + require.YAMLEq(t, expTntCfg, string(tenantsConfig)) + require.YAMLEq(t, expRbacCfg, string(rbacConfig)) + require.NotEmpty(t, regoCfg) +} + +func TestBuild_DynamicMode(t *testing.T) { + expTntCfg := ` +tenants: +- name: test-a + id: test + oidc: + clientID: test + clientSecret: test123 + issuerCAPath: /tmp/ca/path + issuerURL: https://127.0.0.1:5556/dex + redirectURL: https://localhost:8443/oidc/test-a/callback + usernameClaim: test + groupClaim: test + opa: + url: http://127.0.0.1:8181/v1/data/observatorium/allow +` + opts := Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.Dynamic, + Authentication: []lokiv1beta1.AuthenticationSpec{ + { + TenantName: "test-a", + TenantID: "test", + OIDC: &lokiv1beta1.OIDCSpec{ + Secret: &lokiv1beta1.TenantSecretSpec{ + Name: "test", + }, + IssuerURL: "https://127.0.0.1:5556/dex", + RedirectURL: "https://localhost:8443/oidc/test-a/callback", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + Authorization: &lokiv1beta1.AuthorizationSpec{ + OPA: &lokiv1beta1.OPASpec{ + URL: "http://127.0.0.1:8181/v1/data/observatorium/allow", + }, + }, + }, + }, + Namespace: "test-ns", + Name: "test", + TenantSecrets: []*Secret{ + { + TenantName: "test-a", + ClientID: "test", + ClientSecret: "test123", + IssuerCAPath: "/tmp/ca/path", + }, + }, + } + rbacConfig, tenantsConfig, regoCfg, err := Build(opts) + require.NoError(t, err) + require.YAMLEq(t, expTntCfg, string(tenantsConfig)) + require.Empty(t, rbacConfig) + require.Empty(t, regoCfg) +} + +func TestBuild_OpenshiftLoggingMode(t *testing.T) { + expTntCfg := ` +tenants: +- name: application + id: 32e45e3e-b760-43a2-a7e1-02c5631e56e9 + openshift: + serviceAccount: lokistack-gateway + redirectURL: 
https://localhost:8443/openshift/application/callback + cookieSecret: abcd + opa: + url: http://127.0.0.1:8080/v1/data/lokistack/allow + withAccessToken: true +- name: infrastructure + id: 40de0532-10a2-430c-9a00-62c46455c118 + openshift: + serviceAccount: lokistack-gateway + redirectURL: https://localhost:8443/openshift/infrastructure/callback + cookieSecret: efgh + opa: + url: http://127.0.0.1:8080/v1/data/lokistack/allow + withAccessToken: true +- name: audit + id: 26d7c49d-182e-4d93-bade-510c6cc3243d + openshift: + serviceAccount: lokistack-gateway + redirectURL: https://localhost:8443/openshift/audit/callback + cookieSecret: deadbeef + opa: + url: http://127.0.0.1:8080/v1/data/lokistack/allow + withAccessToken: true +` + opts := Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.OpenshiftLogging, + }, + }, + OpenShiftOptions: openshift.Options{ + Authentication: []openshift.AuthenticationSpec{ + { + TenantName: "application", + TenantID: "32e45e3e-b760-43a2-a7e1-02c5631e56e9", + ServiceAccount: "lokistack-gateway", + RedirectURL: "https://localhost:8443/openshift/application/callback", + CookieSecret: "abcd", + }, + { + TenantName: "infrastructure", + TenantID: "40de0532-10a2-430c-9a00-62c46455c118", + ServiceAccount: "lokistack-gateway", + RedirectURL: "https://localhost:8443/openshift/infrastructure/callback", + CookieSecret: "efgh", + }, + { + TenantName: "audit", + TenantID: "26d7c49d-182e-4d93-bade-510c6cc3243d", + ServiceAccount: "lokistack-gateway", + RedirectURL: "https://localhost:8443/openshift/audit/callback", + CookieSecret: "deadbeef", + }, + }, + Authorization: openshift.AuthorizationSpec{ + OPAUrl: "http://127.0.0.1:8080/v1/data/lokistack/allow", + }, + }, + Namespace: "test-ns", + Name: "test", + TenantSecrets: []*Secret{ + { + TenantName: "application", + ClientID: "test", + ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", + IssuerCAPath: "./tmp/certs/ca.pem", + }, + { + TenantName: "infrastructure", 
+ ClientID: "test", + ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", + IssuerCAPath: "./tmp/certs/ca.pem", + }, + { + TenantName: "audit", + ClientID: "test", + ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", + IssuerCAPath: "./tmp/certs/ca.pem", + }, + }, + } + + rbacConfig, tenantsConfig, regoCfg, err := Build(opts) + require.NoError(t, err) + require.YAMLEq(t, expTntCfg, string(tenantsConfig)) + require.Empty(t, rbacConfig) + require.Empty(t, regoCfg) +} diff --git a/operator/internal/manifests/internal/gateway/gateway-rbac.yaml b/operator/internal/manifests/internal/gateway/gateway-rbac.yaml new file mode 100644 index 0000000000..fd140ca4cf --- /dev/null +++ b/operator/internal/manifests/internal/gateway/gateway-rbac.yaml @@ -0,0 +1,35 @@ +{{- if eq .Stack.Tenants.Mode "static" -}} +roleBindings: +{{- range $spec := .Stack.Tenants.Authorization.RoleBindings }} +- name: {{ $spec.Name }} + roles: + {{- range $role := $spec.Roles }} + - {{ $role }} + {{- end -}} + {{ print "\n" }} + subjects: + {{- range $subject := $spec.Subjects }} + - kind: {{ $subject.Kind }} + name: {{ $subject.Name }} + {{- end -}} +{{- end -}} +{{ print "\n" }} +roles: +{{- range $spec := .Stack.Tenants.Authorization.Roles }} +- name: {{ $spec.Name }} + permissions: + {{- range $permission := $spec.Permissions }} + - {{ $permission }} + {{- end -}} + {{ print "\n" }} + resources: + {{- range $resource := $spec.Resources }} + - {{ $resource }} + {{- end -}} + {{ print "\n" }} + tenants: + {{- range $tenant := $spec.Tenants }} + - {{ $tenant }} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/operator/internal/manifests/internal/gateway/gateway-tenants.yaml b/operator/internal/manifests/internal/gateway/gateway-tenants.yaml new file mode 100644 index 0000000000..4eb93a4eac --- /dev/null +++ b/operator/internal/manifests/internal/gateway/gateway-tenants.yaml @@ -0,0 +1,81 @@ +tenants: +{{- if $l := . 
-}} +{{- if eq $l.Stack.Tenants.Mode "static" -}} +{{- range $spec := $l.Stack.Tenants.Authentication }} +- name: {{ $spec.TenantName }} + id: {{ $spec.TenantID }} + oidc: + {{- range $secret := $l.TenantSecrets }} + {{- if eq $secret.TenantName $spec.TenantName -}} + {{ if $secret.ClientID }} + clientID: {{ $secret.ClientID }} + {{- end -}} + {{ if $secret.ClientSecret }} + clientSecret: {{ $secret.ClientSecret }} + {{- end -}} + {{ if $secret.IssuerCAPath }} + issuerCAPath: {{ $secret.IssuerCAPath }} + {{- end -}} + {{- end -}} + {{- end }} + issuerURL: {{ $spec.OIDC.IssuerURL }} + redirectURL: {{ $spec.OIDC.RedirectURL }} + {{ if $spec.OIDC.UsernameClaim }} + usernameClaim: {{ $spec.OIDC.UsernameClaim }} + {{- end -}} + {{- if $spec.OIDC.GroupClaim }} + groupClaim: {{ $spec.OIDC.GroupClaim }} + {{- end }} + opa: + query: data.lokistack.allow + paths: + - /etc/lokistack-gateway/rbac.yaml + - /etc/lokistack-gateway/lokistack-gateway.rego +{{- end -}} +{{- else if eq $l.Stack.Tenants.Mode "dynamic" -}} +{{- if $tenant := $l.Stack.Tenants -}} +{{- range $spec := $tenant.Authentication }} +- name: {{ $spec.TenantName }} + id: {{ $spec.TenantID }} + oidc: + {{- range $secret := $l.TenantSecrets }} + {{- if eq $secret.TenantName $spec.TenantName -}} + {{ if $secret.ClientID }} + clientID: {{ $secret.ClientID }} + {{- end -}} + {{ if $secret.ClientSecret }} + clientSecret: {{ $secret.ClientSecret }} + {{- end -}} + {{ if $secret.IssuerCAPath }} + issuerCAPath: {{ $secret.IssuerCAPath }} + {{- end -}} + {{- end -}} + {{- end }} + issuerURL: {{ $spec.OIDC.IssuerURL }} + redirectURL: {{ $spec.OIDC.RedirectURL }} + {{- if $spec.OIDC.UsernameClaim }} + usernameClaim: {{ $spec.OIDC.UsernameClaim }} + {{- end -}} + {{- if $spec.OIDC.GroupClaim }} + groupClaim: {{ $spec.OIDC.GroupClaim }} + {{- end }} + opa: + url: {{ $tenant.Authorization.OPA.URL }} +{{- end -}} +{{- end -}} +{{- else if eq $l.Stack.Tenants.Mode "openshift-logging" -}} +{{- if $tenant := 
$l.OpenShiftOptions.Authentication -}} +{{- range $spec := $l.OpenShiftOptions.Authentication }} +- name: {{ $spec.TenantName }} + id: {{ $spec.TenantID }} + openshift: + serviceAccount: {{ $spec.ServiceAccount }} + redirectURL: {{ $spec.RedirectURL }} + cookieSecret: {{ $spec.CookieSecret }} + opa: + url: {{ $l.OpenShiftOptions.Authorization.OPAUrl }} + withAccessToken: true +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/operator/internal/manifests/internal/gateway/lokistack-gateway.rego b/operator/internal/manifests/internal/gateway/lokistack-gateway.rego new file mode 100644 index 0000000000..d347d7193d --- /dev/null +++ b/operator/internal/manifests/internal/gateway/lokistack-gateway.rego @@ -0,0 +1,24 @@ +package lokistack + +import input +import data.roles +import data.roleBindings + +default allow = false + +allow { + some roleNames + roleNames = roleBindings[matched_role_binding[_]].roles + roles[i].name == roleNames[_] + roles[i].resources[_] = input.resource + roles[i].permissions[_] = input.permission + roles[i].tenants[_] = input.tenant +} + +matched_role_binding[i] { + roleBindings[i].subjects[_] == {"name": input.subject, "kind": "user"} +} + +matched_role_binding[i] { + roleBindings[i].subjects[_] == {"name": input.groups[_], "kind": "group"} +} diff --git a/operator/internal/manifests/internal/gateway/options.go b/operator/internal/manifests/internal/gateway/options.go new file mode 100644 index 0000000000..b33dabea13 --- /dev/null +++ b/operator/internal/manifests/internal/gateway/options.go @@ -0,0 +1,33 @@ +package gateway + +import ( + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests/openshift" +) + +// Options is used to render the rbac.yaml and tenants.yaml file template +type Options struct { + Stack lokiv1beta1.LokiStackSpec + + Namespace string + Name string + StorageDirectory string + + OpenShiftOptions openshift.Options + TenantSecrets []*Secret + 
TenantConfigMap map[string]TenantData +} + +// Secret for clientID, clientSecret and issuerCAPath for tenant's authentication. +type Secret struct { + TenantName string + ClientID string + ClientSecret string + IssuerCAPath string +} + +// TenantData defines the existing tenantID and cookieSecret for lokistack reconcile. +type TenantData struct { + TenantID string + CookieSecret string +} diff --git a/operator/internal/manifests/internal/sizes.go b/operator/internal/manifests/internal/sizes.go new file mode 100644 index 0000000000..1cb7e49dea --- /dev/null +++ b/operator/internal/manifests/internal/sizes.go @@ -0,0 +1,330 @@ +package internal + +import ( + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +// ComponentResources is a map of component->requests/limits +type ComponentResources struct { + IndexGateway ResourceRequirements + Ingester ResourceRequirements + Compactor ResourceRequirements + WALStorage ResourceRequirements + // these two don't need a PVCSize + Querier corev1.ResourceRequirements + Distributor corev1.ResourceRequirements + QueryFrontend corev1.ResourceRequirements + Gateway corev1.ResourceRequirements +} + +// ResourceRequirements sets CPU, Memory, and PVC requirements for a component +type ResourceRequirements struct { + Limits corev1.ResourceList + Requests corev1.ResourceList + PVCSize resource.Quantity +} + +// ResourceRequirementsTable defines the default resource requests and limits for each size +var ResourceRequirementsTable = map[lokiv1beta1.LokiStackSizeType]ComponentResources{ + lokiv1beta1.SizeOneXExtraSmall: { + Querier: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("3Gi"), + }, + }, + Ingester: ResourceRequirements{ + PVCSize: resource.MustParse("1Gi"), + Requests: map[corev1.ResourceName]resource.Quantity{ + 
corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + Distributor: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + }, + QueryFrontend: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + }, + Compactor: ResourceRequirements{ + PVCSize: resource.MustParse("1Gi"), + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + Gateway: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("256Mi"), + }, + }, + IndexGateway: ResourceRequirements{ + PVCSize: resource.MustParse("5Gi"), + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + WALStorage: ResourceRequirements{ + PVCSize: resource.MustParse("15Gi"), + }, + }, + lokiv1beta1.SizeOneXSmall: { + Querier: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + Ingester: ResourceRequirements{ + PVCSize: resource.MustParse("10Gi"), + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("20Gi"), + }, + }, + Distributor: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + 
QueryFrontend: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("2.5Gi"), + }, + }, + Compactor: ResourceRequirements{ + PVCSize: resource.MustParse("10Gi"), + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + Gateway: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + IndexGateway: ResourceRequirements{ + PVCSize: resource.MustParse("50Gi"), + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + WALStorage: ResourceRequirements{ + PVCSize: resource.MustParse("150Gi"), + }, + }, + lokiv1beta1.SizeOneXMedium: { + Querier: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("6"), + corev1.ResourceMemory: resource.MustParse("10Gi"), + }, + }, + Ingester: ResourceRequirements{ + PVCSize: resource.MustParse("10Gi"), + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("6"), + corev1.ResourceMemory: resource.MustParse("30Gi"), + }, + }, + Distributor: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + QueryFrontend: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("2.5Gi"), + }, + }, + Compactor: ResourceRequirements{ + PVCSize: resource.MustParse("10Gi"), + Requests: 
map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + Gateway: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + IndexGateway: ResourceRequirements{ + PVCSize: resource.MustParse("50Gi"), + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + WALStorage: ResourceRequirements{ + PVCSize: resource.MustParse("150Gi"), + }, + }, +} + +// StackSizeTable defines the default configurations for each size +var StackSizeTable = map[lokiv1beta1.LokiStackSizeType]lokiv1beta1.LokiStackSpec{ + + lokiv1beta1.SizeOneXExtraSmall: { + Size: lokiv1beta1.SizeOneXExtraSmall, + ReplicationFactor: 1, + Limits: &lokiv1beta1.LimitsSpec{ + Global: &lokiv1beta1.LimitsTemplateSpec{ + IngestionLimits: &lokiv1beta1.IngestionLimitSpec{ + // Defaults from Loki docs + IngestionRate: 4, + IngestionBurstSize: 6, + MaxLabelNameLength: 1024, + MaxLabelValueLength: 2048, + MaxLabelNamesPerSeries: 30, + MaxLineSize: 256000, + }, + QueryLimits: &lokiv1beta1.QueryLimitSpec{ + // Defaults from Loki docs + MaxEntriesLimitPerQuery: 5000, + MaxChunksPerQuery: 2000000, + MaxQuerySeries: 500, + }, + }, + }, + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + + lokiv1beta1.SizeOneXSmall: { + Size: 
lokiv1beta1.SizeOneXSmall, + ReplicationFactor: 2, + Limits: &lokiv1beta1.LimitsSpec{ + Global: &lokiv1beta1.LimitsTemplateSpec{ + IngestionLimits: &lokiv1beta1.IngestionLimitSpec{ + // Custom for 1x.small + IngestionRate: 10, + IngestionBurstSize: 20, + MaxGlobalStreamsPerTenant: 10000, + // Defaults from Loki docs + MaxLabelNameLength: 1024, + MaxLabelValueLength: 2048, + MaxLabelNamesPerSeries: 30, + MaxLineSize: 256000, + }, + QueryLimits: &lokiv1beta1.QueryLimitSpec{ + // Defaults from Loki docs + MaxEntriesLimitPerQuery: 5000, + MaxChunksPerQuery: 2000000, + MaxQuerySeries: 500, + }, + }, + }, + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + }, + }, + + lokiv1beta1.SizeOneXMedium: { + Size: lokiv1beta1.SizeOneXMedium, + ReplicationFactor: 3, + Limits: &lokiv1beta1.LimitsSpec{ + Global: &lokiv1beta1.LimitsTemplateSpec{ + IngestionLimits: &lokiv1beta1.IngestionLimitSpec{ + // Custom for 1x.medium + IngestionRate: 10, + IngestionBurstSize: 20, + MaxGlobalStreamsPerTenant: 25000, + // Defaults from Loki docs + MaxLabelNameLength: 1024, + MaxLabelValueLength: 2048, + MaxLabelNamesPerSeries: 30, + MaxLineSize: 256000, + }, + QueryLimits: &lokiv1beta1.QueryLimitSpec{ + // Defaults from Loki docs + MaxEntriesLimitPerQuery: 5000, + MaxChunksPerQuery: 2000000, + MaxQuerySeries: 500, + }, + }, + }, + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + 
Replicas: 3, + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: 3, + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 2, + }, + }, + }, +} diff --git a/operator/internal/manifests/memberlist.go b/operator/internal/manifests/memberlist.go new file mode 100644 index 0000000000..0290137a35 --- /dev/null +++ b/operator/internal/manifests/memberlist.go @@ -0,0 +1,36 @@ +package manifests + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/intstr" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// BuildLokiGossipRingService creates a k8s service for the gossip/memberlist members of the cluster +func BuildLokiGossipRingService(stackName string) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("loki-gossip-ring-%s", stackName), + Labels: commonLabels(stackName), + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: lokiGossipPortName, + Port: gossipPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: gossipPort}, + }, + }, + Selector: commonLabels(stackName), + }, + } +} diff --git a/operator/internal/manifests/mutate.go b/operator/internal/manifests/mutate.go new file mode 100644 index 0000000000..3c5af705c1 --- /dev/null +++ b/operator/internal/manifests/mutate.go @@ -0,0 +1,175 @@ +package manifests + +import ( + "reflect" + + "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/log" + "github.com/imdario/mergo" + routev1 "github.com/openshift/api/route/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 
"k8s.io/api/rbac/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// MutateFuncFor returns a mutate function based on the +// existing resource's concrete type. It supports currently +// only the following types or else panics: +// - ConfigMap +// - Service +// - Deployment +// - StatefulSet +// - ServiceMonitor +func MutateFuncFor(existing, desired client.Object) controllerutil.MutateFn { + return func() error { + existingAnnotations := existing.GetAnnotations() + mergeWithOverride(&existingAnnotations, desired.GetAnnotations()) + existing.SetAnnotations(existingAnnotations) + + existingLabels := existing.GetLabels() + mergeWithOverride(&existingLabels, desired.GetLabels()) + existing.SetLabels(existingLabels) + + switch existing.(type) { + case *corev1.ConfigMap: + cm := existing.(*corev1.ConfigMap) + wantCm := desired.(*corev1.ConfigMap) + mutateConfigMap(cm, wantCm) + + case *corev1.Service: + svc := existing.(*corev1.Service) + wantSvc := desired.(*corev1.Service) + mutateService(svc, wantSvc) + + case *corev1.ServiceAccount: + sa := existing.(*corev1.ServiceAccount) + wantSa := desired.(*corev1.ServiceAccount) + mutateServiceAccount(sa, wantSa) + + case *rbacv1.ClusterRole: + cr := existing.(*rbacv1.ClusterRole) + wantCr := desired.(*rbacv1.ClusterRole) + mutateClusterRole(cr, wantCr) + + case *rbacv1.ClusterRoleBinding: + crb := existing.(*rbacv1.ClusterRoleBinding) + wantCrb := desired.(*rbacv1.ClusterRoleBinding) + mutateClusterRoleBinding(crb, wantCrb) + + case *appsv1.Deployment: + dpl := existing.(*appsv1.Deployment) + wantDpl := desired.(*appsv1.Deployment) + mutateDeployment(dpl, wantDpl) + + case *appsv1.StatefulSet: + sts := existing.(*appsv1.StatefulSet) + wantSts := desired.(*appsv1.StatefulSet) + mutateStatefulSet(sts, wantSts) + + case *monitoringv1.ServiceMonitor: + svcMonitor := existing.(*monitoringv1.ServiceMonitor) + wantSvcMonitor := 
desired.(*monitoringv1.ServiceMonitor) + mutateServiceMonitor(svcMonitor, wantSvcMonitor) + + case *networkingv1.Ingress: + ing := existing.(*networkingv1.Ingress) + wantIng := desired.(*networkingv1.Ingress) + mutateIngress(ing, wantIng) + + case *routev1.Route: + rt := existing.(*routev1.Route) + wantRt := desired.(*routev1.Route) + mutateRoute(rt, wantRt) + + default: + t := reflect.TypeOf(existing).String() + return kverrors.New("missing mutate implementation for resource type", "type", t) + } + return nil + } +} + +func mergeWithOverride(dst, src interface{}) { + err := mergo.Merge(dst, src, mergo.WithOverride) + if err != nil { + log.Error(err, "unable to mergeWithOverride", "dst", dst, "src", src) + } +} + +func mutateConfigMap(existing, desired *corev1.ConfigMap) { + existing.BinaryData = desired.BinaryData +} + +func mutateService(existing, desired *corev1.Service) { + existing.Spec.Ports = desired.Spec.Ports + mergeWithOverride(&existing.Spec.Selector, desired.Spec.Selector) +} + +func mutateServiceAccount(existing, desired *corev1.ServiceAccount) { + existing.Annotations = desired.Annotations + existing.Labels = desired.Labels +} + +func mutateClusterRole(existing, desired *rbacv1.ClusterRole) { + existing.Annotations = desired.Annotations + existing.Labels = desired.Labels + existing.Rules = desired.Rules +} + +func mutateClusterRoleBinding(existing, desired *rbacv1.ClusterRoleBinding) { + existing.Labels = desired.Labels + existing.Annotations = desired.Annotations + existing.RoleRef = desired.RoleRef + existing.Subjects = desired.Subjects +} + +func mutateDeployment(existing, desired *appsv1.Deployment) { + // Deployment selector is immutable so we set this value only if + // a new object is going to be created + if existing.CreationTimestamp.IsZero() { + mergeWithOverride(existing.Spec.Selector, desired.Spec.Selector) + } + existing.Spec.Replicas = desired.Spec.Replicas + mergeWithOverride(&existing.Spec.Template, desired.Spec.Template) + 
mergeWithOverride(&existing.Spec.Strategy, desired.Spec.Strategy) +} + +func mutateStatefulSet(existing, desired *appsv1.StatefulSet) { + // StatefulSet selector is immutable so we set this value only if + // a new object is going to be created + if existing.CreationTimestamp.IsZero() { + existing.Spec.Selector = desired.Spec.Selector + } + existing.Spec.PodManagementPolicy = desired.Spec.PodManagementPolicy + existing.Spec.Replicas = desired.Spec.Replicas + mergeWithOverride(&existing.Spec.Template, desired.Spec.Template) + for i := range existing.Spec.VolumeClaimTemplates { + existing.Spec.VolumeClaimTemplates[i].TypeMeta = desired.Spec.VolumeClaimTemplates[i].TypeMeta + existing.Spec.VolumeClaimTemplates[i].ObjectMeta = desired.Spec.VolumeClaimTemplates[i].ObjectMeta + existing.Spec.VolumeClaimTemplates[i].Spec = desired.Spec.VolumeClaimTemplates[i].Spec + } +} + +func mutateServiceMonitor(existing, desired *monitoringv1.ServiceMonitor) { + // ServiceMonitor selector is immutable so we set this value only if + // a new object is going to be created +} + +func mutateIngress(existing, desired *networkingv1.Ingress) { + existing.Labels = desired.Labels + existing.Annotations = desired.Annotations + existing.Spec.DefaultBackend = desired.Spec.DefaultBackend + existing.Spec.Rules = desired.Spec.Rules + existing.Spec.TLS = desired.Spec.TLS +} + +func mutateRoute(existing, desired *routev1.Route) { + existing.Annotations = desired.Annotations + existing.Labels = desired.Labels + existing.Spec = desired.Spec +} diff --git a/operator/internal/manifests/mutate_test.go b/operator/internal/manifests/mutate_test.go new file mode 100644 index 0000000000..c58b470f26 --- /dev/null +++ b/operator/internal/manifests/mutate_test.go @@ -0,0 +1,939 @@ +package manifests_test + +import ( + "testing" + + routev1 "github.com/openshift/api/route/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + + 
"github.com/grafana/loki-operator/internal/manifests" + "github.com/stretchr/testify/require" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" +) + +func TestGetMutateFunc_MutateObjectMeta(t *testing.T) { + got := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + }, + Annotations: map[string]string{ + "test": "test", + }, + }, + } + + want := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + }, + Annotations: map[string]string{ + "test": "test", + }, + }, + } + + f := manifests.MutateFuncFor(got, want) + err := f() + require.NoError(t, err) + + // Partial mutation checks + require.Exactly(t, got.Labels, want.Labels) + require.Exactly(t, got.Annotations, want.Annotations) +} + +func TestGetMutateFunc_ReturnErrOnNotSupportedType(t *testing.T) { + got := &corev1.Endpoints{} + want := &corev1.Endpoints{} + f := manifests.MutateFuncFor(got, want) + + require.Error(t, f()) +} + +func TestGetMutateFunc_MutateConfigMap(t *testing.T) { + got := &corev1.ConfigMap{ + Data: map[string]string{"test": "remain"}, + BinaryData: map[string][]byte{}, + } + + want := &corev1.ConfigMap{ + Data: map[string]string{"test": "test"}, + BinaryData: map[string][]byte{"btest": []byte("btestss")}, + } + + f := manifests.MutateFuncFor(got, want) + err := f() + require.NoError(t, err) + + // Ensure partial mutation applied + require.Equal(t, got.Labels, want.Labels) + require.Equal(t, got.Annotations, want.Annotations) + require.Equal(t, got.BinaryData, want.BinaryData) + + // Ensure not mutated + require.NotEqual(t, got.Data, want.Data) +} + +func TestGetMutateFunc_MutateServiceSpec(t *testing.T) { + got := &corev1.Service{ + Spec: corev1.ServiceSpec{ + ClusterIP: "none", + ClusterIPs: 
[]string{"8.8.8.8"}, + Ports: []corev1.ServicePort{ + { + Protocol: corev1.ProtocolTCP, + Port: 7777, + TargetPort: intstr.FromString("8888"), + }, + }, + Selector: map[string]string{ + "select": "that", + }, + }, + } + + want := &corev1.Service{ + Spec: corev1.ServiceSpec{ + ClusterIP: "none", + ClusterIPs: []string{"8.8.8.8", "9.9.9.9"}, + Ports: []corev1.ServicePort{ + { + Protocol: corev1.ProtocolTCP, + Port: 9999, + TargetPort: intstr.FromString("1111"), + }, + }, + Selector: map[string]string{ + "select": "that", + "and": "other", + }, + }, + } + + f := manifests.MutateFuncFor(got, want) + err := f() + require.NoError(t, err) + + // Ensure partial mutation applied + require.ElementsMatch(t, got.Spec.Ports, want.Spec.Ports) + require.Exactly(t, got.Spec.Selector, want.Spec.Selector) + + // Ensure not mutated + require.Equal(t, got.Spec.ClusterIP, "none") + require.Exactly(t, got.Spec.ClusterIPs, []string{"8.8.8.8"}) +} + +func TestGetMutateFunc_MutateServiceAccountObjectMeta(t *testing.T) { + type test struct { + name string + got *corev1.ServiceAccount + want *corev1.ServiceAccount + } + table := []test{ + { + name: "update object meta", + got: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + }, + Annotations: map[string]string{ + "test": "test", + }, + }, + }, + want: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + "other": "label", + }, + Annotations: map[string]string{ + "test": "test", + "other": "annotation", + }, + }, + }, + }, + { + name: "no update secrets", + got: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + }, + Annotations: map[string]string{ + "test": "test", + }, + }, + Secrets: []corev1.ObjectReference{ + {Name: "secret-me"}, + }, + }, + want: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + "other": "label", + }, + 
Annotations: map[string]string{ + "test": "test", + "other": "annotation", + }, + }, + Secrets: []corev1.ObjectReference{ + {Name: "secret-me"}, + {Name: "another-secret"}, + }, + }, + }, + { + name: "no update image pull secrets", + got: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + }, + Annotations: map[string]string{ + "test": "test", + }, + }, + ImagePullSecrets: []corev1.LocalObjectReference{ + {Name: "secret-me"}, + }, + }, + want: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + "other": "label", + }, + Annotations: map[string]string{ + "test": "test", + "other": "annotation", + }, + }, + ImagePullSecrets: []corev1.LocalObjectReference{ + {Name: "secret-me"}, + {Name: "another-secret"}, + }, + }, + }, + } + + for _, tt := range table { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + f := manifests.MutateFuncFor(tt.got, tt.want) + err := f() + require.NoError(t, err) + + // Partial mutation checks + require.Exactly(t, tt.got.Labels, tt.want.Labels) + require.Exactly(t, tt.got.Annotations, tt.want.Annotations) + + if tt.got.Secrets != nil { + require.NotEqual(t, tt.got.Secrets, tt.want.Secrets) + } + if tt.got.ImagePullSecrets != nil { + require.NotEqual(t, tt.got.ImagePullSecrets, tt.want.ImagePullSecrets) + } + }) + } +} + +func TestGetMutateFunc_MutateClusterRole(t *testing.T) { + got := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + }, + Annotations: map[string]string{ + "test": "test", + }, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"group-a"}, + Resources: []string{"res-a"}, + Verbs: []string{"get"}, + }, + }, + } + + want := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + "other": "label", + }, + Annotations: map[string]string{ + "test": "test", + "other": "annotation", + }, + }, + Rules: 
[]rbacv1.PolicyRule{ + { + APIGroups: []string{"groupa-a"}, + Resources: []string{"resa-a"}, + Verbs: []string{"get", "create"}, + }, + { + APIGroups: []string{"groupa-b"}, + Resources: []string{"resa-b"}, + Verbs: []string{"list", "create"}, + }, + }, + } + + f := manifests.MutateFuncFor(got, want) + err := f() + require.NoError(t, err) + + // Partial mutation checks + require.Exactly(t, got.Labels, want.Labels) + require.Exactly(t, got.Annotations, want.Annotations) + require.Exactly(t, got.Rules, want.Rules) +} + +func TestGetMutateFunc_MutateClusterRoleBinding(t *testing.T) { + got := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + }, + Annotations: map[string]string{ + "test": "test", + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "a-role", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "service-me", + Namespace: "stack-ns", + }, + }, + } + + want := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + "other": "label", + }, + Annotations: map[string]string{ + "test": "test", + "other": "annotation", + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "b-role", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "service-me", + Namespace: "stack-ns", + }, + { + Kind: "User", + Name: "a-user", + }, + }, + } + + f := manifests.MutateFuncFor(got, want) + err := f() + require.NoError(t, err) + + // Partial mutation checks + require.Exactly(t, got.Labels, want.Labels) + require.Exactly(t, got.Annotations, want.Annotations) + require.Exactly(t, got.RoleRef, want.RoleRef) + require.Exactly(t, got.Subjects, want.Subjects) +} + +func TestGeMutateFunc_MutateDeploymentSpec(t *testing.T) { + type test struct { + name string + got *appsv1.Deployment + want *appsv1.Deployment + } + table := 
[]test{ + { + name: "initial creation", + got: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + Replicas: pointer.Int32Ptr(1), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "test"}, + }, + }, + }, + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RecreateDeploymentStrategyType, + }, + }, + }, + want: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + "and": "another", + }, + }, + Replicas: pointer.Int32Ptr(2), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Args: []string{"--do-nothing"}, + }, + }, + }, + }, + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + }, + }, + }, + }, + { + name: "update spec without selector", + got: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Now()}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + Replicas: pointer.Int32Ptr(1), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "test"}, + }, + }, + }, + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RecreateDeploymentStrategyType, + }, + }, + }, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Now()}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + "and": "another", + }, + }, + Replicas: pointer.Int32Ptr(2), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Args: []string{"--do-nothing"}, + }, + }, + }, + }, + Strategy: appsv1.DeploymentStrategy{ + Type: 
appsv1.RollingUpdateDeploymentStrategyType, + }, + }, + }, + }, + } + for _, tst := range table { + tst := tst + t.Run(tst.name, func(t *testing.T) { + t.Parallel() + f := manifests.MutateFuncFor(tst.got, tst.want) + err := f() + require.NoError(t, err) + + // Ensure conditional mutation applied + if tst.got.CreationTimestamp.IsZero() { + require.Equal(t, tst.got.Spec.Selector, tst.want.Spec.Selector) + } else { + require.NotEqual(t, tst.got.Spec.Selector, tst.want.Spec.Selector) + } + + // Ensure partial mutation applied + require.Equal(t, tst.got.Spec.Replicas, tst.want.Spec.Replicas) + require.Equal(t, tst.got.Spec.Template, tst.want.Spec.Template) + require.Equal(t, tst.got.Spec.Strategy, tst.want.Spec.Strategy) + }) + } +} + +func TestGeMutateFunc_MutateStatefulSetSpec(t *testing.T) { + type test struct { + name string + got *appsv1.StatefulSet + want *appsv1.StatefulSet + } + table := []test{ + { + name: "initial creation", + got: &appsv1.StatefulSet{ + Spec: appsv1.StatefulSetSpec{ + PodManagementPolicy: appsv1.ParallelPodManagement, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + Replicas: pointer.Int32Ptr(1), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "test"}, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + }, + }, + }, + }, + }, + want: &appsv1.StatefulSet{ + Spec: appsv1.StatefulSetSpec{ + PodManagementPolicy: appsv1.OrderedReadyPodManagement, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + "and": "another", + }, + }, + Replicas: pointer.Int32Ptr(2), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Args: []string{"--do-nothing"}, + }, + }, + }, + }, + VolumeClaimTemplates: 
[]corev1.PersistentVolumeClaim{ + { + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + corev1.ReadOnlyMany, + }, + }, + }, + }, + }, + }, + }, + { + name: "update spec without selector", + got: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Now()}, + Spec: appsv1.StatefulSetSpec{ + PodManagementPolicy: appsv1.ParallelPodManagement, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + Replicas: pointer.Int32Ptr(1), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "test"}, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + }, + }, + }, + }, + }, + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Now()}, + Spec: appsv1.StatefulSetSpec{ + PodManagementPolicy: appsv1.OrderedReadyPodManagement, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + "and": "another", + }, + }, + Replicas: pointer.Int32Ptr(2), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Args: []string{"--do-nothing"}, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + corev1.ReadWriteMany, + }, + }, + }, + }, + }, + }, + }, + } + for _, tst := range table { + tst := tst + t.Run(tst.name, func(t *testing.T) { + t.Parallel() + f := manifests.MutateFuncFor(tst.got, tst.want) + err := f() + require.NoError(t, err) + + // Ensure conditional mutation applied + if tst.got.CreationTimestamp.IsZero() { + require.Equal(t, tst.got.Spec.Selector, tst.want.Spec.Selector) + } else { + 
require.NotEqual(t, tst.got.Spec.Selector, tst.want.Spec.Selector) + } + + // Ensure partial mutation applied + require.Equal(t, tst.got.Spec.Replicas, tst.want.Spec.Replicas) + require.Equal(t, tst.got.Spec.Template, tst.want.Spec.Template) + require.Equal(t, tst.got.Spec.VolumeClaimTemplates, tst.want.Spec.VolumeClaimTemplates) + }) + } +} + +func TestGetMutateFunc_MutateServiceMonitorSpec(t *testing.T) { + type test struct { + name string + got *monitoringv1.ServiceMonitor + want *monitoringv1.ServiceMonitor + } + table := []test{ + { + name: "initial creation", + got: &monitoringv1.ServiceMonitor{ + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "some-job", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "loki-test", + Path: "/some-path", + Scheme: "https", + BearerTokenFile: manifests.BearerTokenFile, + TLSConfig: &monitoringv1.TLSConfig{ + SafeTLSConfig: monitoringv1.SafeTLSConfig{ + ServerName: "loki-test.some-ns.svc.cluster.local", + }, + CAFile: manifests.PrometheusCAFile, + }, + }, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + NamespaceSelector: monitoringv1.NamespaceSelector{ + MatchNames: []string{"some-ns"}, + }, + }, + }, + want: &monitoringv1.ServiceMonitor{ + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "some-job-new", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "loki-test", + Path: "/some-path", + Scheme: "https", + BearerTokenFile: manifests.BearerTokenFile, + TLSConfig: &monitoringv1.TLSConfig{ + SafeTLSConfig: monitoringv1.SafeTLSConfig{ + ServerName: "loki-test.some-ns.svc.cluster.local", + }, + CAFile: manifests.PrometheusCAFile, + }, + }, + { + Port: "loki-test", + Path: "/some-new-path", + Scheme: "https", + BearerTokenFile: manifests.BearerTokenFile, + TLSConfig: &monitoringv1.TLSConfig{ + SafeTLSConfig: monitoringv1.SafeTLSConfig{ + ServerName: "loki-test.some-ns.svc.cluster.local", + }, + CAFile: manifests.PrometheusCAFile, + }, + }, + }, + Selector: 
metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + "and": "another", + }, + }, + NamespaceSelector: monitoringv1.NamespaceSelector{ + MatchNames: []string{"some-ns-new"}, + }, + }, + }, + }, + { + name: "update spec without selector", + got: &monitoringv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Now()}, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "some-job", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "loki-test", + Path: "/some-path", + Scheme: "https", + BearerTokenFile: manifests.BearerTokenFile, + TLSConfig: &monitoringv1.TLSConfig{ + SafeTLSConfig: monitoringv1.SafeTLSConfig{ + ServerName: "loki-test.some-ns.svc.cluster.local", + }, + CAFile: manifests.PrometheusCAFile, + }, + }, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + NamespaceSelector: monitoringv1.NamespaceSelector{ + MatchNames: []string{"some-ns"}, + }, + }, + }, + want: &monitoringv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Now()}, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "some-job-new", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "loki-test", + Path: "/some-path", + Scheme: "https", + BearerTokenFile: manifests.BearerTokenFile, + TLSConfig: &monitoringv1.TLSConfig{ + SafeTLSConfig: monitoringv1.SafeTLSConfig{ + ServerName: "loki-test.some-ns.svc.cluster.local", + }, + CAFile: manifests.PrometheusCAFile, + }, + }, + { + Port: "loki-test", + Path: "/some-new-path", + Scheme: "https", + BearerTokenFile: manifests.BearerTokenFile, + TLSConfig: &monitoringv1.TLSConfig{ + SafeTLSConfig: monitoringv1.SafeTLSConfig{ + ServerName: "loki-test.some-ns.svc.cluster.local", + }, + CAFile: manifests.PrometheusCAFile, + }, + }, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + "and": "another", + }, + }, + NamespaceSelector: monitoringv1.NamespaceSelector{ + MatchNames: 
[]string{"some-ns-new"}, + }, + }, + }, + }, + } + for _, tst := range table { + tst := tst + t.Run(tst.name, func(t *testing.T) { + t.Parallel() + f := manifests.MutateFuncFor(tst.got, tst.want) + err := f() + require.NoError(t, err) + + // Ensure not mutated + require.NotEqual(t, tst.got.Spec.JobLabel, tst.want.Spec.JobLabel) + require.NotEqual(t, tst.got.Spec.Endpoints, tst.want.Spec.Endpoints) + require.NotEqual(t, tst.got.Spec.NamespaceSelector, tst.want.Spec.NamespaceSelector) + }) + } +} + +func TestGetMutateFunc_MutateIngress(t *testing.T) { + pt := networkingv1.PathTypeExact + got := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + }, + Annotations: map[string]string{ + "test": "test", + }, + }, + } + want := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + "other": "label", + }, + Annotations: map[string]string{ + "test": "test", + "other": "annotation", + }, + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "a-service", + Port: networkingv1.ServiceBackendPort{ + Name: "a-port", + }, + }, + }, + Rules: []networkingv1.IngressRule{ + { + Host: "a-host", + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + PathType: &pt, + Path: "/to/a/service", + }, + }, + }, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + { + Hosts: []string{"a-host"}, + SecretName: "a-sercet", + }, + }, + }, + } + + f := manifests.MutateFuncFor(got, want) + err := f() + require.NoError(t, err) + + // Partial mutation checks + require.Exactly(t, got.Labels, want.Labels) + require.Exactly(t, got.Annotations, want.Annotations) + require.Exactly(t, got.Spec.DefaultBackend, want.Spec.DefaultBackend) + require.Exactly(t, got.Spec.Rules, want.Spec.Rules) + require.Exactly(t, got.Spec.TLS, want.Spec.TLS) 
+} + +func TestGetMutateFunc_MutateRoute(t *testing.T) { + got := &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + }, + Annotations: map[string]string{ + "test": "test", + }, + }, + } + + want := &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test": "test", + "other": "label", + }, + Annotations: map[string]string{ + "test": "test", + "other": "annotation", + }, + }, + Spec: routev1.RouteSpec{ + Host: "a-host", + To: routev1.RouteTargetReference{ + Kind: "Service", + Name: "a-service", + Weight: pointer.Int32(100), + }, + TLS: &routev1.TLSConfig{ + Termination: routev1.TLSTerminationReencrypt, + Certificate: "a-cert", + Key: "a-key", + CACertificate: "a-ca-cert", + DestinationCACertificate: "a-dst-ca-cert", + InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect, + }, + }, + } + + f := manifests.MutateFuncFor(got, want) + err := f() + require.NoError(t, err) + + // Partial mutation checks + require.Exactly(t, got.Labels, want.Labels) + require.Exactly(t, got.Annotations, want.Annotations) + require.Exactly(t, got.Spec, want.Spec) +} diff --git a/operator/internal/manifests/node_placement_test.go b/operator/internal/manifests/node_placement_test.go new file mode 100644 index 0000000000..0c4ff57f85 --- /dev/null +++ b/operator/internal/manifests/node_placement_test.go @@ -0,0 +1,196 @@ +package manifests + +import ( + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" +) + +func TestTolerationsAreSetForEachComponent(t *testing.T) { + tolerations := []corev1.Toleration{{ + Key: "type", + Operator: corev1.TolerationOpEqual, + Value: "storage", + Effect: corev1.TaintEffectNoSchedule, + }} + optsWithTolerations := Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Tolerations: tolerations, + 
Replicas: 1, + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Tolerations: tolerations, + Replicas: 1, + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + Tolerations: tolerations, + Replicas: 1, + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + Tolerations: tolerations, + Replicas: 1, + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Tolerations: tolerations, + Replicas: 1, + }, + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Tolerations: tolerations, + Replicas: 1, + }, + }, + }, + ObjectStorage: ObjectStorage{}, + } + + optsWithoutTolerations := Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + ObjectStorage: ObjectStorage{}, + } + + t.Run("distributor", func(t *testing.T) { + assert.Equal(t, tolerations, NewDistributorDeployment(optsWithTolerations).Spec.Template.Spec.Tolerations) + assert.Empty(t, NewDistributorDeployment(optsWithoutTolerations).Spec.Template.Spec.Tolerations) + }) + + t.Run("query_frontend", func(t *testing.T) { + assert.Equal(t, tolerations, NewQueryFrontendDeployment(optsWithTolerations).Spec.Template.Spec.Tolerations) + assert.Empty(t, NewQueryFrontendDeployment(optsWithoutTolerations).Spec.Template.Spec.Tolerations) + }) + + t.Run("querier", func(t *testing.T) { + assert.Equal(t, tolerations, NewQuerierDeployment(optsWithTolerations).Spec.Template.Spec.Tolerations) + assert.Empty(t, NewQuerierDeployment(optsWithoutTolerations).Spec.Template.Spec.Tolerations) + }) + + t.Run("ingester", func(t *testing.T) { + assert.Equal(t, tolerations, 
NewIngesterStatefulSet(optsWithTolerations).Spec.Template.Spec.Tolerations) + assert.Empty(t, NewIngesterStatefulSet(optsWithoutTolerations).Spec.Template.Spec.Tolerations) + }) + + t.Run("compactor", func(t *testing.T) { + assert.Equal(t, tolerations, NewCompactorStatefulSet(optsWithTolerations).Spec.Template.Spec.Tolerations) + assert.Empty(t, NewCompactorStatefulSet(optsWithoutTolerations).Spec.Template.Spec.Tolerations) + }) + + t.Run("index_gateway", func(t *testing.T) { + assert.Equal(t, tolerations, NewIndexGatewayStatefulSet(optsWithTolerations).Spec.Template.Spec.Tolerations) + assert.Empty(t, NewIndexGatewayStatefulSet(optsWithoutTolerations).Spec.Template.Spec.Tolerations) + }) +} + +func TestNodeSelectorsAreSetForEachComponent(t *testing.T) { + nodeSelectors := map[string]string{"type": "storage"} + optsWithNodeSelectors := Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + NodeSelector: nodeSelectors, + Replicas: 1, + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + NodeSelector: nodeSelectors, + Replicas: 1, + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + NodeSelector: nodeSelectors, + Replicas: 1, + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + NodeSelector: nodeSelectors, + Replicas: 1, + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + NodeSelector: nodeSelectors, + Replicas: 1, + }, + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + NodeSelector: nodeSelectors, + Replicas: 1, + }, + }, + }, + ObjectStorage: ObjectStorage{}, + } + + optsWithoutNodeSelectors := Options{ + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + 
Replicas: 1, + }, + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + ObjectStorage: ObjectStorage{}, + } + + t.Run("distributor", func(t *testing.T) { + assert.Equal(t, nodeSelectors, NewDistributorDeployment(optsWithNodeSelectors).Spec.Template.Spec.NodeSelector) + assert.Empty(t, NewDistributorDeployment(optsWithoutNodeSelectors).Spec.Template.Spec.NodeSelector) + }) + + t.Run("query_frontend", func(t *testing.T) { + assert.Equal(t, nodeSelectors, NewQueryFrontendDeployment(optsWithNodeSelectors).Spec.Template.Spec.NodeSelector) + assert.Empty(t, NewQueryFrontendDeployment(optsWithoutNodeSelectors).Spec.Template.Spec.NodeSelector) + }) + + t.Run("querier", func(t *testing.T) { + assert.Equal(t, nodeSelectors, NewQuerierDeployment(optsWithNodeSelectors).Spec.Template.Spec.NodeSelector) + assert.Empty(t, NewQuerierDeployment(optsWithoutNodeSelectors).Spec.Template.Spec.NodeSelector) + }) + + t.Run("ingester", func(t *testing.T) { + assert.Equal(t, nodeSelectors, NewIngesterStatefulSet(optsWithNodeSelectors).Spec.Template.Spec.NodeSelector) + assert.Empty(t, NewIngesterStatefulSet(optsWithoutNodeSelectors).Spec.Template.Spec.NodeSelector) + }) + + t.Run("compactor", func(t *testing.T) { + assert.Equal(t, nodeSelectors, NewCompactorStatefulSet(optsWithNodeSelectors).Spec.Template.Spec.NodeSelector) + assert.Empty(t, NewCompactorStatefulSet(optsWithoutNodeSelectors).Spec.Template.Spec.NodeSelector) + }) + + t.Run("index_gateway", func(t *testing.T) { + assert.Equal(t, nodeSelectors, NewIndexGatewayStatefulSet(optsWithNodeSelectors).Spec.Template.Spec.NodeSelector) + assert.Empty(t, NewIndexGatewayStatefulSet(optsWithoutNodeSelectors).Spec.Template.Spec.NodeSelector) + }) +} diff --git a/operator/internal/manifests/openshift/build.go b/operator/internal/manifests/openshift/build.go new file mode 100644 index 0000000000..59f6936587 --- /dev/null +++ b/operator/internal/manifests/openshift/build.go @@ -0,0 +1,20 @@ +package openshift + 
+import "sigs.k8s.io/controller-runtime/pkg/client" + +// Build returns a list of auxiliary openshift/k8s objects +// for lokistack gateway deployments on OpenShift. +func Build(opts Options) []client.Object { + objs := []client.Object{ + BuildRoute(opts), + BuildServiceAccount(opts), + BuildClusterRole(opts), + BuildClusterRoleBinding(opts), + } + + if opts.BuildOpts.EnableCertificateSigningService { + objs = append(objs, BuildServiceCAConfigMap(opts)) + } + + return objs +} diff --git a/operator/internal/manifests/openshift/build_test.go b/operator/internal/manifests/openshift/build_test.go new file mode 100644 index 0000000000..c20c5e22d9 --- /dev/null +++ b/operator/internal/manifests/openshift/build_test.go @@ -0,0 +1,61 @@ +package openshift + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + + routev1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" +) + +func TestBuild_ServiceAccountRefMatches(t *testing.T) { + opts := NewOptions("abc", "abc", "efgh", "example.com", "abc", "abc", map[string]string{}, false, map[string]TenantData{}) + + objs := Build(opts) + sa := objs[1].(*corev1.ServiceAccount) + rb := objs[3].(*rbacv1.ClusterRoleBinding) + + require.Equal(t, sa.Kind, rb.Subjects[0].Kind) + require.Equal(t, sa.Name, rb.Subjects[0].Name) + require.Equal(t, sa.Namespace, rb.Subjects[0].Namespace) +} + +func TestBuild_ClusterRoleRefMatches(t *testing.T) { + opts := NewOptions("abc", "abc", "efgh", "example.com", "abc", "abc", map[string]string{}, false, map[string]TenantData{}) + + objs := Build(opts) + cr := objs[2].(*rbacv1.ClusterRole) + rb := objs[3].(*rbacv1.ClusterRoleBinding) + + require.Equal(t, cr.Kind, rb.RoleRef.Kind) + require.Equal(t, cr.Name, rb.RoleRef.Name) +} + +func TestBuild_ServiceAccountAnnotationsRouteRefMatches(t *testing.T) { + opts := NewOptions("abc", "abc", "efgh", "example.com", "abc", "abc", map[string]string{}, false, map[string]TenantData{}) + + 
objs := Build(opts) + rt := objs[0].(*routev1.Route) + sa := objs[1].(*corev1.ServiceAccount) + + type oauthRedirectReference struct { + Kind string `json:"kind"` + APIVersion string `json:"apiVersion"` + Ref *struct { + Kind string `json:"kind"` + Name string `json:"name"` + } `json:"reference"` + } + + for _, a := range sa.Annotations { + oauthRef := oauthRedirectReference{} + err := json.Unmarshal([]byte(a), &oauthRef) + require.NoError(t, err) + + require.Equal(t, rt.Name, oauthRef.Ref.Name) + require.Equal(t, rt.Kind, oauthRef.Ref.Kind) + } +} diff --git a/operator/internal/manifests/openshift/configure.go b/operator/internal/manifests/openshift/configure.go new file mode 100644 index 0000000000..dec2f13934 --- /dev/null +++ b/operator/internal/manifests/openshift/configure.go @@ -0,0 +1,161 @@ +package openshift + +import ( + "fmt" + "regexp" + "strings" + + "github.com/ViaQ/logerr/kverrors" + "github.com/imdario/mergo" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +const ( + // tenantApplication is the name of the tenant holding application logs. + tenantApplication = "application" + // tenantInfrastructure is the name of the tenant holding infrastructure logs. + tenantInfrastructure = "infrastructure" + // tenantAudit is the name of the tenant holding audit logs. + tenantAudit = "audit" +) + +var ( + // defaultTenants represents the slice of all supported LokiStack on OpenShift. + defaultTenants = []string{ + tenantApplication, + tenantInfrastructure, + tenantAudit, + } + + logsEndpointRe = regexp.MustCompile(`.*logs..*.endpoint.*`) +) + +// ConfigureGatewayDeployment merges an OpenPolicyAgent sidecar into the deployment spec. +// With this, the deployment will route authorization request to the OpenShift +// apiserver through the sidecar. 
+func ConfigureGatewayDeployment( + d *appsv1.Deployment, + gwContainerName string, + secretVolumeName, tlsDir, certFile, keyFile string, + caDir, caFile string, + withTLS, withCertSigningService bool, +) error { + var gwIndex int + for i, c := range d.Spec.Template.Spec.Containers { + if c.Name == gwContainerName { + gwIndex = i + break + } + } + + gwContainer := d.Spec.Template.Spec.Containers[gwIndex].DeepCopy() + gwArgs := gwContainer.Args + gwVolumes := d.Spec.Template.Spec.Volumes + + if withCertSigningService { + for i, a := range gwArgs { + if logsEndpointRe.MatchString(a) { + gwContainer.Args[i] = strings.Replace(a, "http", "https", 1) + } + } + + gwArgs = append(gwArgs, fmt.Sprintf("--logs.tls.ca-file=%s/%s", caDir, caFile)) + + caBundleVolumeName := serviceCABundleName(Options{ + BuildOpts: BuildOptions{ + GatewayName: d.GetName(), + }, + }) + + gwContainer.VolumeMounts = append(gwContainer.VolumeMounts, corev1.VolumeMount{ + Name: caBundleVolumeName, + ReadOnly: true, + MountPath: caDir, + }) + + gwVolumes = append(gwVolumes, corev1.Volume{ + Name: caBundleVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultConfigMapMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: caBundleVolumeName, + }, + }, + }, + }) + } + + gwContainer.Args = gwArgs + + p := corev1.PodSpec{ + ServiceAccountName: d.GetName(), + Containers: []corev1.Container{ + *gwContainer, + newOPAOpenShiftContainer(secretVolumeName, tlsDir, certFile, keyFile, withTLS), + }, + Volumes: gwVolumes, + } + + if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithOverride); err != nil { + return kverrors.Wrap(err, "failed to merge sidecar container spec") + } + + return nil +} + +// ConfigureGatewayService merges the OpenPolicyAgent sidecar metrics port into +// the service spec. With this the metrics are exposed through the same service.
+func ConfigureGatewayService(s *corev1.ServiceSpec) error { + spec := corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: opaMetricsPortName, + Port: GatewayOPAInternalPort, + }, + }, + } + + if err := mergo.Merge(s, spec, mergo.WithAppendSlice); err != nil { + return kverrors.Wrap(err, "failed to merge sidecar service ports") + } + + return nil +} + +// ConfigureGatewayServiceMonitor merges the OpenPolicyAgent sidecar endpoint into +// the service monitor. With this cluster-monitoring prometheus can scrape +// the sidecar metrics. +func ConfigureGatewayServiceMonitor(sm *monitoringv1.ServiceMonitor, withTLS bool) error { + var opaEndpoint monitoringv1.Endpoint + + if withTLS { + tlsConfig := sm.Spec.Endpoints[0].TLSConfig + opaEndpoint = monitoringv1.Endpoint{ + Port: opaMetricsPortName, + Path: "/metrics", + Scheme: "https", + BearerTokenFile: bearerTokenFile, + TLSConfig: tlsConfig, + } + } else { + opaEndpoint = monitoringv1.Endpoint{ + Port: opaMetricsPortName, + Path: "/metrics", + Scheme: "http", + } + } + + spec := monitoringv1.ServiceMonitorSpec{ + Endpoints: []monitoringv1.Endpoint{opaEndpoint}, + } + + if err := mergo.Merge(&sm.Spec, spec, mergo.WithAppendSlice); err != nil { + return kverrors.Wrap(err, "failed to merge sidecar service monitor endpoints") + } + + return nil +} diff --git a/operator/internal/manifests/openshift/opa_openshift.go b/operator/internal/manifests/openshift/opa_openshift.go new file mode 100644 index 0000000000..bf175e684f --- /dev/null +++ b/operator/internal/manifests/openshift/opa_openshift.go @@ -0,0 +1,109 @@ +package openshift + +import ( + "fmt" + "os" + "path" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + envRelatedImageOPA = "RELATED_IMAGE_OPA" + defaultOPAImage = "quay.io/observatorium/opa-openshift:latest" + opaContainerName = "opa" + opaDefaultPackage = "lokistack" + opaDefaultAPIGroup = "loki.openshift.io" + opaMetricsPortName = "opa-metrics" +) + +func 
newOPAOpenShiftContainer(secretVolumeName, tlsDir, certFile, keyFile string, withTLS bool) corev1.Container { + var ( + image string + args []string + uriScheme corev1.URIScheme + volumeMounts []corev1.VolumeMount + ) + + image = os.Getenv(envRelatedImageOPA) + if image == "" { + image = defaultOPAImage + } + + uriScheme = corev1.URISchemeHTTP + args = []string{ + "--log.level=warn", + fmt.Sprintf("--opa.package=%s", opaDefaultPackage), + fmt.Sprintf("--web.listen=:%d", GatewayOPAHTTPPort), + fmt.Sprintf("--web.internal.listen=:%d", GatewayOPAInternalPort), + fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", GatewayOPAHTTPPort), + } + + if withTLS { + certFilePath := path.Join(tlsDir, certFile) + keyFilePath := path.Join(tlsDir, keyFile) + + args = append(args, []string{ + fmt.Sprintf("--tls.internal.server.cert-file=%s", certFilePath), + fmt.Sprintf("--tls.internal.server.key-file=%s", keyFilePath), + }...) + + uriScheme = corev1.URISchemeHTTPS + + volumeMounts = []corev1.VolumeMount{ + { + Name: secretVolumeName, + ReadOnly: true, + MountPath: tlsDir, + }, + } + } + + for _, t := range defaultTenants { + args = append(args, fmt.Sprintf(`--openshift.mappings=%s=%s`, t, opaDefaultAPIGroup)) + } + + return corev1.Container{ + Name: opaContainerName, + Image: image, + Args: args, + Ports: []corev1.ContainerPort{ + { + Name: GatewayOPAHTTPPortName, + ContainerPort: GatewayOPAHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: GatewayOPAInternalPortName, + ContainerPort: GatewayOPAInternalPort, + Protocol: corev1.ProtocolTCP, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/live", + Port: intstr.FromInt(int(GatewayOPAInternalPort)), + Scheme: uriScheme, + }, + }, + TimeoutSeconds: 2, + PeriodSeconds: 30, + FailureThreshold: 10, + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: 
intstr.FromInt(int(GatewayOPAInternalPort)), + Scheme: uriScheme, + }, + }, + TimeoutSeconds: 1, + PeriodSeconds: 5, + FailureThreshold: 12, + }, + VolumeMounts: volumeMounts, + } +} diff --git a/operator/internal/manifests/openshift/options.go b/operator/internal/manifests/openshift/options.go new file mode 100644 index 0000000000..478fe8f1af --- /dev/null +++ b/operator/internal/manifests/openshift/options.go @@ -0,0 +1,109 @@ +package openshift + +import ( + "fmt" + "math/rand" + + "github.com/google/uuid" +) + +// Options is the set of internal template options for rendering +// the lokistack-gateway tenants configuration file when mode openshift-logging. +type Options struct { + BuildOpts BuildOptions + Authentication []AuthenticationSpec + Authorization AuthorizationSpec +} + +// AuthenticationSpec describes the authentication specification +// for a single tenant to authenticate it's subjects through OpenShift Auth. +type AuthenticationSpec struct { + TenantName string + TenantID string + ServiceAccount string + RedirectURL string + CookieSecret string +} + +// AuthorizationSpec describes the authorization specification +// for all tenants to authorize access for it's subjects through the +// opa-openshift sidecar. +type AuthorizationSpec struct { + OPAUrl string +} + +// BuildOptions represents the set of options required to build +// extra lokistack gateway k8s objects (e.g. ServiceAccount, Route, RBAC) +// on openshift. +type BuildOptions struct { + LokiStackName string + GatewayName string + GatewayNamespace string + GatewaySvcName string + GatewaySvcTargetPort string + Labels map[string]string + EnableCertificateSigningService bool +} + +// TenantData defines the existing tenantID and cookieSecret for lokistack reconcile. +type TenantData struct { + TenantID string + CookieSecret string +} + +// NewOptions returns an openshift options struct. 
+func NewOptions(
+	stackName string,
+	gwName, gwNamespace, gwBaseDomain, gwSvcName, gwPortName string,
+	gwLabels map[string]string,
+	enableCertSigningService bool,
+	tenantConfigMap map[string]TenantData,
+) Options {
+	host := ingressHost(stackName, gwNamespace, gwBaseDomain)
+
+	var authn []AuthenticationSpec
+	for _, name := range defaultTenants {
+		if data, ok := tenantConfigMap[name]; ok { // reuse stored credentials only when this tenant is actually present; a non-nil map missing a key previously produced empty TenantID/CookieSecret
+			authn = append(authn, AuthenticationSpec{
+				TenantName:     name,
+				TenantID:       data.TenantID,
+				ServiceAccount: gwName,
+				RedirectURL:    fmt.Sprintf("http://%s/openshift/%s/callback", host, name),
+				CookieSecret:   data.CookieSecret,
+			})
+		} else {
+			authn = append(authn, AuthenticationSpec{
+				TenantName:     name,
+				TenantID:       uuid.New().String(),
+				ServiceAccount: gwName,
+				RedirectURL:    fmt.Sprintf("http://%s/openshift/%s/callback", host, name),
+				CookieSecret:   newCookieSecret(),
+			})
+		}
+	}
+
+	return Options{
+		BuildOpts: BuildOptions{
+			LokiStackName:                   stackName,
+			GatewayName:                     gwName,
+			GatewayNamespace:                gwNamespace,
+			GatewaySvcName:                  gwSvcName,
+			GatewaySvcTargetPort:            gwPortName,
+			Labels:                          gwLabels,
+			EnableCertificateSigningService: enableCertSigningService,
+		},
+		Authentication: authn,
+		Authorization: AuthorizationSpec{
+			OPAUrl: fmt.Sprintf("http://localhost:%d/v1/data/%s/allow", GatewayOPAHTTPPort, opaDefaultPackage),
+		},
+	}
+}
+
+func newCookieSecret() string {
+	b := make([]rune, cookieSecretLength)
+	for i := range b {
+		b[i] = allowedRunes[rand.Intn(len(allowedRunes))]
+	}
+
+	return string(b)
+}
diff --git a/operator/internal/manifests/openshift/rbac.go b/operator/internal/manifests/openshift/rbac.go
new file mode 100644
index 0000000000..2c6b8ea0b0
--- /dev/null
+++ b/operator/internal/manifests/openshift/rbac.go
@@ -0,0 +1,77 @@
+package openshift
+
+import (
+	rbacv1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// BuildClusterRole returns a k8s ClusterRole object for the
+// lokistack gateway serviceaccount to allow creating:
+// - TokenReviews to authenticate the user by bearer token.
+// - SubjectAccessReview to authorize the user by bearer token.
+// if having access to read/create logs.
+func BuildClusterRole(opts Options) *rbacv1.ClusterRole {
+	return &rbacv1.ClusterRole{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ClusterRole",
+			APIVersion: rbacv1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   clusterRoleName(opts),
+			Labels: opts.BuildOpts.Labels,
+		},
+		Rules: []rbacv1.PolicyRule{
+			{
+				APIGroups: []string{
+					"authentication.k8s.io",
+				},
+				Resources: []string{
+					"tokenreviews",
+				},
+				Verbs: []string{
+					"create",
+				},
+			},
+			{
+				APIGroups: []string{
+					"authorization.k8s.io",
+				},
+				Resources: []string{
+					"subjectaccessreviews",
+				},
+				Verbs: []string{
+					"create",
+				},
+			},
+		},
+	}
+}
+
+// BuildClusterRoleBinding returns a k8s ClusterRoleBinding object for
+// the lokistack gateway serviceaccount to grant access to:
+// - rbac.authentication.k8s.io/TokenReviews
+// - rbac.authorization.k8s.io/SubjectAccessReviews
+func BuildClusterRoleBinding(opts Options) *rbacv1.ClusterRoleBinding {
+	return &rbacv1.ClusterRoleBinding{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ClusterRoleBinding",
+			APIVersion: rbacv1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   opts.BuildOpts.GatewayName,
+			Labels: opts.BuildOpts.Labels,
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "ClusterRole",
+			Name:     clusterRoleName(opts),
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				Kind:      rbacv1.ServiceAccountKind,
+				Name:      serviceAccountName(opts),
+				Namespace: opts.BuildOpts.GatewayNamespace,
+			},
+		},
+	}
+}
diff --git a/operator/internal/manifests/openshift/route.go b/operator/internal/manifests/openshift/route.go
new file mode 100644
index 0000000000..4b19815e52
--- /dev/null
+++ b/operator/internal/manifests/openshift/route.go
@@ -0,0 +1,35 @@
+package openshift
+
+import (
+	routev1 "github.com/openshift/api/route/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// BuildRoute builds an OpenShift route object for the LokiStack Gateway
+func BuildRoute(opts Options) client.Object {
+	return &routev1.Route{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Route",
+			APIVersion: routev1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      routeName(opts),
+			Namespace: opts.BuildOpts.GatewayNamespace,
+			Labels:    opts.BuildOpts.Labels,
+		},
+		Spec: routev1.RouteSpec{
+			To: routev1.RouteTargetReference{
+				Kind:   "Service",
+				Name:   opts.BuildOpts.GatewaySvcName,
+				Weight: pointer.Int32(100),
+			},
+			Port: &routev1.RoutePort{
+				TargetPort: intstr.FromString(opts.BuildOpts.GatewaySvcTargetPort),
+			},
+			WildcardPolicy: routev1.WildcardPolicyNone,
+		},
+	}
+}
diff --git a/operator/internal/manifests/openshift/service_ca.go b/operator/internal/manifests/openshift/service_ca.go
new file mode 100644
index 0000000000..d637fe109d
--- /dev/null
+++ b/operator/internal/manifests/openshift/service_ca.go
@@ -0,0 +1,26 @@
+package openshift
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// BuildServiceCAConfigMap returns a k8s configmap for the LokiStack
+// gateway serviceCA configmap. This configmap is used to configure
+// the gateway to proxy server-side TLS encrypted requests to Loki.
+func BuildServiceCAConfigMap(opts Options) *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + InjectCABundleKey: "true", + }, + Labels: opts.BuildOpts.Labels, + Name: serviceCABundleName(opts), + Namespace: opts.BuildOpts.GatewayNamespace, + }, + } +} diff --git a/operator/internal/manifests/openshift/serviceaccount.go b/operator/internal/manifests/openshift/serviceaccount.go new file mode 100644 index 0000000000..03f70af8c4 --- /dev/null +++ b/operator/internal/manifests/openshift/serviceaccount.go @@ -0,0 +1,27 @@ +package openshift + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// BuildServiceAccount returns a k8s object for the LokiStack Gateway +// serviceaccount. This ServiceAccount is used in parallel as an +// OpenShift OAuth Client. 
+func BuildServiceAccount(opts Options) client.Object { + return &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + Kind: "ServiceAccount", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Annotations: serviceAccountAnnotations(opts), + Labels: opts.BuildOpts.Labels, + Name: serviceAccountName(opts), + Namespace: opts.BuildOpts.GatewayNamespace, + }, + AutomountServiceAccountToken: pointer.Bool(true), + } +} diff --git a/operator/internal/manifests/openshift/serviceaccount_test.go b/operator/internal/manifests/openshift/serviceaccount_test.go new file mode 100644 index 0000000000..97cbaf1f0a --- /dev/null +++ b/operator/internal/manifests/openshift/serviceaccount_test.go @@ -0,0 +1,25 @@ +package openshift + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBuildServiceAccount_AnnotationsMatchDefaultTenants(t *testing.T) { + opts := NewOptions("abc", "abc", "efgh", "example.com", "abc", "abc", map[string]string{}, false, map[string]TenantData{}) + + sa := BuildServiceAccount(opts) + require.Len(t, sa.GetAnnotations(), len(defaultTenants)) + + var keys []string + for key := range sa.GetAnnotations() { + keys = append(keys, key) + } + + for _, name := range defaultTenants { + v := fmt.Sprintf("serviceaccounts.openshift.io/oauth-redirectreference.%s", name) + require.Contains(t, keys, v) + } +} diff --git a/operator/internal/manifests/openshift/var.go b/operator/internal/manifests/openshift/var.go new file mode 100644 index 0000000000..3bf984505f --- /dev/null +++ b/operator/internal/manifests/openshift/var.go @@ -0,0 +1,65 @@ +package openshift + +import ( + "fmt" +) + +var ( + // GatewayOPAHTTPPort is the HTTP port of the OpenPolicyAgent sidecar. + GatewayOPAHTTPPort int32 = 8082 + // GatewayOPAInternalPort is the HTTP metrics port of the OpenPolicyAgent sidecar. 
+ GatewayOPAInternalPort int32 = 8083 + + // GatewayOPAHTTPPortName is the HTTP container port name of the OpenPolicyAgent sidecar. + GatewayOPAHTTPPortName = "public" + // GatewayOPAInternalPortName is the HTTP container metrics port name of the OpenPolicyAgent sidecar. + GatewayOPAInternalPortName = "opa-metrics" + + bearerTokenFile string = "/var/run/secrets/kubernetes.io/serviceaccount/token" + + cookieSecretLength = 32 + allowedRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") + + defaultConfigMapMode = int32(420) + + // ServingCertKey is the annotation key for services used the + // cert-signing service to create a new key/cert pair signed + // by the service CA stored in a secret with the same name + // as the annotated service. + ServingCertKey = "service.beta.openshift.io/serving-cert-secret-name" + // InjectCABundleKey is the annotation key for configmaps used by the + // cert-signing service to inject the service CA into the annotated + // configmap. 
+	InjectCABundleKey = "service.beta.openshift.io/inject-cabundle"
+)
+
+func clusterRoleName(opts Options) string {
+	return opts.BuildOpts.GatewayName
+}
+
+func ingressHost(stackName, namespace, baseDomain string) string {
+	return fmt.Sprintf("%s-%s.apps.%s", stackName, namespace, baseDomain)
+}
+
+func routeName(opts Options) string {
+	return opts.BuildOpts.LokiStackName
+}
+
+func serviceAccountName(opts Options) string {
+	return opts.BuildOpts.GatewayName
+}
+
+func serviceCABundleName(opts Options) string {
+	return fmt.Sprintf("%s-ca-bundle", opts.BuildOpts.GatewayName)
+}
+
+func serviceAccountAnnotations(opts Options) map[string]string {
+	a := make(map[string]string, len(opts.Authentication))
+	for _, auth := range opts.Authentication {
+		key := fmt.Sprintf("serviceaccounts.openshift.io/oauth-redirectreference.%s", auth.TenantName)
+		value := fmt.Sprintf(`{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"%s"}}`, routeName(opts))
+		a[key] = value
+	}
+
+	return a
+}
diff --git a/operator/internal/manifests/options.go b/operator/internal/manifests/options.go
new file mode 100644
index 0000000000..c5abae072c
--- /dev/null
+++ b/operator/internal/manifests/options.go
@@ -0,0 +1,55 @@
+package manifests
+
+import (
+	lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1"
+	"github.com/grafana/loki-operator/internal/manifests/internal"
+	"github.com/grafana/loki-operator/internal/manifests/openshift"
+)
+
+// Options is a set of configuration values to use when building manifests such as resource sizes, etc.
+// Most of this should be provided - either directly or indirectly - by the user.
+type Options struct { + Name string + Namespace string + Image string + GatewayImage string + GatewayBaseDomain string + ConfigSHA1 string + + Flags FeatureFlags + + Stack lokiv1beta1.LokiStackSpec + ResourceRequirements internal.ComponentResources + + ObjectStorage ObjectStorage + + OpenShiftOptions openshift.Options + TenantSecrets []*TenantSecrets + TenantConfigMap map[string]openshift.TenantData +} + +// ObjectStorage for storage config. +type ObjectStorage struct { + Endpoint string + Region string + Buckets string + AccessKeyID string + AccessKeySecret string +} + +// FeatureFlags contains flags that activate various features +type FeatureFlags struct { + EnableCertificateSigningService bool + EnableServiceMonitors bool + EnableTLSServiceMonitorConfig bool + EnableGateway bool + EnableGatewayRoute bool +} + +// TenantSecrets for clientID, clientSecret and issuerCAPath for tenant's authentication. +type TenantSecrets struct { + TenantName string + ClientID string + ClientSecret string + IssuerCAPath string +} diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go new file mode 100644 index 0000000000..e02a5b15f9 --- /dev/null +++ b/operator/internal/manifests/querier.go @@ -0,0 +1,218 @@ +package manifests + +import ( + "fmt" + "path" + + "github.com/grafana/loki-operator/internal/manifests/internal/config" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// BuildQuerier returns a list of k8s objects for Loki Querier +func BuildQuerier(opts Options) ([]client.Object, error) { + deployment := NewQuerierDeployment(opts) + if opts.Flags.EnableTLSServiceMonitorConfig { + if err := configureQuerierServiceMonitorPKI(deployment, opts.Name); err != nil { + return nil, err + } + } + + return []client.Object{ + deployment, + 
NewQuerierGRPCService(opts), + NewQuerierHTTPService(opts), + }, nil +} + +// NewQuerierDeployment creates a deployment object for a querier +func NewQuerierDeployment(opts Options) *appsv1.Deployment { + podSpec := corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: configVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultConfigMapMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: lokiConfigMapName(opts.Name), + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Image: opts.Image, + Name: "loki-querier", + Resources: corev1.ResourceRequirements{ + Limits: opts.ResourceRequirements.Querier.Limits, + Requests: opts.ResourceRequirements.Querier.Requests, + }, + Args: []string{ + "-target=querier", + fmt.Sprintf("-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), + fmt.Sprintf("-runtime-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.FromInt(httpPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + PeriodSeconds: 10, + InitialDelaySeconds: 15, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/metrics", + Port: intstr.FromInt(httpPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + TimeoutSeconds: 2, + PeriodSeconds: 30, + FailureThreshold: 10, + SuccessThreshold: 1, + }, + Ports: []corev1.ContainerPort{ + { + Name: lokiHTTPPortName, + ContainerPort: httpPort, + Protocol: protocolTCP, + }, + { + Name: lokiGRPCPortName, + ContainerPort: grpcPort, + Protocol: protocolTCP, + }, + { + Name: lokiGossipPortName, + ContainerPort: gossipPort, + Protocol: protocolTCP, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: configVolumeName, + ReadOnly: 
false, + MountPath: config.LokiConfigMountDir, + }, + }, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: "File", + ImagePullPolicy: "IfNotPresent", + }, + }, + } + + if opts.Stack.Template != nil && opts.Stack.Template.Querier != nil { + podSpec.Tolerations = opts.Stack.Template.Querier.Tolerations + podSpec.NodeSelector = opts.Stack.Template.Querier.NodeSelector + } + + l := ComponentLabels(LabelQuerierComponent, opts.Name) + a := commonAnnotations(opts.ConfigSHA1) + + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: QuerierName(opts.Name), + Labels: l, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32Ptr(opts.Stack.Template.Querier.Replicas), + Selector: &metav1.LabelSelector{ + MatchLabels: labels.Merge(l, GossipLabels()), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("loki-querier-%s", opts.Name), + Labels: labels.Merge(l, GossipLabels()), + Annotations: a, + }, + Spec: podSpec, + }, + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + }, + }, + } +} + +// NewQuerierGRPCService creates a k8s service for the querier GRPC endpoint +func NewQuerierGRPCService(opts Options) *corev1.Service { + l := ComponentLabels(LabelQuerierComponent, opts.Name) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceNameQuerierGRPC(opts.Name), + Labels: l, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: lokiGRPCPortName, + Port: grpcPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: grpcPort}, + }, + }, + Selector: l, + }, + } +} + +// NewQuerierHTTPService creates a k8s service for the querier HTTP endpoint +func 
NewQuerierHTTPService(opts Options) *corev1.Service { + serviceName := serviceNameQuerierHTTP(opts.Name) + l := ComponentLabels(LabelQuerierComponent, opts.Name) + a := serviceAnnotations(serviceName, opts.Flags.EnableCertificateSigningService) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Labels: l, + Annotations: a, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: lokiHTTPPortName, + Port: httpPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: httpPort}, + }, + }, + Selector: l, + }, + } +} + +func configureQuerierServiceMonitorPKI(deployment *appsv1.Deployment, stackName string) error { + serviceName := serviceNameQuerierHTTP(stackName) + return configureServiceMonitorPKI(&deployment.Spec.Template.Spec, serviceName) +} diff --git a/operator/internal/manifests/querier_test.go b/operator/internal/manifests/querier_test.go new file mode 100644 index 0000000000..dce9c1858b --- /dev/null +++ b/operator/internal/manifests/querier_test.go @@ -0,0 +1,57 @@ +package manifests_test + +import ( + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests" + "github.com/stretchr/testify/require" +) + +func TestNewQuerierDeployment_HasTemplateConfigHashAnnotation(t *testing.T) { + ss := manifests.NewQuerierDeployment(manifests.Options{ + Name: "abcd", + Namespace: "efgh", + ConfigSHA1: "deadbeef", + Stack: lokiv1beta1.LokiStackSpec{ + StorageClassName: "standard", + Template: &lokiv1beta1.LokiTemplateSpec{ + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + }) + + expected := "loki.grafana.com/config-hash" + annotations := ss.Spec.Template.Annotations + require.Contains(t, annotations, expected) + require.Equal(t, annotations[expected], "deadbeef") +} + +func 
TestNewQuerierDeployment_SelectorMatchesLabels(t *testing.T) { + // You must set the .spec.selector field of a Deployment to match the labels of + // its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the + // .spec.selector field was defaulted when omitted. In 1.8 and later versions, + // failing to specify a matching Pod Selector will result in a validation error + // during Deployment creation. + // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-selector + ss := manifests.NewQuerierDeployment(manifests.Options{ + Name: "abcd", + Namespace: "efgh", + Stack: lokiv1beta1.LokiStackSpec{ + StorageClassName: "standard", + Template: &lokiv1beta1.LokiTemplateSpec{ + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + }) + + l := ss.Spec.Template.GetObjectMeta().GetLabels() + for key, value := range ss.Spec.Selector.MatchLabels { + require.Contains(t, l, key) + require.Equal(t, l[key], value) + } +} diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go new file mode 100644 index 0000000000..a50b94f5b9 --- /dev/null +++ b/operator/internal/manifests/query-frontend.go @@ -0,0 +1,224 @@ +package manifests + +import ( + "fmt" + "path" + + "github.com/grafana/loki-operator/internal/manifests/internal/config" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// BuildQueryFrontend returns a list of k8s objects for Loki QueryFrontend +func BuildQueryFrontend(opts Options) ([]client.Object, error) { + deployment := NewQueryFrontendDeployment(opts) + if opts.Flags.EnableTLSServiceMonitorConfig { + if err := configureQueryFrontendServiceMonitorPKI(deployment, opts.Name); err != nil { + return nil, err + } + } + + return []client.Object{ + deployment, + 
NewQueryFrontendGRPCService(opts), + NewQueryFrontendHTTPService(opts), + }, nil +} + +// NewQueryFrontendDeployment creates a deployment object for a query-frontend +func NewQueryFrontendDeployment(opts Options) *appsv1.Deployment { + podSpec := corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: configVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultConfigMapMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: lokiConfigMapName(opts.Name), + }, + }, + }, + }, + { + Name: storageVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + Containers: []corev1.Container{ + { + Image: opts.Image, + Name: "loki-query-frontend", + Resources: corev1.ResourceRequirements{ + Limits: opts.ResourceRequirements.QueryFrontend.Limits, + Requests: opts.ResourceRequirements.QueryFrontend.Requests, + }, + Args: []string{ + "-target=query-frontend", + fmt.Sprintf("-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), + fmt.Sprintf("-runtime-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/metrics", + Port: intstr.FromInt(httpPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + PeriodSeconds: 10, + InitialDelaySeconds: 15, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/metrics", + Port: intstr.FromInt(httpPort), + Scheme: corev1.URISchemeHTTP, + }, + }, + TimeoutSeconds: 2, + PeriodSeconds: 30, + FailureThreshold: 10, + SuccessThreshold: 1, + }, + Ports: []corev1.ContainerPort{ + { + Name: lokiHTTPPortName, + ContainerPort: httpPort, + Protocol: protocolTCP, + }, + { + Name: lokiGRPCPortName, + ContainerPort: grpcPort, + Protocol: protocolTCP, + }, 
+ }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: configVolumeName, + ReadOnly: false, + MountPath: config.LokiConfigMountDir, + }, + { + Name: storageVolumeName, + ReadOnly: false, + MountPath: dataDirectory, + }, + }, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: "File", + ImagePullPolicy: "IfNotPresent", + }, + }, + } + + if opts.Stack.Template != nil && opts.Stack.Template.QueryFrontend != nil { + podSpec.Tolerations = opts.Stack.Template.QueryFrontend.Tolerations + podSpec.NodeSelector = opts.Stack.Template.QueryFrontend.NodeSelector + } + + l := ComponentLabels(LabelQueryFrontendComponent, opts.Name) + a := commonAnnotations(opts.ConfigSHA1) + + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: QueryFrontendName(opts.Name), + Labels: l, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32Ptr(opts.Stack.Template.QueryFrontend.Replicas), + Selector: &metav1.LabelSelector{ + MatchLabels: labels.Merge(l, GossipLabels()), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("loki-query-frontend-%s", opts.Name), + Labels: labels.Merge(l, GossipLabels()), + Annotations: a, + }, + Spec: podSpec, + }, + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + }, + }, + } +} + +// NewQueryFrontendGRPCService creates a k8s service for the query-frontend GRPC endpoint +func NewQueryFrontendGRPCService(opts Options) *corev1.Service { + l := ComponentLabels(LabelQueryFrontendComponent, opts.Name) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceNameQueryFrontendGRPC(opts.Name), + Labels: l, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: lokiGRPCPortName, + 
Port: grpcPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: grpcPort}, + }, + }, + Selector: l, + }, + } +} + +// NewQueryFrontendHTTPService creates a k8s service for the query-frontend HTTP endpoint +func NewQueryFrontendHTTPService(opts Options) *corev1.Service { + serviceName := serviceNameQueryFrontendHTTP(opts.Name) + l := ComponentLabels(LabelQueryFrontendComponent, opts.Name) + a := serviceAnnotations(serviceName, opts.Flags.EnableCertificateSigningService) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Labels: l, + Annotations: a, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: lokiHTTPPortName, + Port: httpPort, + Protocol: protocolTCP, + TargetPort: intstr.IntOrString{IntVal: httpPort}, + }, + }, + Selector: l, + }, + } +} + +func configureQueryFrontendServiceMonitorPKI(deployment *appsv1.Deployment, stackName string) error { + serviceName := serviceNameQueryFrontendHTTP(stackName) + return configureServiceMonitorPKI(&deployment.Spec.Template.Spec, serviceName) +} diff --git a/operator/internal/manifests/query-frontend_test.go b/operator/internal/manifests/query-frontend_test.go new file mode 100644 index 0000000000..bf019ddfe5 --- /dev/null +++ b/operator/internal/manifests/query-frontend_test.go @@ -0,0 +1,47 @@ +package manifests_test + +import ( + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests" + "github.com/stretchr/testify/require" +) + +func TestNewQueryFrontendDeployment_SelectorMatchesLabels(t *testing.T) { + ss := manifests.NewQueryFrontendDeployment(manifests.Options{ + Name: "abcd", + Namespace: "efgh", + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + }) + l := 
ss.Spec.Template.GetObjectMeta().GetLabels() + for key, value := range ss.Spec.Selector.MatchLabels { + require.Contains(t, l, key) + require.Equal(t, l[key], value) + } +} + +func TestNewQueryFrontendDeployment_HasTemplateConfigHashAnnotation(t *testing.T) { + ss := manifests.NewQueryFrontendDeployment(manifests.Options{ + Name: "abcd", + Namespace: "efgh", + ConfigSHA1: "deadbeef", + Stack: lokiv1beta1.LokiStackSpec{ + Template: &lokiv1beta1.LokiTemplateSpec{ + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + }) + expected := "loki.grafana.com/config-hash" + annotations := ss.Spec.Template.Annotations + require.Contains(t, annotations, expected) + require.Equal(t, annotations[expected], "deadbeef") +} diff --git a/operator/internal/manifests/service_monitor.go b/operator/internal/manifests/service_monitor.go new file mode 100644 index 0000000000..aeecc780b3 --- /dev/null +++ b/operator/internal/manifests/service_monitor.go @@ -0,0 +1,193 @@ +package manifests + +import ( + "github.com/ViaQ/logerr/kverrors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/imdario/mergo" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// BuildServiceMonitors builds the service monitors +func BuildServiceMonitors(opts Options) []client.Object { + return []client.Object{ + NewDistributorServiceMonitor(opts), + NewIngesterServiceMonitor(opts), + NewQuerierServiceMonitor(opts), + NewCompactorServiceMonitor(opts), + NewQueryFrontendServiceMonitor(opts), + NewIndexGatewayServiceMonitor(opts), + NewGatewayServiceMonitor(opts), + } +} + +// NewDistributorServiceMonitor creates a k8s service monitor for the distributor component +func NewDistributorServiceMonitor(opts Options) *monitoringv1.ServiceMonitor { + l := ComponentLabels(LabelDistributorComponent, opts.Name) + + serviceMonitorName 
:= serviceMonitorName(DistributorName(opts.Name)) + serviceName := serviceNameDistributorHTTP(opts.Name) + lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig) + + return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint) +} + +// NewIngesterServiceMonitor creates a k8s service monitor for the ingester component +func NewIngesterServiceMonitor(opts Options) *monitoringv1.ServiceMonitor { + l := ComponentLabels(LabelIngesterComponent, opts.Name) + + serviceMonitorName := serviceMonitorName(IngesterName(opts.Name)) + serviceName := serviceNameIngesterHTTP(opts.Name) + lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig) + + return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint) +} + +// NewQuerierServiceMonitor creates a k8s service monitor for the querier component +func NewQuerierServiceMonitor(opts Options) *monitoringv1.ServiceMonitor { + l := ComponentLabels(LabelQuerierComponent, opts.Name) + + serviceMonitorName := serviceMonitorName(QuerierName(opts.Name)) + serviceName := serviceNameQuerierHTTP(opts.Name) + lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig) + + return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint) +} + +// NewCompactorServiceMonitor creates a k8s service monitor for the compactor component +func NewCompactorServiceMonitor(opts Options) *monitoringv1.ServiceMonitor { + l := ComponentLabels(LabelCompactorComponent, opts.Name) + + serviceMonitorName := serviceMonitorName(CompactorName(opts.Name)) + serviceName := serviceNameCompactorHTTP(opts.Name) + lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig) + + return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint) 
+} + +// NewQueryFrontendServiceMonitor creates a k8s service monitor for the query-frontend component +func NewQueryFrontendServiceMonitor(opts Options) *monitoringv1.ServiceMonitor { + l := ComponentLabels(LabelQueryFrontendComponent, opts.Name) + + serviceMonitorName := serviceMonitorName(QueryFrontendName(opts.Name)) + serviceName := serviceNameQueryFrontendHTTP(opts.Name) + lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig) + + return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint) +} + +// NewIndexGatewayServiceMonitor creates a k8s service monitor for the index-gateway component +func NewIndexGatewayServiceMonitor(opts Options) *monitoringv1.ServiceMonitor { + l := ComponentLabels(LabelIndexGatewayComponent, opts.Name) + + serviceMonitorName := serviceMonitorName(IndexGatewayName(opts.Name)) + serviceName := serviceNameIndexGatewayHTTP(opts.Name) + lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig) + + return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint) +} + +// NewGatewayServiceMonitor creates a k8s service monitor for the lokistack-gateway component +func NewGatewayServiceMonitor(opts Options) *monitoringv1.ServiceMonitor { + l := ComponentLabels(LabelGatewayComponent, opts.Name) + + serviceMonitorName := serviceMonitorName(GatewayName(opts.Name)) + serviceName := serviceNameGatewayHTTP(opts.Name) + gwEndpoint := serviceMonitorEndpoint(gatewayInternalPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig) + + sm := newServiceMonitor(opts.Namespace, serviceMonitorName, l, gwEndpoint) + + if opts.Stack.Tenants != nil { + if err := configureServiceMonitorForMode(sm, opts.Stack.Tenants.Mode, opts.Flags); err != nil { + return sm + } + } + + return sm +} + +func newServiceMonitor(namespace, serviceMonitorName string, labels labels.Set, 
endpoint monitoringv1.Endpoint) *monitoringv1.ServiceMonitor { + return &monitoringv1.ServiceMonitor{ + TypeMeta: metav1.TypeMeta{ + Kind: monitoringv1.ServiceMonitorsKind, + APIVersion: monitoringv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceMonitorName, + Namespace: namespace, + Labels: labels, + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: labelJobComponent, + Endpoints: []monitoringv1.Endpoint{endpoint}, + Selector: metav1.LabelSelector{ + MatchLabels: labels, + }, + NamespaceSelector: monitoringv1.NamespaceSelector{ + MatchNames: []string{namespace}, + }, + }, + } +} + +func configureServiceMonitorPKI(podSpec *corev1.PodSpec, serviceName string) error { + secretName := signingServiceSecretName(serviceName) + secretVolumeSpec := corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: secretName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }, + }, + } + secretContainerSpec := corev1.Container{ + VolumeMounts: []corev1.VolumeMount{ + { + Name: secretName, + ReadOnly: false, + MountPath: secretDirectory, + }, + }, + Args: []string{ + "-server.http-tls-cert-path=/etc/proxy/secrets/tls.crt", + "-server.http-tls-key-path=/etc/proxy/secrets/tls.key", + }, + } + uriSchemeContainerSpec := corev1.Container{ + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Scheme: corev1.URISchemeHTTPS, + }, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Scheme: corev1.URISchemeHTTPS, + }, + }, + }, + } + + if err := mergo.Merge(podSpec, secretVolumeSpec, mergo.WithAppendSlice); err != nil { + return kverrors.Wrap(err, "failed to merge volumes") + } + + if err := mergo.Merge(&podSpec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil { + return kverrors.Wrap(err, "failed to merge container") + } + + if err := mergo.Merge(&podSpec.Containers[0], 
uriSchemeContainerSpec, mergo.WithOverride); err != nil { + return kverrors.Wrap(err, "failed to merge container") + } + + return nil +} diff --git a/operator/internal/manifests/service_monitor_test.go b/operator/internal/manifests/service_monitor_test.go new file mode 100644 index 0000000000..31c879fa78 --- /dev/null +++ b/operator/internal/manifests/service_monitor_test.go @@ -0,0 +1,135 @@ +package manifests + +import ( + "fmt" + "testing" + + corev1 "k8s.io/api/core/v1" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test that all serviceMonitor match the labels of their services so that we know all serviceMonitor +// will work when deployed. +func TestServiceMonitorMatchLabels(t *testing.T) { + type test struct { + Service *corev1.Service + ServiceMonitor *monitoringv1.ServiceMonitor + } + + flags := FeatureFlags{ + EnableCertificateSigningService: true, + EnableServiceMonitors: true, + EnableTLSServiceMonitorConfig: true, + } + + opt := Options{ + Name: "test", + Namespace: "test", + Image: "test", + Flags: flags, + Stack: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + } + + table := []test{ + { + Service: NewDistributorHTTPService(opt), + ServiceMonitor: NewDistributorServiceMonitor(opt), + }, + { + Service: NewIngesterHTTPService(opt), + 
ServiceMonitor: NewIngesterServiceMonitor(opt), + }, + { + Service: NewQuerierHTTPService(opt), + ServiceMonitor: NewQuerierServiceMonitor(opt), + }, + { + Service: NewQueryFrontendHTTPService(opt), + ServiceMonitor: NewQueryFrontendServiceMonitor(opt), + }, + { + Service: NewCompactorHTTPService(opt), + ServiceMonitor: NewCompactorServiceMonitor(opt), + }, + { + Service: NewGatewayHTTPService(opt), + ServiceMonitor: NewGatewayServiceMonitor(opt), + }, + { + Service: NewIndexGatewayHTTPService(opt), + ServiceMonitor: NewIndexGatewayServiceMonitor(opt), + }, + } + + for _, tst := range table { + testName := fmt.Sprintf("%s_%s", tst.Service.GetName(), tst.ServiceMonitor.GetName()) + t.Run(testName, func(t *testing.T) { + t.Parallel() + for k, v := range tst.ServiceMonitor.Spec.Selector.MatchLabels { + if assert.Contains(t, tst.Service.Spec.Selector, k) { + // only assert Equal if the previous assertion is successful or this will panic + assert.Equal(t, v, tst.Service.Spec.Selector[k]) + } + } + }) + } +} + +func TestServiceMonitorEndpoints_ForOpenShiftLoggingMode(t *testing.T) { + flags := FeatureFlags{ + EnableGateway: true, + EnableCertificateSigningService: true, + EnableServiceMonitors: true, + EnableTLSServiceMonitorConfig: true, + } + + opt := Options{ + Name: "test", + Namespace: "test", + Image: "test", + Flags: flags, + Stack: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Tenants: &lokiv1beta1.TenantsSpec{ + Mode: lokiv1beta1.OpenshiftLogging, + }, + Template: &lokiv1beta1.LokiTemplateSpec{ + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + } + + sm := NewGatewayServiceMonitor(opt) + require.Len(t, sm.Spec.Endpoints, 2) +} diff --git a/operator/internal/manifests/service_test.go b/operator/internal/manifests/service_test.go new file mode 100644 index 0000000000..2bd0b07ead --- /dev/null +++ b/operator/internal/manifests/service_test.go @@ -0,0 +1,239 @@ +package manifests + +import ( + "fmt" + "testing" + + 
lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Test that the service ports have matching deployment/statefulset/daemonset ports on the podspec. +func TestServicesMatchPorts(t *testing.T) { + type test struct { + Services []*corev1.Service + Containers []corev1.Container + } + opt := Options{ + Name: "test", + Namespace: "test", + Image: "test", + Stack: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + } + sha1C := "deadbef" + + table := []test{ + { + Containers: NewDistributorDeployment(opt).Spec.Template.Spec.Containers, + Services: []*corev1.Service{ + NewDistributorGRPCService(opt), + NewDistributorHTTPService(opt), + }, + }, + { + Containers: NewIngesterStatefulSet(opt).Spec.Template.Spec.Containers, + Services: []*corev1.Service{ + NewIngesterGRPCService(opt), + NewIngesterHTTPService(opt), + }, + }, + { + Containers: NewQuerierDeployment(opt).Spec.Template.Spec.Containers, + Services: []*corev1.Service{ + NewQuerierGRPCService(opt), + NewQuerierHTTPService(opt), + }, + }, + { + Containers: NewQueryFrontendDeployment(opt).Spec.Template.Spec.Containers, + Services: []*corev1.Service{ + NewQueryFrontendGRPCService(opt), + NewQueryFrontendHTTPService(opt), + }, + }, + { + Containers: NewCompactorStatefulSet(opt).Spec.Template.Spec.Containers, + Services: []*corev1.Service{ + NewCompactorGRPCService(opt), + 
NewCompactorHTTPService(opt), + }, + }, + { + Containers: NewGatewayDeployment(opt, sha1C).Spec.Template.Spec.Containers, + Services: []*corev1.Service{ + NewGatewayHTTPService(opt), + }, + }, + { + Containers: NewIndexGatewayStatefulSet(opt).Spec.Template.Spec.Containers, + Services: []*corev1.Service{ + NewIndexGatewayGRPCService(opt), + NewIndexGatewayHTTPService(opt), + }, + }, + } + + containerHasPort := func(containers []corev1.Container, port int32) bool { + for _, container := range containers { + for _, p := range container.Ports { + if p.ContainerPort == port { + return true + } + } + } + return false + } + + for _, tst := range table { + for _, service := range tst.Services { + for _, port := range service.Spec.Ports { + // rescope for t.Parallel + tst, service, port := tst, service, port + testName := fmt.Sprintf("%s_%d", service.GetName(), port.Port) + t.Run(testName, func(t *testing.T) { + t.Parallel() + found := containerHasPort(tst.Containers, port.Port) + assert.True(t, found, "Service port (%d) does not match any port in the defined containers", port.Port) + }) + } + } + } +} + +// Test that all services match the labels of their deployments/statefulsets so that we know all services will +// work when deployed. 
+func TestServicesMatchLabels(t *testing.T) { + type test struct { + Services []*corev1.Service + Object client.Object + } + + opt := Options{ + Name: "test", + Namespace: "test", + Image: "test", + Stack: lokiv1beta1.LokiStackSpec{ + Size: lokiv1beta1.SizeOneXExtraSmall, + Template: &lokiv1beta1.LokiTemplateSpec{ + Compactor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Distributor: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Ingester: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Querier: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + QueryFrontend: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + Gateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + IndexGateway: &lokiv1beta1.LokiComponentSpec{ + Replicas: 1, + }, + }, + }, + } + sha1C := "deadbef" + + table := []test{ + { + Object: NewDistributorDeployment(opt), + Services: []*corev1.Service{ + NewDistributorGRPCService(opt), + NewDistributorHTTPService(opt), + }, + }, + { + Object: NewIngesterStatefulSet(opt), + Services: []*corev1.Service{ + NewIngesterGRPCService(opt), + NewIngesterHTTPService(opt), + }, + }, + { + Object: NewQuerierDeployment(opt), + Services: []*corev1.Service{ + NewQuerierGRPCService(opt), + NewQuerierHTTPService(opt), + }, + }, + { + Object: NewQueryFrontendDeployment(opt), + Services: []*corev1.Service{ + NewQueryFrontendGRPCService(opt), + NewQueryFrontendHTTPService(opt), + }, + }, + { + Object: NewCompactorStatefulSet(opt), + Services: []*corev1.Service{ + NewCompactorGRPCService(opt), + NewCompactorHTTPService(opt), + }, + }, + { + Object: NewGatewayDeployment(opt, sha1C), + Services: []*corev1.Service{ + NewGatewayHTTPService(opt), + }, + }, + { + Object: NewIndexGatewayStatefulSet(opt), + Services: []*corev1.Service{ + NewIndexGatewayGRPCService(opt), + NewIndexGatewayHTTPService(opt), + }, + }, + } + + for _, tst := range table { + for _, service := range tst.Services { + // rescope for t.Parallel() + tst, service := tst, service + 
+ testName := fmt.Sprintf("%s_%s", tst.Object.GetName(), service.GetName()) + t.Run(testName, func(t *testing.T) { + t.Parallel() + for k, v := range service.Spec.Selector { + if assert.Contains(t, tst.Object.GetLabels(), k) { + // only assert Equal if the previous assertion is successful or this will panic + assert.Equal(t, v, tst.Object.GetLabels()[k]) + } + } + }) + } + } +} diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go new file mode 100644 index 0000000000..5be8651d2c --- /dev/null +++ b/operator/internal/manifests/var.go @@ -0,0 +1,232 @@ +package manifests + +import ( + "fmt" + + "github.com/grafana/loki-operator/internal/manifests/openshift" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +const ( + gossipPort = 7946 + httpPort = 3100 + grpcPort = 9095 + protocolTCP = "TCP" + + lokiHTTPPortName = "metrics" + lokiGRPCPortName = "grpc" + lokiGossipPortName = "gossip-ring" + + gatewayContainerName = "gateway" + gatewayHTTPPort = 8080 + gatewayInternalPort = 8081 + gatewayHTTPPortName = "public" + gatewayInternalPortName = "metrics" + + // EnvRelatedImageLoki is the environment variable to fetch the Loki image pullspec. + EnvRelatedImageLoki = "RELATED_IMAGE_LOKI" + // EnvRelatedImageGateway is the environment variable to fetch the Gateway image pullspec. + EnvRelatedImageGateway = "RELATED_IMAGE_GATEWAY" + + // DefaultContainerImage declares the default fallback for loki image. + DefaultContainerImage = "docker.io/grafana/loki:2.4.1" + + // DefaultLokiStackGatewayImage declares the default image for lokiStack-gateway. + DefaultLokiStackGatewayImage = "quay.io/observatorium/api:latest" + + // PrometheusCAFile declares the path for prometheus CA file for service monitors. 
+ PrometheusCAFile string = "/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt" + // BearerTokenFile declares the path for bearer token file for service monitors. + BearerTokenFile string = "/var/run/secrets/kubernetes.io/serviceaccount/token" + + // labelJobComponent is a ServiceMonitor.Spec.JobLabel. + labelJobComponent string = "loki.grafana.com/component" + + // LabelCompactorComponent is the label value for the compactor component + LabelCompactorComponent string = "compactor" + // LabelDistributorComponent is the label value for the distributor component + LabelDistributorComponent string = "distributor" + // LabelIngesterComponent is the label value for the ingester component + LabelIngesterComponent string = "ingester" + // LabelQuerierComponent is the label value for the querier component + LabelQuerierComponent string = "querier" + // LabelQueryFrontendComponent is the label value for the query frontend component + LabelQueryFrontendComponent string = "query-frontend" + // LabelIndexGatewayComponent is the label value for the lokiStack-index-gateway component + LabelIndexGatewayComponent string = "index-gateway" + // LabelGatewayComponent is the label value for the lokiStack-gateway component + LabelGatewayComponent string = "lokistack-gateway" +) + +var ( + defaultConfigMapMode = int32(420) + volumeFileSystemMode = corev1.PersistentVolumeFilesystem +) + +func commonAnnotations(h string) map[string]string { + return map[string]string{ + "loki.grafana.com/config-hash": h, + } +} + +func commonLabels(stackName string) map[string]string { + return map[string]string{ + "app.kubernetes.io/name": "loki", + "app.kubernetes.io/provider": "openshift", + "loki.grafana.com/name": stackName, + } +} + +func serviceAnnotations(serviceName string, enableSigningService bool) map[string]string { + annotations := map[string]string{} + if enableSigningService { + annotations[openshift.ServingCertKey] = signingServiceSecretName(serviceName) + } + return 
annotations +} + +// ComponentLabels is a list of all commonLabels including the loki.grafana.com/component: label +func ComponentLabels(component, stackName string) labels.Set { + return labels.Merge(commonLabels(stackName), map[string]string{ + "loki.grafana.com/component": component, + }) +} + +// GossipLabels is the list of labels that should be assigned to components using the gossip ring +func GossipLabels() map[string]string { + return map[string]string{ + "loki.grafana.com/gossip": "true", + } +} + +// CompactorName is the name of the compactor statefulset +func CompactorName(stackName string) string { + return fmt.Sprintf("loki-compactor-%s", stackName) +} + +// DistributorName is the name of the distibutor deployment +func DistributorName(stackName string) string { + return fmt.Sprintf("loki-distributor-%s", stackName) +} + +// IngesterName is the name of the compactor statefulset +func IngesterName(stackName string) string { + return fmt.Sprintf("loki-ingester-%s", stackName) +} + +// QuerierName is the name of the querier deployment +func QuerierName(stackName string) string { + return fmt.Sprintf("loki-querier-%s", stackName) +} + +// QueryFrontendName is the name of the query-frontend statefulset +func QueryFrontendName(stackName string) string { + return fmt.Sprintf("loki-query-frontend-%s", stackName) +} + +// IndexGatewayName is the name of the index-gateway statefulset +func IndexGatewayName(stackName string) string { + return fmt.Sprintf("loki-index-gateway-%s", stackName) +} + +// GatewayName is the name of the lokiStack-gateway statefulset +func GatewayName(stackName string) string { + return fmt.Sprintf("lokistack-gateway-%s", stackName) +} + +func serviceNameQuerierHTTP(stackName string) string { + return fmt.Sprintf("loki-querier-http-%s", stackName) +} + +func serviceNameQuerierGRPC(stackName string) string { + return fmt.Sprintf("loki-querier-grpc-%s", stackName) +} + +func serviceNameIngesterGRPC(stackName string) string { + return 
fmt.Sprintf("loki-ingester-grpc-%s", stackName) +} + +func serviceNameIngesterHTTP(stackName string) string { + return fmt.Sprintf("loki-ingester-http-%s", stackName) +} + +func serviceNameDistributorGRPC(stackName string) string { + return fmt.Sprintf("loki-distributor-grpc-%s", stackName) +} + +func serviceNameDistributorHTTP(stackName string) string { + return fmt.Sprintf("loki-distributor-http-%s", stackName) +} + +func serviceNameCompactorGRPC(stackName string) string { + return fmt.Sprintf("loki-compactor-grpc-%s", stackName) +} + +func serviceNameCompactorHTTP(stackName string) string { + return fmt.Sprintf("loki-compactor-http-%s", stackName) +} + +func serviceNameQueryFrontendGRPC(stackName string) string { + return fmt.Sprintf("loki-query-frontend-grpc-%s", stackName) +} + +func serviceNameQueryFrontendHTTP(stackName string) string { + return fmt.Sprintf("loki-query-frontend-http-%s", stackName) +} + +func serviceNameIndexGatewayHTTP(stackName string) string { + return fmt.Sprintf("loki-index-gateway-http-%s", stackName) +} + +func serviceNameIndexGatewayGRPC(stackName string) string { + return fmt.Sprintf("loki-index-gateway-grpc-%s", stackName) +} + +func serviceNameGatewayHTTP(stackName string) string { + return fmt.Sprintf("lokistack-gateway-http-%s", stackName) +} + +func serviceMonitorName(componentName string) string { + return fmt.Sprintf("monitor-%s", componentName) +} + +func signingServiceSecretName(serviceName string) string { + return fmt.Sprintf("%s-metrics", serviceName) +} + +func fqdn(serviceName, namespace string) string { + return fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, namespace) +} + +// serviceMonitorTLSConfig returns the TLS configuration for service monitors. +func serviceMonitorTLSConfig(serviceName, namespace string) monitoringv1.TLSConfig { + return monitoringv1.TLSConfig{ + SafeTLSConfig: monitoringv1.SafeTLSConfig{ + // ServerName can be e.g. 
loki-distributor-http.openshift-logging.svc.cluster.local + ServerName: fqdn(serviceName, namespace), + }, + CAFile: PrometheusCAFile, + } +} + +// serviceMonitorEndpoint returns the lokistack endpoint for service monitors. +func serviceMonitorEndpoint(portName, serviceName, namespace string, enableTLS bool) monitoringv1.Endpoint { + if enableTLS { + tlsConfig := serviceMonitorTLSConfig(serviceName, namespace) + return monitoringv1.Endpoint{ + Port: portName, + Path: "/metrics", + Scheme: "https", + BearerTokenFile: BearerTokenFile, + TLSConfig: &tlsConfig, + } + } + + return monitoringv1.Endpoint{ + Port: portName, + Path: "/metrics", + Scheme: "http", + } +} diff --git a/operator/internal/metrics/metrics.go b/operator/internal/metrics/metrics.go new file mode 100644 index 0000000000..7cd2f8642d --- /dev/null +++ b/operator/internal/metrics/metrics.go @@ -0,0 +1,161 @@ +package metrics + +import ( + "reflect" + + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/manifests" +) + +// UserDefinedLimitsType defines a label that describes the type of limits +// imposed on the cluster +type UserDefinedLimitsType string + +const ( + labelGlobal UserDefinedLimitsType = "global" + labelTenant UserDefinedLimitsType = "tenant" +) + +var ( + deploymentMetric = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "lokistack_deployments", + Help: "Number of clusters that are deployed", + }, + []string{"size", "stack_id"}, + ) + + userDefinedLimitsMetric = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "lokistack_user_defined_limits", + Help: "Number of clusters that are using user defined limits", + }, + []string{"size", "stack_id", "type"}, + ) + + globalStreamLimitMetric = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "lokistack_global_stream_limit", + Help: "Sum of stream limits used globally by the 
ingesters", + }, + []string{"size", "stack_id"}, + ) + + averageTenantStreamLimitMetric = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "lokistack_avg_stream_limit_per_tenant", + Help: "Sum of stream limits used for defined tenants by the ingesters", + }, + []string{"size", "stack_id"}, + ) +) + +// RegisterMetricCollectors registers the prometheus collectors with the k8 default metrics +func RegisterMetricCollectors() { + metricCollectors := []prometheus.Collector{ + deploymentMetric, + userDefinedLimitsMetric, + globalStreamLimitMetric, + averageTenantStreamLimitMetric, + } + + for _, collector := range metricCollectors { + metrics.Registry.MustRegister(collector) + } +} + +// Collect takes metrics based on the spec +func Collect(spec *lokiv1beta1.LokiStackSpec, stackName string) { + defaultSpec := manifests.DefaultLokiStackSpec(spec.Size) + sizes := []lokiv1beta1.LokiStackSizeType{lokiv1beta1.SizeOneXSmall, lokiv1beta1.SizeOneXMedium} + + for _, size := range sizes { + var ( + globalRate float64 = 0 + tenantRate float64 = 0 + isUsingSize = false + isUsingTenantLimits = false + isUsingCustomGlobalLimits = false + ) + + if spec.Size == size { + isUsingSize = true + + if !reflect.DeepEqual(spec.Limits.Global, defaultSpec.Limits.Global) { + isUsingCustomGlobalLimits = true + } + + if len(spec.Limits.Tenants) != 0 { + isUsingTenantLimits = true + } + + if ingesters := spec.Template.Ingester.Replicas; ingesters > 0 { + tenantRate = streamRate(spec.Limits.Tenants, ingesters) + globalRate = float64(spec.Limits.Global.IngestionLimits.MaxGlobalStreamsPerTenant / ingesters) + } + } + + setDeploymentMetric(size, stackName, isUsingSize) + setUserDefinedLimitsMetric(size, stackName, labelGlobal, isUsingCustomGlobalLimits) + setUserDefinedLimitsMetric(size, stackName, labelTenant, isUsingTenantLimits) + setGlobalStreamLimitMetric(size, stackName, globalRate) + setAverageTenantStreamLimitMetric(size, stackName, tenantRate) + } +} + +func setDeploymentMetric(size 
lokiv1beta1.LokiStackSizeType, identifier string, active bool) { + deploymentMetric.With(prometheus.Labels{ + "size": string(size), + "stack_id": identifier, + }).Set(boolValue(active)) +} + +func setUserDefinedLimitsMetric(size lokiv1beta1.LokiStackSizeType, identifier string, limitType UserDefinedLimitsType, active bool) { + userDefinedLimitsMetric.With(prometheus.Labels{ + "size": string(size), + "stack_id": identifier, + "type": string(limitType), + }).Set(boolValue(active)) +} + +func setGlobalStreamLimitMetric(size lokiv1beta1.LokiStackSizeType, identifier string, rate float64) { + globalStreamLimitMetric.With(prometheus.Labels{ + "size": string(size), + "stack_id": identifier, + }).Set(rate) +} + +func setAverageTenantStreamLimitMetric(size lokiv1beta1.LokiStackSizeType, identifier string, rate float64) { + averageTenantStreamLimitMetric.With(prometheus.Labels{ + "size": string(size), + "stack_id": identifier, + }).Set(rate) +} + +func boolValue(value bool) float64 { + if value { + return 1 + } + return 0 +} + +func streamRate(tenantLimits map[string]lokiv1beta1.LimitsTemplateSpec, ingesters int32) float64 { + var tenants, tenantStreamLimit int32 = 0, 0 + + for _, tenant := range tenantLimits { + if tenant.IngestionLimits == nil || tenant.IngestionLimits.MaxGlobalStreamsPerTenant == 0 { + continue + } + + tenants++ + tenantStreamLimit += tenant.IngestionLimits.MaxGlobalStreamsPerTenant + } + + if tenants == 0 || ingesters == 0 { + return 0 + } + return float64(tenantStreamLimit / ingesters / tenants) +} diff --git a/operator/internal/sizes/predict.go b/operator/internal/sizes/predict.go new file mode 100644 index 0000000000..02d4ff7b35 --- /dev/null +++ b/operator/internal/sizes/predict.go @@ -0,0 +1,126 @@ +package sizes + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/ViaQ/logerr/kverrors" + + "github.com/prometheus/client_golang/api" + promv1 "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/config" + 
"github.com/prometheus/common/model" +) + +var ( + metricsClient Client + + // durationDataHour is the default time duration to consider for metric scraping. + // It is passed as first parameter to predict_linear. + durationDataHour = "1h" + // timeoutClient is the timeout duration for prometheus client. + timeoutClient = 10 * time.Second + + // promURL is the URL of the prometheus thanos querier + promURL string + // promToken is the token to connect to prometheus thanos querier. + promToken string +) + +type client struct { + api promv1.API + timeout time.Duration +} + +// Client is the interface which contains methods for querying and extracting metrics. +type Client interface { + LogLoggedBytesReceivedTotal(duration model.Duration) (float64, error) +} + +func newClient(url, token string) (*client, error) { + httpConfig := config.HTTPClientConfig{ + BearerToken: config.Secret(token), + TLSConfig: config.TLSConfig{ + InsecureSkipVerify: true, + }, + } + + rt, rtErr := config.NewRoundTripperFromConfig(httpConfig, "size-calculator-metrics") + + if rtErr != nil { + return nil, kverrors.Wrap(rtErr, "failed creating prometheus configuration") + } + + pc, err := api.NewClient(api.Config{ + Address: url, + RoundTripper: rt, + }) + if err != nil { + return nil, kverrors.Wrap(err, "failed creating prometheus client") + } + + return &client{ + api: promv1.NewAPI(pc), + timeout: timeoutClient, + }, nil +} + +func (c *client) executeScalarQuery(query string) (float64, error) { + ctx, cancel := context.WithTimeout(context.Background(), c.timeout) + defer cancel() + + res, _, err := c.api.Query(ctx, query, time.Now()) + if err != nil { + return 0.0, kverrors.Wrap(err, "failed executing query", + "query", query) + } + + if res.Type() == model.ValScalar { + value := res.(*model.Scalar) + return float64(value.Value), nil + } + + if res.Type() == model.ValVector { + vec := res.(model.Vector) + if vec.Len() == 0 { + return 0.0, nil + } + + return float64(vec[0].Value), nil + } + + 
return 0.0, kverrors.Wrap(nil, "failed to parse result for query", + "query", query) +} + +func (c *client) LogLoggedBytesReceivedTotal(duration model.Duration) (float64, error) { + query := fmt.Sprintf( + `sum(predict_linear(log_logged_bytes_total[%s], %d))`, + durationDataHour, + int(time.Duration(duration).Seconds()), + ) + + return c.executeScalarQuery(query) +} + +// PredictFor takes the default duration and predicts +// the amount of logs expected in 1 day +func PredictFor(duration model.Duration) (logsCollected float64, err error) { + promURL = os.Getenv("PROMETHEUS_URL") + promToken = os.Getenv("PROMETHEUS_TOKEN") + + // Create a client to collect metrics + metricsClient, err = newClient(promURL, promToken) + if err != nil { + return 0, kverrors.Wrap(err, "Failed to create metrics client") + } + + logsCollected, err = metricsClient.LogLoggedBytesReceivedTotal(duration) + if err != nil { + return 0, err + } + + return logsCollected, nil +} diff --git a/operator/internal/status/components.go b/operator/internal/status/components.go new file mode 100644 index 0000000000..5fc69e8567 --- /dev/null +++ b/operator/internal/status/components.go @@ -0,0 +1,81 @@ +package status + +import ( + "context" + + "github.com/ViaQ/logerr/kverrors" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s" + "github.com/grafana/loki-operator/internal/manifests" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// SetComponentsStatus updates the pod status map component +func SetComponentsStatus(ctx context.Context, k k8s.Client, req ctrl.Request) error { + var s lokiv1beta1.LokiStack + if err := k.Get(ctx, req.NamespacedName, &s); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) + } + + var err 
error
	s.Status.Components = lokiv1beta1.LokiStackComponentStatus{}

	// Collect the per-phase pod status map for every LokiStack component.
	// Each stanza wraps its error with the label of the component that failed.
	s.Status.Components.Compactor, err = appendPodStatus(ctx, k, manifests.LabelCompactorComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelCompactorComponent)
	}

	s.Status.Components.Querier, err = appendPodStatus(ctx, k, manifests.LabelQuerierComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelQuerierComponent)
	}

	s.Status.Components.Distributor, err = appendPodStatus(ctx, k, manifests.LabelDistributorComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelDistributorComponent)
	}

	s.Status.Components.QueryFrontend, err = appendPodStatus(ctx, k, manifests.LabelQueryFrontendComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelQueryFrontendComponent)
	}

	// BUGFIX: this error previously reported the ingester label for an
	// index-gateway lookup failure (copy-paste swap with the stanza below).
	s.Status.Components.IndexGateway, err = appendPodStatus(ctx, k, manifests.LabelIndexGatewayComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelIndexGatewayComponent)
	}

	// BUGFIX: this error previously reported the index-gateway label for an
	// ingester lookup failure.
	s.Status.Components.Ingester, err = appendPodStatus(ctx, k, manifests.LabelIngesterComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelIngesterComponent)
	}

	s.Status.Components.Gateway, err = appendPodStatus(ctx, k, manifests.LabelGatewayComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelGatewayComponent)
	}
	return k.Status().Update(ctx, &s, &client.UpdateOptions{})
}

func
appendPodStatus(ctx context.Context, k k8s.Client, component, stack, ns string) (lokiv1beta1.PodStatusMap, error) { + psm := lokiv1beta1.PodStatusMap{} + pods := &corev1.PodList{} + opts := []client.ListOption{ + client.MatchingLabels(manifests.ComponentLabels(component, stack)), + client.InNamespace(ns), + } + if err := k.List(ctx, pods, opts...); err != nil { + return nil, kverrors.Wrap(err, "failed to list pods for LokiStack component", "name", stack, "component", component) + } + for _, pod := range pods.Items { + phase := pod.Status.Phase + psm[phase] = append(psm[phase], pod.Name) + } + return psm, nil +} diff --git a/operator/internal/status/components_test.go b/operator/internal/status/components_test.go new file mode 100644 index 0000000000..5fdff1bca2 --- /dev/null +++ b/operator/internal/status/components_test.go @@ -0,0 +1,162 @@ +package status_test + +import ( + "context" + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s/k8sfakes" + "github.com/grafana/loki-operator/internal/status" + "github.com/stretchr/testify/require" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestSetComponentsStatus_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) { + k := &k8sfakes.FakeClient{} + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewBadRequest("something wasn't found") + } + + err := status.SetComponentsStatus(context.TODO(), k, r) + require.Error(t, err) +} + +func TestSetComponentsStatus_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) { + 
k := &k8sfakes.FakeClient{} + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := status.SetComponentsStatus(context.TODO(), k, r) + require.NoError(t, err) +} + +func TestSetComponentsStatus_WhenListReturnError_ReturnError(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + + k.StatusStub = func() client.StatusWriter { return sw } + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + k.ListStub = func(_ context.Context, l client.ObjectList, opts ...client.ListOption) error { + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := status.SetComponentsStatus(context.TODO(), k, r) + require.Error(t, err) +} + +func TestSetComponentsStatus_WhenPodListExisting_SetPodStatusMap(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + + k.StatusStub = func() client.StatusWriter { return sw } + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace 
== name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + k.ListStub = func(_ context.Context, l client.ObjectList, _ ...client.ListOption) error { + pods := v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-a", + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-b", + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + }, + }, + }, + } + k.SetClientObjectList(l, &pods) + return nil + } + + expected := lokiv1beta1.PodStatusMap{ + "Pending": []string{"pod-a"}, + "Running": []string{"pod-b"}, + } + + sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { + stack := obj.(*lokiv1beta1.LokiStack) + require.Equal(t, expected, stack.Status.Components.Compactor) + return nil + } + + err := status.SetComponentsStatus(context.TODO(), k, r) + require.NoError(t, err) + require.NotZero(t, k.ListCallCount()) + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} diff --git a/operator/internal/status/lokistack.go b/operator/internal/status/lokistack.go new file mode 100644 index 0000000000..83ce626b39 --- /dev/null +++ b/operator/internal/status/lokistack.go @@ -0,0 +1,198 @@ +package status + +import ( + "context" + + "github.com/ViaQ/logerr/kverrors" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// SetReadyCondition updates or appends the condition Ready to the lokistack status conditions. +// In addition it resets all other Status conditions to false. 
+func SetReadyCondition(ctx context.Context, k k8s.Client, req ctrl.Request) error { + var s lokiv1beta1.LokiStack + if err := k.Get(ctx, req.NamespacedName, &s); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) + } + + for _, cond := range s.Status.Conditions { + if cond.Type == string(lokiv1beta1.ConditionReady) && cond.Status == metav1.ConditionTrue { + return nil + } + } + + ready := metav1.Condition{ + Type: string(lokiv1beta1.ConditionReady), + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.Now(), + Message: "All components ready", + Reason: string(lokiv1beta1.ReasonReadyComponents), + } + + index := -1 + for i := range s.Status.Conditions { + // Reset all other conditions first + s.Status.Conditions[i].Status = metav1.ConditionFalse + s.Status.Conditions[i].LastTransitionTime = metav1.Now() + + // Locate existing ready condition if any + if s.Status.Conditions[i].Type == string(lokiv1beta1.ConditionReady) { + index = i + } + } + + if index == -1 { + s.Status.Conditions = append(s.Status.Conditions, ready) + } else { + s.Status.Conditions[index] = ready + } + + return k.Status().Update(ctx, &s, &client.UpdateOptions{}) +} + +// SetFailedCondition updates or appends the condition Failed to the lokistack status conditions. +// In addition it resets all other Status conditions to false. 
+func SetFailedCondition(ctx context.Context, k k8s.Client, req ctrl.Request) error { + var s lokiv1beta1.LokiStack + if err := k.Get(ctx, req.NamespacedName, &s); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) + } + + for _, cond := range s.Status.Conditions { + if cond.Type == string(lokiv1beta1.ConditionFailed) && cond.Status == metav1.ConditionTrue { + return nil + } + } + + failed := metav1.Condition{ + Type: string(lokiv1beta1.ConditionFailed), + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.Now(), + Message: "Some LokiStack components failed", + Reason: string(lokiv1beta1.ReasonFailedComponents), + } + + index := -1 + for i := range s.Status.Conditions { + // Reset all other conditions first + s.Status.Conditions[i].Status = metav1.ConditionFalse + s.Status.Conditions[i].LastTransitionTime = metav1.Now() + + // Locate existing failed condition if any + if s.Status.Conditions[i].Type == string(lokiv1beta1.ConditionFailed) { + index = i + } + } + + if index == -1 { + s.Status.Conditions = append(s.Status.Conditions, failed) + } else { + s.Status.Conditions[index] = failed + } + + return k.Status().Update(ctx, &s, &client.UpdateOptions{}) +} + +// SetPendingCondition updates or appends the condition Pending to the lokistack status conditions. +// In addition it resets all other Status conditions to false. 
+func SetPendingCondition(ctx context.Context, k k8s.Client, req ctrl.Request) error { + var s lokiv1beta1.LokiStack + if err := k.Get(ctx, req.NamespacedName, &s); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) + } + + for _, cond := range s.Status.Conditions { + if cond.Type == string(lokiv1beta1.ConditionPending) && cond.Status == metav1.ConditionTrue { + return nil + } + } + + pending := metav1.Condition{ + Type: string(lokiv1beta1.ConditionPending), + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.Now(), + Message: "Some LokiStack components pending on dependendies", + Reason: string(lokiv1beta1.ReasonPendingComponents), + } + + index := -1 + for i := range s.Status.Conditions { + // Reset all other conditions first + s.Status.Conditions[i].Status = metav1.ConditionFalse + s.Status.Conditions[i].LastTransitionTime = metav1.Now() + + // Locate existing pending condition if any + if s.Status.Conditions[i].Type == string(lokiv1beta1.ConditionPending) { + index = i + } + } + + if index == -1 { + s.Status.Conditions = append(s.Status.Conditions, pending) + } else { + s.Status.Conditions[index] = pending + } + + return k.Status().Update(ctx, &s, &client.UpdateOptions{}) +} + +// SetDegradedCondition appends the condition Degraded to the lokistack status conditions. 
+func SetDegradedCondition(ctx context.Context, k k8s.Client, req ctrl.Request, msg string, reason lokiv1beta1.LokiStackConditionReason) error { + var s lokiv1beta1.LokiStack + if err := k.Get(ctx, req.NamespacedName, &s); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) + } + + reasonStr := string(reason) + for _, cond := range s.Status.Conditions { + if cond.Type == string(lokiv1beta1.ConditionDegraded) && cond.Reason == reasonStr && cond.Status == metav1.ConditionTrue { + return nil + } + } + + degraded := metav1.Condition{ + Type: string(lokiv1beta1.ConditionDegraded), + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.Now(), + Reason: reasonStr, + Message: msg, + } + + index := -1 + for i := range s.Status.Conditions { + // Reset all other conditions first + s.Status.Conditions[i].Status = metav1.ConditionFalse + s.Status.Conditions[i].LastTransitionTime = metav1.Now() + + // Locate existing pending condition if any + if s.Status.Conditions[i].Type == string(lokiv1beta1.ConditionDegraded) { + index = i + } + } + + if index == -1 { + s.Status.Conditions = append(s.Status.Conditions, degraded) + } else { + s.Status.Conditions[index] = degraded + } + + return k.Status().Update(ctx, &s, &client.UpdateOptions{}) +} diff --git a/operator/internal/status/lokistack_test.go b/operator/internal/status/lokistack_test.go new file mode 100644 index 0000000000..c48403a530 --- /dev/null +++ b/operator/internal/status/lokistack_test.go @@ -0,0 +1,691 @@ +package status_test + +import ( + "context" + "testing" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s/k8sfakes" + "github.com/grafana/loki-operator/internal/status" + + "github.com/stretchr/testify/require" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestSetReadyCondition_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) { + k := &k8sfakes.FakeClient{} + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewBadRequest("something wasn't found") + } + + err := status.SetReadyCondition(context.TODO(), k, r) + require.Error(t, err) +} + +func TestSetReadyCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) { + k := &k8sfakes.FakeClient{} + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := status.SetReadyCondition(context.TODO(), k, r) + require.NoError(t, err) +} + +func TestSetReadyCondition_WhenExisting_DoNothing(t *testing.T) { + k := &k8sfakes.FakeClient{} + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + Status: lokiv1beta1.LokiStackStatus{ + Conditions: []metav1.Condition{ + { + Type: string(lokiv1beta1.ConditionReady), + Status: metav1.ConditionTrue, + }, + }, + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := status.SetReadyCondition(context.TODO(), k, r) + 
require.NoError(t, err) + require.Zero(t, k.StatusCallCount()) +} + +func TestSetReadyCondition_WhenExisting_SetReadyConditionTrue(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + + k.StatusStub = func() client.StatusWriter { return sw } + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + Status: lokiv1beta1.LokiStackStatus{ + Conditions: []metav1.Condition{ + { + Type: string(lokiv1beta1.ConditionReady), + Status: metav1.ConditionFalse, + }, + }, + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { + actual := obj.(*lokiv1beta1.LokiStack) + require.NotEmpty(t, actual.Status.Conditions) + require.Equal(t, metav1.ConditionTrue, actual.Status.Conditions[0].Status) + return nil + } + + err := status.SetReadyCondition(context.TODO(), k, r) + require.NoError(t, err) + + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} + +func TestSetReadyCondition_WhenNoneExisting_AppendReadyCondition(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + + k.StatusStub = func() client.StatusWriter { return sw } + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && 
r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { + actual := obj.(*lokiv1beta1.LokiStack) + require.NotEmpty(t, actual.Status.Conditions) + return nil + } + + err := status.SetReadyCondition(context.TODO(), k, r) + require.NoError(t, err) + + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} + +func TestSetFailedCondition_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) { + k := &k8sfakes.FakeClient{} + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewBadRequest("something wasn't found") + } + + err := status.SetFailedCondition(context.TODO(), k, r) + require.Error(t, err) +} + +func TestSetFailedCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) { + k := &k8sfakes.FakeClient{} + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := status.SetFailedCondition(context.TODO(), k, r) + require.NoError(t, err) +} + +func TestSetFailedCondition_WhenExisting_DoNothing(t *testing.T) { + k := &k8sfakes.FakeClient{} + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + Status: lokiv1beta1.LokiStackStatus{ + Conditions: []metav1.Condition{ + { + Type: string(lokiv1beta1.ConditionFailed), + Status: metav1.ConditionTrue, + }, + }, + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + 
Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := status.SetFailedCondition(context.TODO(), k, r) + require.NoError(t, err) + require.Zero(t, k.StatusCallCount()) +} + +func TestSetFailedCondition_WhenExisting_SetFailedConditionTrue(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + + k.StatusStub = func() client.StatusWriter { return sw } + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + Status: lokiv1beta1.LokiStackStatus{ + Conditions: []metav1.Condition{ + { + Type: string(lokiv1beta1.ConditionFailed), + Status: metav1.ConditionFalse, + }, + }, + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { + actual := obj.(*lokiv1beta1.LokiStack) + require.NotEmpty(t, actual.Status.Conditions) + require.Equal(t, metav1.ConditionTrue, actual.Status.Conditions[0].Status) + return nil + } + + err := status.SetFailedCondition(context.TODO(), k, r) + require.NoError(t, err) + + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} + +func TestSetFailedCondition_WhenNoneExisting_AppendFailedCondition(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + + k.StatusStub = func() 
client.StatusWriter { return sw } + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { + actual := obj.(*lokiv1beta1.LokiStack) + require.NotEmpty(t, actual.Status.Conditions) + return nil + } + + err := status.SetFailedCondition(context.TODO(), k, r) + require.NoError(t, err) + + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} + +func TestSetDegradedCondition_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) { + k := &k8sfakes.FakeClient{} + + msg := "tell me nothing" + reason := lokiv1beta1.ReasonMissingObjectStorageSecret + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewBadRequest("something wasn't found") + } + + err := status.SetDegradedCondition(context.TODO(), k, r, msg, reason) + require.Error(t, err) +} + +func TestSetPendingCondition_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) { + k := &k8sfakes.FakeClient{} + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewBadRequest("something wasn't found") + } + + err := status.SetPendingCondition(context.TODO(), k, r) + require.Error(t, err) 
+} + +func TestSetPendingCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) { + k := &k8sfakes.FakeClient{} + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := status.SetPendingCondition(context.TODO(), k, r) + require.NoError(t, err) +} + +func TestSetPendingCondition_WhenExisting_DoNothing(t *testing.T) { + k := &k8sfakes.FakeClient{} + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + Status: lokiv1beta1.LokiStackStatus{ + Conditions: []metav1.Condition{ + { + Type: string(lokiv1beta1.ConditionPending), + Status: metav1.ConditionTrue, + }, + }, + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := status.SetPendingCondition(context.TODO(), k, r) + require.NoError(t, err) + require.Zero(t, k.StatusCallCount()) +} + +func TestSetPendingCondition_WhenExisting_SetPendingConditionTrue(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + + k.StatusStub = func() client.StatusWriter { return sw } + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + Status: lokiv1beta1.LokiStackStatus{ + Conditions: []metav1.Condition{ + { + Type: string(lokiv1beta1.ConditionPending), + Status: metav1.ConditionFalse, + }, + }, + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + 
Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { + actual := obj.(*lokiv1beta1.LokiStack) + require.NotEmpty(t, actual.Status.Conditions) + require.Equal(t, metav1.ConditionTrue, actual.Status.Conditions[0].Status) + return nil + } + + err := status.SetPendingCondition(context.TODO(), k, r) + require.NoError(t, err) + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} + +func TestSetPendingCondition_WhenNoneExisting_AppendPendingCondition(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + + k.StatusStub = func() client.StatusWriter { return sw } + + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { + actual := obj.(*lokiv1beta1.LokiStack) + require.NotEmpty(t, actual.Status.Conditions) + return nil + } + + err := status.SetPendingCondition(context.TODO(), k, r) + require.NoError(t, err) + + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} + +func TestSetDegradedCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t 
*testing.T) { + k := &k8sfakes.FakeClient{} + + msg := "tell me nothing" + reason := lokiv1beta1.ReasonMissingObjectStorageSecret + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := status.SetDegradedCondition(context.TODO(), k, r, msg, reason) + require.NoError(t, err) +} + +func TestSetDegradedCondition_WhenExisting_DoNothing(t *testing.T) { + k := &k8sfakes.FakeClient{} + + msg := "tell me nothing" + reason := lokiv1beta1.ReasonMissingObjectStorageSecret + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + Status: lokiv1beta1.LokiStackStatus{ + Conditions: []metav1.Condition{ + { + Type: string(lokiv1beta1.ConditionDegraded), + Reason: string(reason), + Status: metav1.ConditionTrue, + }, + }, + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err := status.SetDegradedCondition(context.TODO(), k, r, msg, reason) + require.NoError(t, err) + require.Zero(t, k.StatusCallCount()) +} + +func TestSetDegradedCondition_WhenExisting_SetDegradedConditionTrue(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + + k.StatusStub = func() client.StatusWriter { return sw } + + msg := "tell me something" + reason := lokiv1beta1.ReasonMissingObjectStorageSecret + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + 
Status: lokiv1beta1.LokiStackStatus{ + Conditions: []metav1.Condition{ + { + Type: string(lokiv1beta1.ConditionDegraded), + Reason: string(reason), + Status: metav1.ConditionFalse, + }, + }, + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { + actual := obj.(*lokiv1beta1.LokiStack) + require.NotEmpty(t, actual.Status.Conditions) + require.Equal(t, metav1.ConditionTrue, actual.Status.Conditions[0].Status) + return nil + } + + err := status.SetDegradedCondition(context.TODO(), k, r, msg, reason) + require.NoError(t, err) + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} + +func TestSetDegradedCondition_WhenNoneExisting_AppendDegradedCondition(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + + k.StatusStub = func() client.StatusWriter { return sw } + + msg := "tell me something" + reason := lokiv1beta1.ReasonMissingObjectStorageSecret + s := lokiv1beta1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { + if r.Name == name.Name && r.Namespace == name.Namespace { + k.SetClientObject(object, &s) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { 
+ actual := obj.(*lokiv1beta1.LokiStack) + require.NotEmpty(t, actual.Status.Conditions) + return nil + } + + err := status.SetDegradedCondition(context.TODO(), k, r, msg, reason) + require.NoError(t, err) + + require.NotZero(t, k.StatusCallCount()) + require.NotZero(t, sw.UpdateCallCount()) +} diff --git a/operator/internal/status/status.go b/operator/internal/status/status.go new file mode 100644 index 0000000000..8becab023f --- /dev/null +++ b/operator/internal/status/status.go @@ -0,0 +1,61 @@ +package status + +import ( + "context" + + "github.com/ViaQ/logerr/kverrors" + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/internal/external/k8s" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" +) + +// Refresh executes an aggregate update of the LokiStack Status struct, i.e. +// - It recreates the Status.Components pod status map per component. +// - It sets the appropriate Status.Condition to true that matches the pod status maps. 
+func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request) error { + if err := SetComponentsStatus(ctx, k, req); err != nil { + return err + } + + var s lokiv1beta1.LokiStack + if err := k.Get(ctx, req.NamespacedName, &s); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) + } + + cs := s.Status.Components + + // Check for failed pods first + failed := len(cs.Compactor[corev1.PodFailed]) + + len(cs.Distributor[corev1.PodFailed]) + + len(cs.Ingester[corev1.PodFailed]) + + len(cs.Querier[corev1.PodFailed]) + + len(cs.QueryFrontend[corev1.PodFailed]) + + unknown := len(cs.Compactor[corev1.PodUnknown]) + + len(cs.Distributor[corev1.PodUnknown]) + + len(cs.Ingester[corev1.PodUnknown]) + + len(cs.Querier[corev1.PodUnknown]) + + len(cs.QueryFrontend[corev1.PodUnknown]) + + if failed != 0 || unknown != 0 { + return SetFailedCondition(ctx, k, req) + } + + // Check for pending pods + pending := len(cs.Compactor[corev1.PodPending]) + + len(cs.Distributor[corev1.PodPending]) + + len(cs.Ingester[corev1.PodPending]) + + len(cs.Querier[corev1.PodPending]) + + len(cs.QueryFrontend[corev1.PodPending]) + + if pending != 0 { + return SetPendingCondition(ctx, k, req) + } + return SetReadyCondition(ctx, k, req) +} diff --git a/operator/main.go b/operator/main.go new file mode 100644 index 0000000000..042e1afa24 --- /dev/null +++ b/operator/main.go @@ -0,0 +1,157 @@ +package main + +import ( + "flag" + "net/http" + "net/http/pprof" + "os" + + "github.com/ViaQ/logerr/log" + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ _ "k8s.io/client-go/plugin/pkg/client/auth" + + lokiv1beta1 "github.com/grafana/loki-operator/api/v1beta1" + "github.com/grafana/loki-operator/controllers" + "github.com/grafana/loki-operator/internal/manifests" + "github.com/grafana/loki-operator/internal/metrics" + configv1 "github.com/openshift/api/config/v1" + routev1 "github.com/openshift/api/route/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + // +kubebuilder:scaffold:imports +) + +var scheme = runtime.NewScheme() + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(lokiv1beta1.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +func main() { + var ( + metricsAddr string + enableLeaderElection bool + probeAddr string + enableCertSigning bool + enableServiceMonitors bool + enableTLSServiceMonitors bool + enableGateway bool + enableGatewayRoute bool + ) + + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. 
"+ + "Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&enableCertSigning, "with-cert-signing-service", false, + "Enables features in an Openshift cluster.") + flag.BoolVar(&enableServiceMonitors, "with-service-monitors", false, "Enables service monitoring") + flag.BoolVar(&enableTLSServiceMonitors, "with-tls-service-monitors", false, + "Enables loading of a prometheus service monitor.") + flag.BoolVar(&enableGateway, "with-lokistack-gateway", false, + "Enables the manifest creation for the entire lokistack-gateway.") + flag.BoolVar(&enableGatewayRoute, "with-lokistack-gateway-route", false, + "Enables the usage of Route for the lokistack-gateway instead of Ingress (OCP Only!)") + flag.Parse() + + log.Init("loki-operator") + ctrl.SetLogger(log.GetLogger()) + + if enableServiceMonitors || enableTLSServiceMonitors { + utilruntime.Must(monitoringv1.AddToScheme(scheme)) + } + + if enableGateway { + utilruntime.Must(configv1.AddToScheme(scheme)) + + if enableGatewayRoute { + utilruntime.Must(routev1.AddToScheme(scheme)) + } + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + Port: 9443, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "e3716011.grafana.com", + }) + if err != nil { + log.Error(err, "unable to start manager") + os.Exit(1) + } + + featureFlags := manifests.FeatureFlags{ + EnableCertificateSigningService: enableCertSigning, + EnableServiceMonitors: enableServiceMonitors, + EnableTLSServiceMonitorConfig: enableTLSServiceMonitors, + EnableGateway: enableGateway, + EnableGatewayRoute: enableGatewayRoute, + } + + if err = (&controllers.LokiStackReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("controllers").WithName("LokiStack"), + Scheme: mgr.GetScheme(), + Flags: featureFlags, + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", 
"LokiStack") + os.Exit(1) + } + // +kubebuilder:scaffold:builder + + if err = mgr.AddHealthzCheck("health", healthz.Ping); err != nil { + log.Error(err, "unable to set up health check") + os.Exit(1) + } + if err = mgr.AddReadyzCheck("check", healthz.Ping); err != nil { + log.Error(err, "unable to set up ready check") + os.Exit(1) + } + + log.Info("registering metrics") + metrics.RegisterMetricCollectors() + + log.Info("Registring profiling endpoints.") + err = registerProfiler(mgr) + if err != nil { + log.Error(err, "failed to register extra pprof handler") + os.Exit(1) + } + + log.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + log.Error(err, "problem running manager") + os.Exit(1) + } +} + +func registerProfiler(m ctrl.Manager) error { + endpoints := map[string]http.HandlerFunc{ + "/debug/pprof/": pprof.Index, + "/debug/pprof/cmdline": pprof.Cmdline, + "/debug/pprof/profile": pprof.Profile, + "/debug/pprof/symbol": pprof.Symbol, + "/debug/pprof/trace": pprof.Trace, + } + + for path, handler := range endpoints { + err := m.AddMetricsExtraHandler(path, handler) + if err != nil { + return err + } + } + + return nil +} diff --git a/operator/tools/tools.go b/operator/tools/tools.go new file mode 100644 index 0000000000..1316cf0af2 --- /dev/null +++ b/operator/tools/tools.go @@ -0,0 +1,10 @@ +// +build tools + +package tools + +import ( + _ "github.com/maxbrunsfeld/counterfeiter/v6" +) + +// This file imports packages that are used when running go generate, or used +// during the development process but not otherwise depended on by built code.