Add support for openshift-logging tenant mode (#93)

pull/4881/head
Periklis Tsirakidis 4 years ago committed by GitHub
parent 54b334eb9e
commit ac07fc06ee
  1. bundle/manifests/loki-operator.clusterserviceversion.yaml (3)
  2. config/overlays/openshift/manager_related_image_patch.yaml (2)
  3. config/overlays/openshift/manager_run_flags_patch.yaml (1)
  4. config/overlays/production/manager_related_image_patch.yaml (2)
  5. hack/lokistack_gateway_ocp.yaml (13)
  6. internal/handlers/internal/gateway/tenant_secrets.go (134)
  7. internal/handlers/internal/gateway/tenant_secrets_test.go (214)
  8. internal/handlers/lokistack_create_or_update.go (37)
  9. internal/handlers/lokistack_create_or_update_test.go (18)
  10. internal/manifests/compactor.go (8)
  11. internal/manifests/distributor.go (10)
  12. internal/manifests/gateway.go (118)
  13. internal/manifests/gateway_tenant_sidecars.go (269)
  14. internal/manifests/gateway_tenant_sidecars_test.go (438)
  15. internal/manifests/gateway_test.go (28)
  16. internal/manifests/ingester.go (10)
  17. internal/manifests/internal/gateway/build.go (12)
  18. internal/manifests/internal/gateway/build_test.go (52)
  19. internal/manifests/internal/gateway/gateway-tenants.yaml (62)
  20. internal/manifests/memberlist.go (2)
  21. internal/manifests/querier.go (10)
  22. internal/manifests/query-frontend.go (8)
  23. internal/manifests/service_monitor.go (22)
  24. internal/manifests/service_monitor_test.go (31)
  25. internal/manifests/var.go (29)

@ -536,6 +536,7 @@ spec:
spec:
containers:
- args:
- --with-lokistack-gateway
- --with-cert-signing-service
- --with-service-monitors
- --with-tls-service-monitors
@ -544,6 +545,8 @@ spec:
env:
- name: RELATED_IMAGE_LOKI
value: quay.io/openshift-logging/loki:v2.2.0-10
- name: RELATED_IMAGE_OPA
value: quay.io/observatorium/opa-openshift:latest
image: quay.io/openshift-logging/loki-operator:v0.0.1
imagePullPolicy: IfNotPresent
livenessProbe:

@ -12,3 +12,5 @@ spec:
env:
- name: RELATED_IMAGE_LOKI
value: quay.io/openshift-logging/loki:v2.2.0-10
- name: RELATED_IMAGE_OPA
value: quay.io/observatorium/opa-openshift:latest

@ -10,6 +10,7 @@ spec:
containers:
- name: manager
args:
- "--with-lokistack-gateway"
- "--with-cert-signing-service"
- "--with-service-monitors"
- "--with-tls-service-monitors"

@ -12,3 +12,5 @@ spec:
env:
- name: RELATED_IMAGE_LOKI
value: docker.io/grafana/loki:2.2.0
- name: RELATED_IMAGE_OPA
value: quay.io/observatorium/opa-openshift:latest

@ -0,0 +1,13 @@
apiVersion: loki.openshift.io/v1beta1
kind: LokiStack
metadata:
  name: lokistack-dev
spec:
  size: 1x.extra-small
  replicationFactor: 1
  storage:
    secret:
      name: test
  storageClassName: gp2
  tenants:
    mode: openshift-logging

@ -0,0 +1,134 @@
package gateway
import (
"context"
"fmt"
"github.com/ViaQ/logerr/kverrors"
lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
"github.com/ViaQ/loki-operator/internal/external/k8s"
"github.com/ViaQ/loki-operator/internal/handlers/internal/secrets"
"github.com/ViaQ/loki-operator/internal/manifests"
"github.com/ViaQ/loki-operator/internal/status"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// GetTenantSecrets returns the list of gateway tenant secrets for a tenant mode.
// For the static and dynamic modes, the secrets are fetched from externally provided
// secrets. For the openshift-logging mode, a secret is created per default tenant.
// All secrets live in the same namespace as the LokiStack request.
func GetTenantSecrets(
ctx context.Context,
k k8s.Client,
req ctrl.Request,
scheme *runtime.Scheme,
stack *lokiv1beta1.LokiStack,
) ([]*manifests.TenantSecrets, error) {
switch stack.Spec.Tenants.Mode {
case lokiv1beta1.Static, lokiv1beta1.Dynamic:
return extractUserProvidedSecrets(ctx, k, req, stack)
case lokiv1beta1.OpenshiftLogging:
return createOpenShiftLoggingSecrets(ctx, k, req, scheme, stack)
}
return nil, nil
}
func extractUserProvidedSecrets(
ctx context.Context,
k k8s.Client,
req ctrl.Request,
stack *lokiv1beta1.LokiStack,
) ([]*manifests.TenantSecrets, error) {
var (
tenantSecrets []*manifests.TenantSecrets
gatewaySecret corev1.Secret
)
for _, tenant := range stack.Spec.Tenants.Authentication {
key := client.ObjectKey{Name: tenant.OIDC.Secret.Name, Namespace: req.Namespace}
if err := k.Get(ctx, key, &gatewaySecret); err != nil {
if apierrors.IsNotFound(err) {
return nil, status.SetDegradedCondition(ctx, k, req,
fmt.Sprintf("Missing secrets for tenant %s", tenant.TenantName),
lokiv1beta1.ReasonMissingGatewayTenantSecret,
)
}
return nil, kverrors.Wrap(err, "failed to lookup lokistack gateway tenant secret",
"name", key)
}
var ts *manifests.TenantSecrets
ts, err := secrets.ExtractGatewaySecret(&gatewaySecret, tenant.TenantName)
if err != nil {
return nil, status.SetDegradedCondition(ctx, k, req,
"Invalid gateway tenant secret contents",
lokiv1beta1.ReasonInvalidGatewayTenantSecret,
)
}
tenantSecrets = append(tenantSecrets, ts)
}
return tenantSecrets, nil
}
func createOpenShiftLoggingSecrets(
ctx context.Context,
k k8s.Client,
req ctrl.Request,
scheme *runtime.Scheme,
stack *lokiv1beta1.LokiStack,
) ([]*manifests.TenantSecrets, error) {
var tenantSecrets []*manifests.TenantSecrets
gatewayName := manifests.GatewayName(stack.Name)
for _, name := range manifests.OpenShiftDefaultTenants {
s := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", gatewayName, name),
Namespace: stack.Namespace,
},
Data: map[string][]byte{
// TODO Fill these with production data when we integrate dex.
"clientID": []byte("clientID"),
"clientSecret": []byte("clientSecret"),
"issuerCAPath": []byte("/path/to/ca/file"),
},
}
if err := ctrl.SetControllerReference(stack, s, scheme); err != nil {
return nil, status.SetDegradedCondition(ctx, k, req,
fmt.Sprintf("Missing secrets for tenant %s", name),
lokiv1beta1.ReasonMissingGatewayTenantSecret,
)
}
if err := k.Create(ctx, s, &client.CreateOptions{}); err != nil {
if !apierrors.IsAlreadyExists(err) {
return nil, status.SetDegradedCondition(ctx, k, req,
fmt.Sprintf("Missing secrets for tenant %s", name),
lokiv1beta1.ReasonMissingGatewayTenantSecret,
)
}
}
var ts *manifests.TenantSecrets
ts, err := secrets.ExtractGatewaySecret(s, name)
if err != nil {
return nil, status.SetDegradedCondition(ctx, k, req,
"Invalid gateway tenant secret contents",
lokiv1beta1.ReasonInvalidGatewayTenantSecret,
)
}
tenantSecrets = append(tenantSecrets, ts)
}
return tenantSecrets, nil
}
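
For orientation, here is a minimal sketch of how the create-or-update handler (changed further down in this commit) is expected to consume GetTenantSecrets. The wrapper function name buildTenantSecrets is purely illustrative and not part of the change set; the parameter names follow the handler hunk.

package handlers

import (
	"context"

	lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
	"github.com/ViaQ/loki-operator/internal/external/k8s"
	"github.com/ViaQ/loki-operator/internal/handlers/internal/gateway"
	"github.com/ViaQ/loki-operator/internal/manifests"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
)

// buildTenantSecrets is a hypothetical helper that shows the call pattern only:
// for the static and dynamic modes the secrets come from the user, for
// openshift-logging they are created on the fly, and in both cases the result
// feeds manifests.Options for the gateway.
func buildTenantSecrets(ctx context.Context, k k8s.Client, req ctrl.Request, s *runtime.Scheme, stack *lokiv1beta1.LokiStack) ([]*manifests.TenantSecrets, error) {
	if stack.Spec.Tenants == nil {
		return nil, nil
	}
	// GetTenantSecrets sets a degraded status condition itself when secrets are
	// missing or invalid, so callers only need to propagate the error.
	return gateway.GetTenantSecrets(ctx, k, req, s, stack)
}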

@ -0,0 +1,214 @@
package gateway
import (
"context"
"testing"
"github.com/stretchr/testify/require"
lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
"github.com/ViaQ/loki-operator/internal/external/k8s/k8sfakes"
"github.com/ViaQ/loki-operator/internal/manifests"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var scheme = runtime.NewScheme()
func TestGetTenantSecrets_StaticMode(t *testing.T) {
k := &k8sfakes.FakeClient{}
r := ctrl.Request{
NamespacedName: types.NamespacedName{
Name: "my-stack",
Namespace: "some-ns",
},
}
s := &lokiv1beta1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "mystack",
Namespace: "some-ns",
},
Spec: lokiv1beta1.LokiStackSpec{
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.Static,
Authentication: []lokiv1beta1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "test",
OIDC: &lokiv1beta1.OIDCSpec{
Secret: &lokiv1beta1.TenantSecretSpec{
Name: "test",
},
},
},
},
},
},
}
k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
if name.Name == "test" && name.Namespace == "some-ns" {
k.SetClientObject(object, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "some-ns",
},
Data: map[string][]byte{
"clientID": []byte("test"),
"clientSecret": []byte("test"),
"issuerCAPath": []byte("/path/to/ca/file"),
},
})
}
return nil
}
ts, err := GetTenantSecrets(context.TODO(), k, r, scheme, s)
require.NoError(t, err)
expected := []*manifests.TenantSecrets{
{
TenantName: "test",
ClientID: "test",
ClientSecret: "test",
IssuerCAPath: "/path/to/ca/file",
},
}
require.ElementsMatch(t, ts, expected)
}
func TestGetTenantSecrets_DynamicMode(t *testing.T) {
k := &k8sfakes.FakeClient{}
r := ctrl.Request{
NamespacedName: types.NamespacedName{
Name: "my-stack",
Namespace: "some-ns",
},
}
s := &lokiv1beta1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "mystack",
Namespace: "some-ns",
},
Spec: lokiv1beta1.LokiStackSpec{
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.Dynamic,
Authentication: []lokiv1beta1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "test",
OIDC: &lokiv1beta1.OIDCSpec{
Secret: &lokiv1beta1.TenantSecretSpec{
Name: "test",
},
},
},
},
},
},
}
k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
if name.Name == "test" && name.Namespace == "some-ns" {
k.SetClientObject(object, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "some-ns",
},
Data: map[string][]byte{
"clientID": []byte("test"),
"clientSecret": []byte("test"),
"issuerCAPath": []byte("/path/to/ca/file"),
},
})
}
return nil
}
ts, err := GetTenantSecrets(context.TODO(), k, r, scheme, s)
require.NoError(t, err)
expected := []*manifests.TenantSecrets{
{
TenantName: "test",
ClientID: "test",
ClientSecret: "test",
IssuerCAPath: "/path/to/ca/file",
},
}
require.ElementsMatch(t, ts, expected)
}
func TestGetTenantSecrets_OpenShiftLoggingMode(t *testing.T) {
// Register the clientgo and CRD schemes
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(lokiv1beta1.AddToScheme(scheme))
k := &k8sfakes.FakeClient{}
r := ctrl.Request{
NamespacedName: types.NamespacedName{
Name: "my-stack",
Namespace: "some-ns",
},
}
s := &lokiv1beta1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
Spec: lokiv1beta1.LokiStackSpec{
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.OpenshiftLogging,
},
},
}
expectedNames := []string{
"lokistack-gateway-my-stack-application",
"lokistack-gateway-my-stack-infrastructure",
"lokistack-gateway-my-stack-audit",
}
k.CreateStub = func(_ context.Context, object client.Object, _ ...client.CreateOption) error {
require.Contains(t, expectedNames, object.GetName())
require.Equal(t, "some-ns", object.GetNamespace())
return nil
}
ts, err := GetTenantSecrets(context.TODO(), k, r, scheme, s)
require.NoError(t, err)
require.Equal(t, 3, k.CreateCallCount())
expected := []*manifests.TenantSecrets{
{
TenantName: "application",
ClientID: "clientID",
ClientSecret: "clientSecret",
IssuerCAPath: "/path/to/ca/file",
},
{
TenantName: "infrastructure",
ClientID: "clientID",
ClientSecret: "clientSecret",
IssuerCAPath: "/path/to/ca/file",
},
{
TenantName: "audit",
ClientID: "clientID",
ClientSecret: "clientSecret",
IssuerCAPath: "/path/to/ca/file",
},
}
require.ElementsMatch(t, ts, expected)
}

@ -64,7 +64,7 @@ func CreateOrUpdateLokiStack(ctx context.Context, req ctrl.Request, k k8s.Client
}
var tenantSecrets []*manifests.TenantSecrets
if stack.Spec.Tenants != nil {
if flags.EnableGateway && stack.Spec.Tenants != nil {
if err = gateway.ValidateModes(stack); err != nil {
return status.SetDegradedCondition(ctx, k, req,
fmt.Sprintf("Invalid tenants configuration: %s", err),
@ -72,31 +72,9 @@ func CreateOrUpdateLokiStack(ctx context.Context, req ctrl.Request, k k8s.Client
)
}
if stack.Spec.Tenants.Mode != lokiv1beta1.OpenshiftLogging {
var gatewaySecret corev1.Secret
for _, tenant := range stack.Spec.Tenants.Authentication {
key := client.ObjectKey{Name: tenant.OIDC.Secret.Name, Namespace: stack.Namespace}
if err = k.Get(ctx, key, &gatewaySecret); err != nil {
if apierrors.IsNotFound(err) {
return status.SetDegradedCondition(ctx, k, req,
fmt.Sprintf("Missing secrets for tenant %s", tenant.TenantName),
lokiv1beta1.ReasonMissingGatewayTenantSecret,
)
}
return kverrors.Wrap(err, "failed to lookup lokistack gateway tenant secret",
"name", key)
}
var ts *manifests.TenantSecrets
ts, err = secrets.ExtractGatewaySecret(&gatewaySecret, tenant.TenantID)
if err != nil {
return status.SetDegradedCondition(ctx, k, req,
"Invalid gateway tenant secret contents",
lokiv1beta1.ReasonInvalidGatewayTenantSecret,
)
}
tenantSecrets = append(tenantSecrets, ts)
}
tenantSecrets, err = gateway.GetTenantSecrets(ctx, k, req, s, &stack)
if err != nil {
return err
}
}
@ -118,6 +96,13 @@ func CreateOrUpdateLokiStack(ctx context.Context, req ctrl.Request, k k8s.Client
return optErr
}
if flags.EnableGateway {
if optErr := manifests.ApplyGatewayDefaultOptions(&opts); optErr != nil {
ll.Error(optErr, "failed to apply default options to gateway settings")
return optErr
}
}
objects, err := manifests.BuildAll(opts)
if err != nil {
ll.Error(err, "failed to build manifests")

@ -722,6 +722,10 @@ func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *
},
}
ff := manifests.FeatureFlags{
EnableGateway: true,
}
stack := &lokiv1beta1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
@ -772,7 +776,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *
k.StatusStub = func() client.StatusWriter { return sw }
err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, ff)
// make sure error is returned to re-trigger reconciliation
require.NoError(t, err)
@ -792,6 +796,10 @@ func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing
},
}
ff := manifests.FeatureFlags{
EnableGateway: true,
}
stack := &lokiv1beta1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
@ -846,7 +854,7 @@ func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing
k.StatusStub = func() client.StatusWriter { return sw }
err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, ff)
// make sure error is returned to re-trigger reconciliation
require.NoError(t, err)
@ -866,6 +874,10 @@ func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing
},
}
ff := manifests.FeatureFlags{
EnableGateway: true,
}
stack := &lokiv1beta1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
@ -924,7 +936,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing
k.StatusStub = func() client.StatusWriter { return sw }
err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, ff)
// make sure error is returned to re-trigger reconciliation
require.NoError(t, err)

@ -90,12 +90,12 @@ func NewCompactorStatefulSet(opts Options) *appsv1.StatefulSet {
},
Ports: []corev1.ContainerPort{
{
Name: "metrics",
Name: lokiHTTPPortName,
ContainerPort: httpPort,
Protocol: protocolTCP,
},
{
Name: "grpc",
Name: lokiGRPCPortName,
ContainerPort: grpcPort,
Protocol: protocolTCP,
},
@ -192,7 +192,7 @@ func NewCompactorGRPCService(opts Options) *corev1.Service {
ClusterIP: "None",
Ports: []corev1.ServicePort{
{
Name: "grpc",
Name: lokiGRPCPortName,
Port: grpcPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: grpcPort},
@ -222,7 +222,7 @@ func NewCompactorHTTPService(opts Options) *corev1.Service {
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "metrics",
Name: lokiHTTPPortName,
Port: httpPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: httpPort},

@ -101,17 +101,17 @@ func NewDistributorDeployment(opts Options) *appsv1.Deployment {
},
Ports: []corev1.ContainerPort{
{
Name: "metrics",
Name: lokiHTTPPortName,
ContainerPort: httpPort,
Protocol: protocolTCP,
},
{
Name: "grpc",
Name: lokiGRPCPortName,
ContainerPort: grpcPort,
Protocol: protocolTCP,
},
{
Name: "gossip-ring",
Name: lokiGossipPortName,
ContainerPort: gossipPort,
Protocol: protocolTCP,
},
@ -189,7 +189,7 @@ func NewDistributorGRPCService(opts Options) *corev1.Service {
ClusterIP: "None",
Ports: []corev1.ServicePort{
{
Name: "grpc",
Name: lokiGRPCPortName,
Port: grpcPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: grpcPort},
@ -219,7 +219,7 @@ func NewDistributorHTTPService(opts Options) *corev1.Service {
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "metrics",
Name: lokiHTTPPortName,
Port: httpPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: httpPort},

@ -14,30 +14,43 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
)
const (
tlsMetricsSercetVolume = "tls-metrics-secret"
)
// BuildGateway returns a list of k8s objects for Loki Stack Gateway
func BuildGateway(opts Options) ([]client.Object, error) {
gatewayCm, sha1C, err := gatewayConfigMap(opts)
cm, sha1C, err := gatewayConfigMap(opts)
if err != nil {
return nil, err
}
deployment := NewGatewayDeployment(opts, sha1C)
dpl := NewGatewayDeployment(opts, sha1C)
svc := NewGatewayHTTPService(opts)
if opts.Flags.EnableTLSServiceMonitorConfig {
if err := configureGatewayMetricsPKI(&deployment.Spec.Template.Spec); err != nil {
serviceName := serviceNameGatewayHTTP(opts.Name)
if err := configureGatewayMetricsPKI(&dpl.Spec.Template.Spec, serviceName); err != nil {
return nil, err
}
}
return []client.Object{
gatewayCm,
deployment,
NewGatewayHTTPService(opts),
}, nil
if opts.Stack.Tenants != nil {
mode := opts.Stack.Tenants.Mode
if err := configureDeploymentForMode(&dpl.Spec, mode, opts.Flags); err != nil {
return nil, err
}
if err := configureServiceForMode(&svc.Spec, mode); err != nil {
return nil, err
}
}
return []client.Object{cm, dpl, svc}, nil
}
// NewGatewayDeployment creates a deployment object for a lokiStack-gateway
@ -85,8 +98,9 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
},
Args: []string{
fmt.Sprintf("--debug.name=%s", LabelGatewayComponent),
"--web.listen=0.0.0.0:8080",
"--web.internal.listen=0.0.0.0:8081",
fmt.Sprintf("--web.listen=0.0.0.0:%d", gatewayHTTPPort),
fmt.Sprintf("--web.internal.listen=0.0.0.0:%d", gatewayInternalPort),
fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", gatewayHTTPPort),
"--log.level=debug",
fmt.Sprintf("--logs.read.endpoint=http://%s:%d", fqdn(serviceNameQueryFrontendHTTP(opts.Name), opts.Namespace), httpPort),
fmt.Sprintf("--logs.tail.endpoint=http://%s:%d", fqdn(serviceNameQueryFrontendHTTP(opts.Name), opts.Namespace), httpPort),
@ -96,16 +110,12 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
},
Ports: []corev1.ContainerPort{
{
Name: "internal",
ContainerPort: 8081,
},
{
Name: "public",
ContainerPort: 8080,
Name: gatewayInternalPortName,
ContainerPort: gatewayInternalPort,
},
{
Name: "metrics",
ContainerPort: httpPort,
Name: gatewayHTTPPortName,
ContainerPort: gatewayHTTPPort,
},
},
VolumeMounts: []corev1.VolumeMount{
@ -132,7 +142,7 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(8081),
Port: intstr.FromInt(gatewayInternalPort),
Scheme: corev1.URISchemeHTTP,
},
},
@ -144,7 +154,7 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(8081),
Port: intstr.FromInt(gatewayInternalPort),
Scheme: corev1.URISchemeHTTP,
},
},
@ -171,12 +181,12 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32Ptr(1),
Selector: &metav1.LabelSelector{
MatchLabels: labels.Merge(l, GossipLabels()),
MatchLabels: l,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: GatewayName(opts.Name),
Labels: labels.Merge(l, GossipLabels()),
Labels: l,
Annotations: a,
},
Spec: podSpec,
@ -207,8 +217,12 @@ func NewGatewayHTTPService(opts Options) *corev1.Service {
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "metrics",
Port: httpPort,
Name: gatewayHTTPPortName,
Port: gatewayHTTPPort,
},
{
Name: gatewayInternalPortName,
Port: gatewayInternalPort,
},
},
Selector: l,
@ -269,24 +283,26 @@ func gatewayConfigOptions(opt Options) gateway.Options {
}
}
func configureGatewayMetricsPKI(podSpec *corev1.PodSpec) error {
func configureGatewayMetricsPKI(podSpec *corev1.PodSpec, serviceName string) error {
var gwIndex int
for i, c := range podSpec.Containers {
if c.Name == LabelGatewayComponent {
gwIndex = i
break
}
}
secretName := signingServiceSecretName(serviceName)
certFile := path.Join(gateway.LokiGatewayTLSDir, gateway.LokiGatewayCertFile)
keyFile := path.Join(gateway.LokiGatewayTLSDir, gateway.LokiGatewayKeyFile)
secretVolumeSpec := corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "tls-secret",
Name: tlsMetricsSercetVolume,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: LabelGatewayComponent,
},
},
},
{
Name: "tls-configmap",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: LabelGatewayComponent,
},
SecretName: secretName,
},
},
},
@ -295,28 +311,14 @@ func configureGatewayMetricsPKI(podSpec *corev1.PodSpec) error {
secretContainerSpec := corev1.Container{
VolumeMounts: []corev1.VolumeMount{
{
Name: "tls-secret",
ReadOnly: true,
MountPath: path.Join(gateway.LokiGatewayTLSDir, "cert"),
SubPath: "cert",
},
{
Name: "tls-secret",
ReadOnly: true,
MountPath: path.Join(gateway.LokiGatewayTLSDir, "key"),
SubPath: "key",
},
{
Name: "tls-configmap",
Name: tlsMetricsSercetVolume,
ReadOnly: true,
MountPath: path.Join(gateway.LokiGatewayTLSDir, "ca"),
SubPath: "ca",
MountPath: gateway.LokiGatewayTLSDir,
},
},
Args: []string{
fmt.Sprintf("--tls.internal.server.cert-file=%s", path.Join(gateway.LokiGatewayTLSDir, "cert")),
fmt.Sprintf("--tls.internal.server.key-file=%s", path.Join(gateway.LokiGatewayTLSDir, "key")),
fmt.Sprintf("--tls.healthchecks.server-ca-file=%s", path.Join(gateway.LokiGatewayTLSDir, "ca")),
fmt.Sprintf("--tls.internal.server.cert-file=%s", certFile),
fmt.Sprintf("--tls.internal.server.key-file=%s", keyFile),
},
}
uriSchemeContainerSpec := corev1.Container{
@ -340,11 +342,11 @@ func configureGatewayMetricsPKI(podSpec *corev1.PodSpec) error {
return kverrors.Wrap(err, "failed to merge volumes")
}
if err := mergo.Merge(&podSpec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil {
if err := mergo.Merge(&podSpec.Containers[gwIndex], secretContainerSpec, mergo.WithAppendSlice); err != nil {
return kverrors.Wrap(err, "failed to merge container")
}
if err := mergo.Merge(&podSpec.Containers[0], uriSchemeContainerSpec, mergo.WithOverride); err != nil {
if err := mergo.Merge(&podSpec.Containers[gwIndex], uriSchemeContainerSpec, mergo.WithOverride); err != nil {
return kverrors.Wrap(err, "failed to merge container")
}

@ -0,0 +1,269 @@
package manifests
import (
"fmt"
"os"
"path"
"github.com/ViaQ/logerr/kverrors"
"github.com/google/uuid"
"github.com/imdario/mergo"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
"github.com/ViaQ/loki-operator/internal/manifests/internal/gateway"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
envRelatedImageOPA = "RELATED_IMAGE_OPA"
defaultOPAImage = "quay.io/observatorium/opa-openshift:latest"
opaContainerName = "opa-openshift"
opaDefaultPackage = "lokistack"
opaDefaultAPIGroup = "loki.openshift.io"
opaMetricsPortName = "opa-metrics"
// OpenShiftApplicationTenant is the name of the tenant holding application logs.
OpenShiftApplicationTenant = "application"
// OpenShiftInfraTenant is the name of the tenant holding infrastructure logs.
OpenShiftInfraTenant = "infrastructure"
// OpenShiftAuditTenant is the name of the tenant holding audit logs.
OpenShiftAuditTenant = "audit"
)
// OpenShiftDefaultTenants represents the slice of all default tenants of LokiStack on OpenShift.
var OpenShiftDefaultTenants = []string{
OpenShiftApplicationTenant,
OpenShiftInfraTenant,
OpenShiftAuditTenant,
}
// ApplyGatewayDefaultOptions applies defaults to the LokiStackSpec depending on the selected
// tenant mode. Currently nothing is applied for the static and dynamic modes. For the
// openshift-logging mode, the tenant spec is filled with defaults for authentication and authorization.
func ApplyGatewayDefaultOptions(opts *Options) error {
switch opts.Stack.Tenants.Mode {
case lokiv1beta1.Static, lokiv1beta1.Dynamic:
return nil // continue using user input
case lokiv1beta1.OpenshiftLogging:
var authn []lokiv1beta1.AuthenticationSpec
for _, name := range OpenShiftDefaultTenants {
authn = append(authn, lokiv1beta1.AuthenticationSpec{
TenantName: name,
TenantID: uuid.New().String(),
OIDC: &lokiv1beta1.OIDCSpec{
// TODO Setup when we integrate dex as a separate sidecar here
IssuerURL: "https://127.0.0.1:5556/dex",
RedirectURL: fmt.Sprintf("http://localhost:%d/oidc/%s/callback", gatewayHTTPPort, name),
UsernameClaim: "name",
},
})
}
defaults := &lokiv1beta1.TenantsSpec{
Authentication: authn,
Authorization: &lokiv1beta1.AuthorizationSpec{
OPA: &lokiv1beta1.OPASpec{
URL: fmt.Sprintf("http://localhost:%d/data/%s/allow", gatewayOPAHTTPPort, opaDefaultPackage),
},
},
}
if err := mergo.Merge(opts.Stack.Tenants, defaults, mergo.WithOverride); err != nil {
return kverrors.Wrap(err, "failed to merge defaults for mode openshift logging")
}
}
return nil
}
func configureDeploymentForMode(d *appsv1.DeploymentSpec, mode lokiv1beta1.ModeType, flags FeatureFlags) error {
switch mode {
case lokiv1beta1.Static, lokiv1beta1.Dynamic:
return nil // nothing to configure
case lokiv1beta1.OpenshiftLogging:
return configureDeploymentForOpenShiftLogging(d, flags)
}
return nil
}
func configureServiceForMode(s *corev1.ServiceSpec, mode lokiv1beta1.ModeType) error {
switch mode {
case lokiv1beta1.Static, lokiv1beta1.Dynamic:
return nil // nothing to configure
case lokiv1beta1.OpenshiftLogging:
return configureServiceForOpenShiftLogging(s)
}
return nil
}
func configureServiceMonitorForMode(sm *monitoringv1.ServiceMonitor, mode lokiv1beta1.ModeType, flags FeatureFlags) error {
switch mode {
case lokiv1beta1.Static, lokiv1beta1.Dynamic:
return nil // nothing to configure
case lokiv1beta1.OpenshiftLogging:
return configureServiceMonitorForOpenShiftLogging(sm, flags)
}
return nil
}
func configureDeploymentForOpenShiftLogging(d *appsv1.DeploymentSpec, flags FeatureFlags) error {
p := corev1.PodSpec{
Containers: []corev1.Container{
newOPAOpenShiftContainer(flags),
},
}
if err := mergo.Merge(&d.Template.Spec, p, mergo.WithAppendSlice); err != nil {
return kverrors.Wrap(err, "failed to merge sidecar containers")
}
return nil
}
func newOPAOpenShiftContainer(flags FeatureFlags) corev1.Container {
var (
image string
args []string
uriScheme corev1.URIScheme
volumeMounts []corev1.VolumeMount
)
image = os.Getenv(envRelatedImageOPA)
if image == "" {
image = defaultOPAImage
}
uriScheme = corev1.URISchemeHTTP
args = []string{
"--log.level=warn",
fmt.Sprintf("--opa.package=%s", opaDefaultPackage),
fmt.Sprintf("--web.listen=:%d", gatewayOPAHTTPPort),
fmt.Sprintf("--web.internal.listen=:%d", gatewayOPAInternalPort),
fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", gatewayOPAHTTPPort),
}
if flags.EnableTLSServiceMonitorConfig {
certFile := path.Join(gateway.LokiGatewayTLSDir, gateway.LokiGatewayCertFile)
keyFile := path.Join(gateway.LokiGatewayTLSDir, gateway.LokiGatewayKeyFile)
args = append(args, []string{
fmt.Sprintf("--tls.internal.server.cert-file=%s", certFile),
fmt.Sprintf("--tls.internal.server.key-file=%s", keyFile),
}...)
uriScheme = corev1.URISchemeHTTPS
volumeMounts = []corev1.VolumeMount{
{
Name: tlsMetricsSercetVolume,
ReadOnly: true,
MountPath: gateway.LokiGatewayTLSDir,
},
}
}
for _, t := range OpenShiftDefaultTenants {
args = append(args, fmt.Sprintf(`--openshift.mappings=%s=%s`, t, opaDefaultAPIGroup))
}
return corev1.Container{
Name: opaContainerName,
Image: image,
Args: args,
Ports: []corev1.ContainerPort{
{
Name: gatewayOPAHTTPPortName,
ContainerPort: gatewayOPAHTTPPort,
Protocol: corev1.ProtocolTCP,
},
{
Name: gatewayOPAInternalPortName,
ContainerPort: gatewayOPAInternalPort,
Protocol: corev1.ProtocolTCP,
},
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(gatewayOPAInternalPort),
Scheme: uriScheme,
},
},
TimeoutSeconds: 2,
PeriodSeconds: 30,
FailureThreshold: 10,
},
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(gatewayOPAInternalPort),
Scheme: uriScheme,
},
},
TimeoutSeconds: 1,
PeriodSeconds: 5,
FailureThreshold: 12,
},
VolumeMounts: volumeMounts,
}
}
func configureServiceForOpenShiftLogging(s *corev1.ServiceSpec) error {
spec := corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: opaMetricsPortName,
Port: gatewayOPAInternalPort,
},
},
}
if err := mergo.Merge(s, spec, mergo.WithAppendSlice); err != nil {
return kverrors.Wrap(err, "failed to merge sidecar containers")
}
return nil
}
func configureServiceMonitorForOpenShiftLogging(sm *monitoringv1.ServiceMonitor, flags FeatureFlags) error {
var opaEndpoint monitoringv1.Endpoint
if flags.EnableTLSServiceMonitorConfig {
tlsConfig := sm.Spec.Endpoints[0].TLSConfig
opaEndpoint = monitoringv1.Endpoint{
Port: opaMetricsPortName,
Path: "/metrics",
Scheme: "https",
BearerTokenFile: BearerTokenFile,
TLSConfig: tlsConfig,
}
} else {
opaEndpoint = monitoringv1.Endpoint{
Port: opaMetricsPortName,
Path: "/metrics",
Scheme: "http",
}
}
spec := monitoringv1.ServiceMonitorSpec{
Endpoints: []monitoringv1.Endpoint{opaEndpoint},
}
if err := mergo.Merge(&sm.Spec, spec, mergo.WithAppendSlice); err != nil {
return kverrors.Wrap(err, "failed to merge sidecar service monitor endpoints")
}
return nil
}
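
The sidecar and default wiring above relies on two mergo options: WithOverride (the computed tenant defaults replace the user-supplied spec) and WithAppendSlice (the opa-openshift container, service port, and ServiceMonitor endpoint are appended to the existing slices). Below is a self-contained sketch of the two behaviours using toy types rather than the Kubernetes structs; it is illustrative only.

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type toySpec struct {
	Mode       string
	Containers []string
}

func main() {
	// WithAppendSlice: the source slice is appended to the destination slice,
	// which is how the opa-openshift sidecar joins the existing gateway container.
	dst := toySpec{Mode: "openshift-logging", Containers: []string{"gateway"}}
	sidecar := toySpec{Containers: []string{"opa-openshift"}}
	if err := mergo.Merge(&dst, sidecar, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	fmt.Println(dst.Containers) // [gateway opa-openshift]

	// WithOverride: non-zero source fields replace destination values, which is
	// how the generated authentication/authorization defaults win over the
	// empty user-provided tenants spec in openshift-logging mode.
	user := toySpec{Mode: "openshift-logging"}
	defaults := toySpec{Mode: "openshift-logging", Containers: []string{"application", "infrastructure", "audit"}}
	if err := mergo.Merge(&user, defaults, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Println(user.Containers) // [application infrastructure audit]
}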

@ -0,0 +1,438 @@
package manifests
import (
"testing"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/stretchr/testify/require"
lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
"github.com/ViaQ/loki-operator/internal/manifests/internal/gateway"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func TestApplyGatewayDefaultsOptions(t *testing.T) {
type tt struct {
desc string
opts *Options
want *Options
}
tc := []tt{
{
desc: "static mode",
opts: &Options{
Stack: lokiv1beta1.LokiStackSpec{
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.Static,
},
},
},
want: &Options{
Stack: lokiv1beta1.LokiStackSpec{
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.Static,
},
},
},
},
{
desc: "dynamic mode",
opts: &Options{
Stack: lokiv1beta1.LokiStackSpec{
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.Dynamic,
},
},
},
want: &Options{
Stack: lokiv1beta1.LokiStackSpec{
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.Dynamic,
},
},
},
},
{
desc: "openshift-logging mode",
opts: &Options{
Stack: lokiv1beta1.LokiStackSpec{
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.OpenshiftLogging,
},
},
},
want: &Options{
Stack: lokiv1beta1.LokiStackSpec{
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.OpenshiftLogging,
Authentication: []lokiv1beta1.AuthenticationSpec{
{
TenantName: "application",
TenantID: "",
OIDC: &lokiv1beta1.OIDCSpec{
IssuerURL: "https://127.0.0.1:5556/dex",
RedirectURL: "http://localhost:8080/oidc/application/callback",
UsernameClaim: "name",
},
},
{
TenantName: "infrastructure",
TenantID: "",
OIDC: &lokiv1beta1.OIDCSpec{
IssuerURL: "https://127.0.0.1:5556/dex",
RedirectURL: "http://localhost:8080/oidc/infrastructure/callback",
UsernameClaim: "name",
},
},
{
TenantName: "audit",
TenantID: "",
OIDC: &lokiv1beta1.OIDCSpec{
IssuerURL: "https://127.0.0.1:5556/dex",
RedirectURL: "http://localhost:8080/oidc/audit/callback",
UsernameClaim: "name",
},
},
},
Authorization: &lokiv1beta1.AuthorizationSpec{
OPA: &lokiv1beta1.OPASpec{
URL: "http://localhost:8082/data/lokistack/allow",
},
},
},
},
},
},
}
for _, tc := range tc {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
err := ApplyGatewayDefaultOptions(tc.opts)
require.NoError(t, err)
for i, a := range tc.opts.Stack.Tenants.Authentication {
a.TenantID = ""
tc.opts.Stack.Tenants.Authentication[i] = a
}
require.Equal(t, tc.want, tc.opts)
})
}
}
func TestConfigureDeploymentForMode(t *testing.T) {
type tt struct {
desc string
mode lokiv1beta1.ModeType
flags FeatureFlags
dpl *appsv1.DeploymentSpec
want *appsv1.DeploymentSpec
}
tc := []tt{
{
desc: "static mode",
mode: lokiv1beta1.Static,
dpl: &appsv1.DeploymentSpec{},
want: &appsv1.DeploymentSpec{},
},
{
desc: "dynamic mode",
mode: lokiv1beta1.Dynamic,
dpl: &appsv1.DeploymentSpec{},
want: &appsv1.DeploymentSpec{},
},
{
desc: "openshift-logging mode",
mode: lokiv1beta1.OpenshiftLogging,
dpl: &appsv1.DeploymentSpec{},
want: &appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "opa-openshift",
Image: "quay.io/observatorium/opa-openshift:latest",
Args: []string{
"--log.level=warn",
"--opa.package=lokistack",
"--web.listen=:8082",
"--web.internal.listen=:8083",
"--web.healthchecks.url=http://localhost:8082",
`--openshift.mappings=application=loki.openshift.io`,
`--openshift.mappings=infrastructure=loki.openshift.io`,
`--openshift.mappings=audit=loki.openshift.io`,
},
Ports: []corev1.ContainerPort{
{
Name: gatewayOPAHTTPPortName,
ContainerPort: gatewayOPAHTTPPort,
Protocol: corev1.ProtocolTCP,
},
{
Name: gatewayOPAInternalPortName,
ContainerPort: gatewayOPAInternalPort,
Protocol: corev1.ProtocolTCP,
},
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(gatewayOPAInternalPort),
Scheme: corev1.URISchemeHTTP,
},
},
TimeoutSeconds: 2,
PeriodSeconds: 30,
FailureThreshold: 10,
},
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(gatewayOPAInternalPort),
Scheme: corev1.URISchemeHTTP,
},
},
TimeoutSeconds: 1,
PeriodSeconds: 5,
FailureThreshold: 12,
},
},
},
},
},
},
},
{
desc: "openshift-logging mode with-tls-service-monitor-config",
mode: lokiv1beta1.OpenshiftLogging,
flags: FeatureFlags{
EnableTLSServiceMonitorConfig: true,
},
dpl: &appsv1.DeploymentSpec{},
want: &appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "opa-openshift",
Image: "quay.io/observatorium/opa-openshift:latest",
Args: []string{
"--log.level=warn",
"--opa.package=lokistack",
"--web.listen=:8082",
"--web.internal.listen=:8083",
"--web.healthchecks.url=http://localhost:8082",
"--tls.internal.server.cert-file=/var/run/tls/tls.crt",
"--tls.internal.server.key-file=/var/run/tls/tls.key",
`--openshift.mappings=application=loki.openshift.io`,
`--openshift.mappings=infrastructure=loki.openshift.io`,
`--openshift.mappings=audit=loki.openshift.io`,
},
Ports: []corev1.ContainerPort{
{
Name: gatewayOPAHTTPPortName,
ContainerPort: gatewayOPAHTTPPort,
Protocol: corev1.ProtocolTCP,
},
{
Name: gatewayOPAInternalPortName,
ContainerPort: gatewayOPAInternalPort,
Protocol: corev1.ProtocolTCP,
},
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(gatewayOPAInternalPort),
Scheme: corev1.URISchemeHTTPS,
},
},
TimeoutSeconds: 2,
PeriodSeconds: 30,
FailureThreshold: 10,
},
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(gatewayOPAInternalPort),
Scheme: corev1.URISchemeHTTPS,
},
},
TimeoutSeconds: 1,
PeriodSeconds: 5,
FailureThreshold: 12,
},
VolumeMounts: []corev1.VolumeMount{
{
Name: tlsMetricsSercetVolume,
ReadOnly: true,
MountPath: gateway.LokiGatewayTLSDir,
},
},
},
},
},
},
},
},
}
for _, tc := range tc {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
err := configureDeploymentForMode(tc.dpl, tc.mode, tc.flags)
require.NoError(t, err)
require.Equal(t, tc.want, tc.dpl)
})
}
}
func TestConfigureServiceForMode(t *testing.T) {
type tt struct {
desc string
mode lokiv1beta1.ModeType
svc *corev1.ServiceSpec
want *corev1.ServiceSpec
}
tc := []tt{
{
desc: "static mode",
mode: lokiv1beta1.Static,
svc: &corev1.ServiceSpec{},
want: &corev1.ServiceSpec{},
},
{
desc: "dynamic mode",
mode: lokiv1beta1.Dynamic,
svc: &corev1.ServiceSpec{},
want: &corev1.ServiceSpec{},
},
{
desc: "openshift-logging mode",
mode: lokiv1beta1.OpenshiftLogging,
svc: &corev1.ServiceSpec{},
want: &corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: opaMetricsPortName,
Port: gatewayOPAInternalPort,
},
},
},
},
}
for _, tc := range tc {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
err := configureServiceForMode(tc.svc, tc.mode)
require.NoError(t, err)
require.Equal(t, tc.want, tc.svc)
})
}
}
func TestConfigureServiceMonitorForMode(t *testing.T) {
type tt struct {
desc string
mode lokiv1beta1.ModeType
flags FeatureFlags
sm *monitoringv1.ServiceMonitor
want *monitoringv1.ServiceMonitor
}
tc := []tt{
{
desc: "static mode",
mode: lokiv1beta1.Static,
sm: &monitoringv1.ServiceMonitor{},
want: &monitoringv1.ServiceMonitor{},
},
{
desc: "dynamic mode",
mode: lokiv1beta1.Dynamic,
sm: &monitoringv1.ServiceMonitor{},
want: &monitoringv1.ServiceMonitor{},
},
{
desc: "openshift-logging mode",
mode: lokiv1beta1.OpenshiftLogging,
sm: &monitoringv1.ServiceMonitor{},
want: &monitoringv1.ServiceMonitor{
Spec: monitoringv1.ServiceMonitorSpec{
Endpoints: []monitoringv1.Endpoint{
{
Port: opaMetricsPortName,
Path: "/metrics",
Scheme: "http",
},
},
},
},
},
{
desc: "openshift-logging mode with-tls-service-monitor-config",
mode: lokiv1beta1.OpenshiftLogging,
flags: FeatureFlags{
EnableTLSServiceMonitorConfig: true,
},
sm: &monitoringv1.ServiceMonitor{
Spec: monitoringv1.ServiceMonitorSpec{
Endpoints: []monitoringv1.Endpoint{
{
TLSConfig: &monitoringv1.TLSConfig{
CAFile: "/path/to/ca/file",
CertFile: "/path/to/cert/file",
KeyFile: "/path/to/key/file",
},
},
},
},
},
want: &monitoringv1.ServiceMonitor{
Spec: monitoringv1.ServiceMonitorSpec{
Endpoints: []monitoringv1.Endpoint{
{
TLSConfig: &monitoringv1.TLSConfig{
CAFile: "/path/to/ca/file",
CertFile: "/path/to/cert/file",
KeyFile: "/path/to/key/file",
},
},
{
Port: opaMetricsPortName,
Path: "/metrics",
Scheme: "https",
BearerTokenFile: BearerTokenFile,
TLSConfig: &monitoringv1.TLSConfig{
CAFile: "/path/to/ca/file",
CertFile: "/path/to/cert/file",
KeyFile: "/path/to/key/file",
},
},
},
},
},
},
}
for _, tc := range tc {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
err := configureServiceMonitorForMode(tc.sm, tc.mode, tc.flags)
require.NoError(t, err)
require.Equal(t, tc.want, tc.sm)
})
}
}

@ -7,6 +7,8 @@ import (
lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
)
func TestNewGatewayDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
@ -102,3 +104,29 @@ func TestGatewayConfigMap_ReturnsSHA1OfBinaryContents(t *testing.T) {
require.NoError(t, err)
require.NotEmpty(t, sha1C)
}
func TestBuildGateway_HasConfigForTenantMode(t *testing.T) {
objs, err := BuildGateway(Options{
Name: "abcd",
Namespace: "efgh",
Flags: FeatureFlags{
EnableGateway: true,
},
Stack: lokiv1beta1.LokiStackSpec{
Template: &lokiv1beta1.LokiTemplateSpec{
Gateway: &lokiv1beta1.LokiComponentSpec{
Replicas: rand.Int31(),
},
},
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.OpenshiftLogging,
},
},
})
require.NoError(t, err)
d, ok := objs[1].(*appsv1.Deployment)
require.True(t, ok)
require.Len(t, d.Spec.Template.Spec.Containers, 2)
}

@ -89,17 +89,17 @@ func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet {
},
Ports: []corev1.ContainerPort{
{
Name: "metrics",
Name: lokiHTTPPortName,
ContainerPort: httpPort,
Protocol: protocolTCP,
},
{
Name: "grpc",
Name: lokiGRPCPortName,
ContainerPort: grpcPort,
Protocol: protocolTCP,
},
{
Name: "gossip-ring",
Name: lokiGossipPortName,
ContainerPort: gossipPort,
Protocol: protocolTCP,
},
@ -196,7 +196,7 @@ func NewIngesterGRPCService(opts Options) *corev1.Service {
ClusterIP: "None",
Ports: []corev1.ServicePort{
{
Name: "grpc",
Name: lokiGRPCPortName,
Port: grpcPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: grpcPort},
@ -226,7 +226,7 @@ func NewIngesterHTTPService(opts Options) *corev1.Service {
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "metrics",
Name: lokiHTTPPortName,
Port: httpPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: httpPort},

@ -6,7 +6,7 @@ import (
"io/ioutil"
"text/template"
"github.com/ViaQ/loki-operator/api/v1beta1"
lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
"github.com/ViaQ/logerr/kverrors"
)
@ -22,6 +22,14 @@ const (
LokiGatewayMountDir = "/etc/lokistack-gateway"
// LokiGatewayTLSDir is the path that is mounted from the configmap for TLS
LokiGatewayTLSDir = "/var/run/tls"
// LokiGatewayCABundleDir is the path that is mounted from the configmap for the CA bundle
LokiGatewayCABundleDir = "/var/run/ca"
// LokiGatewayCAFile is the file name of the certificate authority file
LokiGatewayCAFile = "service-ca.crt"
// LokiGatewayCertFile is the file name of the X509 server certificate
LokiGatewayCertFile = "tls.crt"
// LokiGatewayKeyFile is the file name of the server private key
LokiGatewayKeyFile = "tls.key"
)
var (
@ -64,7 +72,7 @@ func Build(opts Options) (rbacCfg []byte, tenantsCfg []byte, regoCfg []byte, err
return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}
// Build loki gateway observatorium rego for static mode
if opts.Stack.Tenants.Mode == v1beta1.Static {
if opts.Stack.Tenants.Mode == lokiv1beta1.Static {
w = bytes.NewBuffer(nil)
err = lokiStackGatewayRegoTmpl.Execute(w, opts)
if err != nil {

@ -205,16 +205,62 @@ tenants:
Stack: lokiv1beta1.LokiStackSpec{
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.OpenshiftLogging,
Authentication: []lokiv1beta1.AuthenticationSpec{
{
TenantName: "application",
TenantID: "32e45e3e-b760-43a2-a7e1-02c5631e56e9",
OIDC: &lokiv1beta1.OIDCSpec{
IssuerURL: "https://127.0.0.1:5556/dex",
RedirectURL: "https://localhost:8443/oidc/application/callback",
UsernameClaim: "name",
},
},
{
TenantName: "infrastructure",
TenantID: "40de0532-10a2-430c-9a00-62c46455c118",
OIDC: &lokiv1beta1.OIDCSpec{
IssuerURL: "https://127.0.0.1:5556/dex",
RedirectURL: "https://localhost:8443/oidc/infrastructure/callback",
UsernameClaim: "name",
},
},
{
TenantName: "audit",
TenantID: "26d7c49d-182e-4d93-bade-510c6cc3243d",
OIDC: &lokiv1beta1.OIDCSpec{
IssuerURL: "https://127.0.0.1:5556/dex",
RedirectURL: "https://localhost:8443/oidc/audit/callback",
UsernameClaim: "name",
},
},
},
Authorization: &lokiv1beta1.AuthorizationSpec{
OPA: &lokiv1beta1.OPASpec{
URL: "http://127.0.0.1:8080/v1/data/lokistack/allow",
},
},
},
},
Namespace: "test-ns",
Name: "test",
TenantSecrets: []*Secret{
{
TenantName: "test-a",
TenantName: "application",
ClientID: "test",
ClientSecret: "test123",
IssuerCAPath: "/tmp/ca/path",
ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0",
IssuerCAPath: "./tmp/certs/ca.pem",
},
{
TenantName: "infrastructure",
ClientID: "test",
ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0",
IssuerCAPath: "./tmp/certs/ca.pem",
},
{
TenantName: "audit",
ClientID: "test",
ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0",
IssuerCAPath: "./tmp/certs/ca.pem",
},
},
}

@ -17,24 +17,22 @@ tenants:
issuerCAPath: {{ $secret.IssuerCAPath }}
{{- end -}}
{{- end -}}
{{- end -}}
{{ print "\n" }}
{{- end }}
issuerURL: {{ $spec.OIDC.IssuerURL }}
redirectURL: {{ $spec.OIDC.RedirectURL }}
{{ if $spec.OIDC.UsernameClaim }}
usernameClaim: {{ $spec.OIDC.UsernameClaim }}
{{- end -}}
{{ if $spec.OIDC.GroupClaim }}
{{- if $spec.OIDC.GroupClaim }}
groupClaim: {{ $spec.OIDC.GroupClaim }}
{{- end -}}
{{ print "\n" }}
{{- end }}
opa:
query: data.lokistack.allow
paths:
- /etc/lokistack-gateway/rbac.yaml
- /etc/lokistack-gateway/lokistack-gateway.rego
{{- end -}}
{{- else if eq $l.Stack.Tenants.Mode "dynamic" -}}
{{- else -}}
{{- if $tenant := $l.Stack.Tenants -}}
{{- range $spec := $tenant.Authentication }}
- name: {{ $spec.TenantName }}
@ -45,65 +43,25 @@ tenants:
{{ if $secret.ClientID }}
clientID: {{ $secret.ClientID }}
{{- end -}}
{{ if $secret.ClientID }}
{{ if $secret.ClientSecret }}
clientSecret: {{ $secret.ClientSecret }}
{{- end -}}
{{ if $secret.ClientID }}
{{ if $secret.IssuerCAPath }}
issuerCAPath: {{ $secret.IssuerCAPath }}
{{- end -}}
{{- end -}}
{{- end -}}
{{ print "\n" }}
{{- end }}
issuerURL: {{ $spec.OIDC.IssuerURL }}
redirectURL: {{ $spec.OIDC.RedirectURL }}
{{ if $spec.OIDC.UsernameClaim }}
{{- if $spec.OIDC.UsernameClaim }}
usernameClaim: {{ $spec.OIDC.UsernameClaim }}
{{- end -}}
{{ if $spec.OIDC.GroupClaim }}
{{- if $spec.OIDC.GroupClaim }}
groupClaim: {{ $spec.OIDC.GroupClaim }}
{{- end -}}
{{ print "\n" }}
{{- end }}
opa:
url: {{ $tenant.Authorization.OPA.URL }}
{{- end -}}
{{- end -}}
{{- else -}}
{{ print "\n" }}
- name: application
id: 32e45e3e-b760-43a2-a7e1-02c5631e56e9
oidc:
clientID: test
# TODO - these need to be replaced once we integrate dex into lokistack-gateway.
clientSecret: ZXhhbXBsZS1hcHAtc2VjcmV0
issuerCAPath: ./tmp/certs/ca.pem
issuerURL: https://127.0.0.1:5556/dex
redirectURL: https://localhost:8443/oidc/application/callback
usernameClaim: name
opa:
url: http://127.0.0.1:8080/v1/data/lokistack/allow
- name: infrastructure
id: 40de0532-10a2-430c-9a00-62c46455c118
oidc:
clientID: test
# TODO - these need to be replaced once we integrate dex into lokistack-gateway.
clientSecret: ZXhhbXBsZS1hcHAtc2VjcmV0
issuerCAPath: ./tmp/certs/ca.pem
issuerURL: https://127.0.0.1:5556/dex
redirectURL: https://localhost:8443/oidc/infrastructure/callback
usernameClaim: name
opa:
url: http://127.0.0.1:8080/v1/data/lokistack/allow
- name: audit
id: 26d7c49d-182e-4d93-bade-510c6cc3243d
oidc:
clientID: test
# TODO - these need to be replaced once we integrate dex into lokistack-gateway.
clientSecret: ZXhhbXBsZS1hcHAtc2VjcmV0
issuerCAPath: ./tmp/certs/ca.pem
issuerURL: https://127.0.0.1:5556/dex
redirectURL: https://localhost:8443/oidc/audit/callback
usernameClaim: name
opa:
url: http://127.0.0.1:8080/v1/data/lokistack/allow
{{- end -}}
{{- end -}}
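
Most of the template changes above are about Go text/template whitespace control: "{{-" and "-}}" trim the adjacent whitespace, and the '{{ print "\n" }}' action re-inserts a newline where trimming would otherwise glue entries together. A small, self-contained sketch of the idiom follows, using toy data rather than the actual tenants structure.

package main

import (
	"os"
	"text/template"
)

// Toy template using the same trim markers as gateway-tenants.yaml:
// "{{-" trims whitespace to the left of an action, "-}}" to the right,
// and the print action forces a newline after the trimmed block.
const tenantsTmpl = `tenants:
{{- range . }}
- name: {{ .Name }}
  {{- if .Claim }}
  usernameClaim: {{ .Claim }}
  {{- end }}
{{- end }}
{{ print "\n" }}`

func main() {
	type tenant struct{ Name, Claim string }
	t := template.Must(template.New("tenants").Parse(tenantsTmpl))
	// Prints a compact YAML list; the optional usernameClaim line only
	// appears for the tenant that sets a claim.
	if err := t.Execute(os.Stdout, []tenant{{Name: "application", Claim: "name"}, {Name: "audit"}}); err != nil {
		panic(err)
	}
}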

@ -24,7 +24,7 @@ func BuildLokiGossipRingService(stackName string) *corev1.Service {
ClusterIP: "None",
Ports: []corev1.ServicePort{
{
Name: "gossip",
Name: lokiGossipPortName,
Port: gossipPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: gossipPort},

@ -89,17 +89,17 @@ func NewQuerierStatefulSet(opts Options) *appsv1.StatefulSet {
},
Ports: []corev1.ContainerPort{
{
Name: "metrics",
Name: lokiHTTPPortName,
ContainerPort: httpPort,
Protocol: protocolTCP,
},
{
Name: "grpc",
Name: lokiGRPCPortName,
ContainerPort: grpcPort,
Protocol: protocolTCP,
},
{
Name: "gossip-ring",
Name: lokiGossipPortName,
ContainerPort: gossipPort,
Protocol: protocolTCP,
},
@ -197,7 +197,7 @@ func NewQuerierGRPCService(opts Options) *corev1.Service {
ClusterIP: "None",
Ports: []corev1.ServicePort{
{
Name: "grpc",
Name: lokiGRPCPortName,
Port: grpcPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: grpcPort},
@ -227,7 +227,7 @@ func NewQuerierHTTPService(opts Options) *corev1.Service {
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "http",
Name: lokiHTTPPortName,
Port: httpPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: httpPort},

@ -94,12 +94,12 @@ func NewQueryFrontendDeployment(opts Options) *appsv1.Deployment {
},
Ports: []corev1.ContainerPort{
{
Name: "metrics",
Name: lokiHTTPPortName,
ContainerPort: httpPort,
Protocol: protocolTCP,
},
{
Name: "grpc",
Name: lokiGRPCPortName,
ContainerPort: grpcPort,
Protocol: protocolTCP,
},
@ -177,7 +177,7 @@ func NewQueryFrontendGRPCService(opts Options) *corev1.Service {
ClusterIP: "None",
Ports: []corev1.ServicePort{
{
Name: "grpc",
Name: lokiGRPCPortName,
Port: grpcPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: grpcPort},
@ -207,7 +207,7 @@ func NewQueryFrontendHTTPService(opts Options) *corev1.Service {
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "http",
Name: lokiHTTPPortName,
Port: httpPort,
Protocol: protocolTCP,
TargetPort: intstr.IntOrString{IntVal: httpPort},

@ -29,7 +29,7 @@ func NewDistributorServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(DistributorName(opts.Name))
serviceName := serviceNameDistributorHTTP(opts.Name)
lokiEndpoint := serviceMonitorLokiEndPoint(opts.Name, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@ -40,7 +40,7 @@ func NewIngesterServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(IngesterName(opts.Name))
serviceName := serviceNameIngesterHTTP(opts.Name)
lokiEndpoint := serviceMonitorLokiEndPoint(opts.Name, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@ -51,7 +51,7 @@ func NewQuerierServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(QuerierName(opts.Name))
serviceName := serviceNameQuerierHTTP(opts.Name)
lokiEndpoint := serviceMonitorLokiEndPoint(opts.Name, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@ -62,7 +62,7 @@ func NewCompactorServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(CompactorName(opts.Name))
serviceName := serviceNameCompactorHTTP(opts.Name)
lokiEndpoint := serviceMonitorLokiEndPoint(opts.Name, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@ -73,7 +73,7 @@ func NewQueryFrontendServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(QueryFrontendName(opts.Name))
serviceName := serviceNameQueryFrontendHTTP(opts.Name)
lokiEndpoint := serviceMonitorLokiEndPoint(opts.Name, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@ -84,9 +84,17 @@ func NewGatewayServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(GatewayName(opts.Name))
serviceName := serviceNameGatewayHTTP(opts.Name)
lokiEndpoint := serviceMonitorLokiEndPoint(opts.Name, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
gwEndpoint := serviceMonitorEndpoint(gatewayInternalPortName, serviceName, opts.Namespace, opts.Flags.EnableTLSServiceMonitorConfig)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
sm := newServiceMonitor(opts.Namespace, serviceMonitorName, l, gwEndpoint)
if opts.Stack.Tenants != nil {
if err := configureServiceMonitorForMode(sm, opts.Stack.Tenants.Mode, opts.Flags); err != nil {
return sm
}
}
return sm
}
func newServiceMonitor(namespace, serviceMonitorName string, labels labels.Set, endpoint monitoringv1.Endpoint) *monitoringv1.ServiceMonitor {

@ -9,6 +9,7 @@ import (
lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test that all serviceMonitor match the labels of their services so that we know all serviceMonitor
@ -95,3 +96,33 @@ func TestServiceMonitorMatchLabels(t *testing.T) {
})
}
}
func TestServiceMonitorEndpoints_ForOpenShiftLoggingMode(t *testing.T) {
flags := FeatureFlags{
EnableGateway: true,
EnableCertificateSigningService: true,
EnableServiceMonitors: true,
EnableTLSServiceMonitorConfig: true,
}
opt := Options{
Name: "test",
Namespace: "test",
Image: "test",
Flags: flags,
Stack: lokiv1beta1.LokiStackSpec{
Size: lokiv1beta1.SizeOneXExtraSmall,
Tenants: &lokiv1beta1.TenantsSpec{
Mode: lokiv1beta1.OpenshiftLogging,
},
Template: &lokiv1beta1.LokiTemplateSpec{
Gateway: &lokiv1beta1.LokiComponentSpec{
Replicas: 1,
},
},
},
}
sm := NewGatewayServiceMonitor(opt)
require.Len(t, sm.Spec.Endpoints, 2)
}

@ -13,6 +13,21 @@ const (
httpPort = 3100
grpcPort = 9095
protocolTCP = "TCP"
lokiHTTPPortName = "metrics"
lokiGRPCPortName = "grpc"
lokiGossipPortName = "gossip-ring"
gatewayHTTPPort = 8080
gatewayInternalPort = 8081
gatewayOPAHTTPPort = 8082
gatewayOPAInternalPort = 8083
gatewayHTTPPortName = "public"
gatewayInternalPortName = "metrics"
gatewayOPAHTTPPortName = "public"
gatewayOPAInternalPortName = "opa-metrics"
// DefaultContainerImage declares the default fallback for loki image.
DefaultContainerImage = "docker.io/grafana/loki:2.2.1"
@ -26,9 +41,7 @@ const (
// labelJobComponent is a ServiceMonitor.Spec.JobLabel.
labelJobComponent string = "loki.grafana.com/component"
)
const (
// LabelCompactorComponent is the label value for the compactor component
LabelCompactorComponent string = "compactor"
// LabelDistributorComponent is the label value for the distributor component
@ -41,6 +54,8 @@ const (
LabelQueryFrontendComponent string = "query-frontend"
// LabelGatewayComponent is the label value for the lokiStack-gateway component
LabelGatewayComponent string = "lokistack-gateway"
openShiftServingCertKey = "service.beta.openshift.io/serving-cert-secret-name"
)
var (
@ -65,7 +80,7 @@ func commonLabels(stackName string) map[string]string {
func serviceAnnotations(serviceName string, enableSigningService bool) map[string]string {
annotations := map[string]string{}
if enableSigningService {
annotations["service.beta.openshift.io/serving-cert-secret-name"] = signingServiceSecretName(serviceName)
annotations[openShiftServingCertKey] = signingServiceSecretName(serviceName)
}
return annotations
}
@ -181,12 +196,12 @@ func serviceMonitorTLSConfig(serviceName, namespace string) monitoringv1.TLSConf
}
}
// serviceMonitorLokiEndPoint returns the loki endpoint for service monitors.
func serviceMonitorLokiEndPoint(stackName, serviceName, namespace string, enableTLS bool) monitoringv1.Endpoint {
// serviceMonitorEndpoint returns the lokistack endpoint for service monitors.
func serviceMonitorEndpoint(portName, serviceName, namespace string, enableTLS bool) monitoringv1.Endpoint {
if enableTLS {
tlsConfig := serviceMonitorTLSConfig(serviceName, namespace)
return monitoringv1.Endpoint{
Port: stackName,
Port: portName,
Path: "/metrics",
Scheme: "https",
BearerTokenFile: BearerTokenFile,
@ -195,7 +210,7 @@ func serviceMonitorLokiEndPoint(stackName, serviceName, namespace string, enable
}
return monitoringv1.Endpoint{
Port: stackName,
Port: portName,
Path: "/metrics",
Scheme: "http",
}
