chore(ksonnet): Simplify configuration of ingester deployment (#10542)

Simplify deployment with ksonnet: Remove the `stateful_ingesters` flag, because ingesters should always be deployed as a StatefulSet with the WAL (write-ahead log) enabled.
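For a downstream environment that consumes this library, the upgrade is a config-only change: delete the removed flag and, if needed, size the two ingester volumes explicitly. A minimal sketch (the import path, namespace, and sizes are illustrative, not part of this commit):

local loki = import 'loki/loki.libsonnet';

loki {
  _config+:: {
    namespace: 'loki',

    // stateful_ingesters: true,  // removed by this change; delete the override
    // ingester_pvc_size: '50Gi', // still honored as a backwards-compatible alias for the data disk

    ingester_data_disk_size: '50Gi',
    ingester_wal_disk_size: '150Gi',
  },
}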


Signed-off-by: Christian Haudum <christian.haudum@gmail.com>
Christian Haudum 2 years ago committed by GitHub
parent 9d9b05ad40
commit b43e253925
Files changed (changed lines in parentheses):

  1. CHANGELOG.md (2)
  2. production/ksonnet/enterprise-logs/main.libsonnet (1)
  3. production/ksonnet/loki/config.libsonnet (22)
  4. production/ksonnet/loki/ingester.libsonnet (63)
  5. production/ksonnet/loki/loki.libsonnet (3)
  6. production/ksonnet/loki/multi-zone.libsonnet (162)
  7. production/ksonnet/loki/rollout-operator.libsonnet (70)
  8. production/ksonnet/loki/shipper.libsonnet (2)
  9. production/ksonnet/loki/wal.libsonnet (45)

CHANGELOG.md
@@ -70,6 +70,8 @@
* [10784](https://github.com/grafana/loki/pull/10894) **slim-bean** Update index gateway client to use a headless service.
* [10542](https://github.com/grafana/loki/pull/10542) **chaudum**: Remove legacy deployment mode for ingester (Deployment, without WAL) and instead always run them as StatefulSet.
## 2.9.2 (2023-10-16)
### All Changes

production/ksonnet/enterprise-logs/main.libsonnet
@@ -62,7 +62,6 @@ loki {
},
ingester_pvc_size: '50Gi',
stateful_ingesters: true,
querier_pvc_size: '50Gi',
stateful_queriers: true,

production/ksonnet/loki/config.libsonnet
@@ -12,16 +12,20 @@
grpc_server_max_msg_size: 100 << 20, // 100MB
wal_enabled: true,
query_scheduler_enabled: false,
overrides_exporter_enabled: false,
// flags for running ingesters/queriers as a statefulset instead of deployment type.
// WAL enabled configurations automatically use statefulsets.
stateful_ingesters: false,
ingester_pvc_size: '10Gi',
ingester_pvc_class: 'fast',
ingester_data_disk_size: self.ingester_pvc_size, // keep backwards compatibility
ingester_data_disk_class: self.ingester_pvc_class, // keep backwards compatibility
ingester_wal_disk_size: '150Gi',
ingester_wal_disk_class: 'fast',
ingester_allow_multiple_replicas_on_same_node: false,
stateful_queriers: false,
querier_pvc_size: '10Gi',
querier_pvc_class: 'fast',
@@ -80,10 +84,6 @@
topology_spread_max_skew: 1,
},
ingester_allow_multiple_replicas_on_same_node: false,
ingester_data_disk_size: '10Gi',
ingester_data_disk_class: 'fast',
// Bigtable variables
bigtable_instance: error 'must specify bigtable instance',
bigtable_project: error 'must specify bigtable project',
@@ -231,6 +231,12 @@
chunk_idle_period: '15m',
chunk_block_size: 262144,
wal+: {
enabled: true,
dir: '/loki/wal',
replay_memory_ceiling: '7GB', // should be set to at most ~50% of available memory
},
lifecycler: {
ring: {
heartbeat_timeout: '1m',

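The data and WAL disks, together with the WAL defaults, are now plain `_config` keys, with the old `ingester_pvc_*` names kept as backwards-compatible aliases for the data disk. A sketch of per-environment overrides against the new keys (values are illustrative, not recommendations):

{
  _config+:: {
    ingester_data_disk_size: '100Gi',
    ingester_data_disk_class: 'fast',
    ingester_wal_disk_size: '150Gi',
    ingester_wal_disk_class: 'fast',

    loki+: {
      ingester+: {
        wal+: {
          replay_memory_ceiling: '7GB',  // per the upstream comment, at most ~50% of available memory
        },
      },
    },
  },
}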
production/ksonnet/loki/ingester.libsonnet
@@ -4,22 +4,26 @@ local k = import 'ksonnet-util/kausal.libsonnet';
local pvc = k.core.v1.persistentVolumeClaim,
local volumeMount = k.core.v1.volumeMount,
local statefulSet = k.apps.v1.statefulSet,
local podDisruptionBudget = k.policy.v1.podDisruptionBudget,
local name = 'ingester',
// The ingesters should persist TSDB blocks and WAL on a persistent
// volume in order to be crash resilient.
local ingester_data_pvc =
pvc.new() +
ingester_data_pvc::
pvc.new('ingester-data') +
pvc.mixin.spec.resources.withRequests({ storage: $._config.ingester_data_disk_size }) +
pvc.mixin.spec.withAccessModes(['ReadWriteOnce']) +
pvc.mixin.spec.withStorageClassName($._config.ingester_data_disk_class) +
pvc.mixin.metadata.withName('ingester-data'),
pvc.mixin.spec.withStorageClassName($._config.ingester_data_disk_class),
newIngesterStatefulSet(name, container, with_anti_affinity=true)::
// local ingesterContainer = container + $.core.v1.container.withVolumeMountsMixin([
// volumeMount.new('ingester-data', '/data'),
// ]);
ingester_wal_pvc::
pvc.new('ingester-wal') +
pvc.mixin.spec.resources.withRequests({ storage: $._config.ingester_wal_disk_size }) +
pvc.mixin.spec.withAccessModes(['ReadWriteOnce']) +
pvc.mixin.spec.withStorageClassName($._config.ingester_wal_disk_class),
$.newLokiStatefulSet(name, 3, container, ingester_data_pvc) +
newIngesterStatefulSet(name, container, with_anti_affinity=true)::
$.newLokiStatefulSet(name, 3, container, [self.ingester_data_pvc, self.ingester_wal_pvc]) +
// When the ingester needs to flush blocks to the storage, it may take quite a lot of time.
// For this reason, we grant a high termination grace period (80 minutes).
statefulSet.mixin.spec.template.spec.withTerminationGracePeriodSeconds(4800) +
@@ -42,42 +46,19 @@ local k = import 'ksonnet-util/kausal.libsonnet';
container.mixin.readinessProbe.httpGet.withPort($._config.http_listen_port) +
container.mixin.readinessProbe.withInitialDelaySeconds(15) +
container.mixin.readinessProbe.withTimeoutSeconds(1) +
k.util.resourcesRequests('1', '5Gi') +
k.util.resourcesLimits('2', '10Gi') +
k.util.resourcesRequests('1', '7Gi') +
k.util.resourcesLimits('2', '14Gi') +
container.withEnvMixin($._config.commonEnvs) +
if $._config.stateful_ingesters then
container.withVolumeMountsMixin([
volumeMount.new('ingester-data', '/data'),
]) else {},
container.withVolumeMountsMixin([
volumeMount.new('ingester-data', '/data'),
volumeMount.new('ingester-wal', $._config.loki.ingester.wal.dir),
]),
local deployment = k.apps.v1.deployment,
local name = 'ingester',
ingester_deployment: if !$._config.stateful_ingesters then
deployment.new(name, 3, [$.ingester_container]) +
$.config_hash_mixin +
k.util.configVolumeMount('loki', '/etc/loki/config') +
k.util.configVolumeMount(
$._config.overrides_configmap_mount_name,
$._config.overrides_configmap_mount_path,
) +
k.util.antiAffinity +
deployment.mixin.spec.withMinReadySeconds(60) +
deployment.mixin.spec.strategy.rollingUpdate.withMaxSurge(0) +
deployment.mixin.spec.strategy.rollingUpdate.withMaxUnavailable(1) +
deployment.mixin.spec.template.spec.withTerminationGracePeriodSeconds(4800)
else {},
ingester_statefulset: self.newIngesterStatefulSet('ingester', $.ingester_container, !$._config.ingester_allow_multiple_replicas_on_same_node),
ingester_statefulset:
self.newIngesterStatefulSet('ingester', $.ingester_container, !$._config.ingester_allow_multiple_replicas_on_same_node),
ingester_service:
if !$._config.stateful_ingesters then
k.util.serviceFor($.ingester_deployment, $._config.service_ignored_labels)
else
k.util.serviceFor($.ingester_statefulset, $._config.service_ignored_labels),
local podDisruptionBudget = k.policy.v1.podDisruptionBudget,
k.util.serviceFor($.ingester_statefulset, $._config.service_ignored_labels),
ingester_pdb:
podDisruptionBudget.new('loki-ingester-pdb') +

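The WAL resource sizing (1 CPU / 7Gi requests, 2 CPU / 14Gi limits) previously added by wal.libsonnet is now part of the base ingester container; environments that need different sizing can still override it. A hedged sketch, with illustrative values only:

local k = import 'ksonnet-util/kausal.libsonnet';

{
  // Illustrative override of the ingester container resources.
  ingester_container+::
    k.util.resourcesRequests('2', '10Gi') +
    k.util.resourcesLimits('4', '20Gi'),
}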
production/ksonnet/loki/loki.libsonnet
@@ -21,9 +21,6 @@
(import 'memcached.libsonnet') +
(import 'overrides-exporter.libsonnet') +
// WAL support
(import 'wal.libsonnet') +
// Index Gateway support
(import 'index-gateway.libsonnet') +

production/ksonnet/loki/multi-zone.libsonnet
@@ -1,22 +1,18 @@
local rolloutOperator = import 'rollout-operator.libsonnet';
{
local container = $.core.v1.container,
local deployment = $.apps.v1.deployment,
local statefulSet = $.apps.v1.statefulSet,
local topologySpreadConstraints = $.core.v1.topologySpreadConstraint,
local podDisruptionBudget = $.policy.v1.podDisruptionBudget,
local volume = $.core.v1.volume,
local roleBinding = $.rbac.v1.roleBinding,
local role = $.rbac.v1.role,
local service = $.core.v1.service,
local serviceAccount = $.core.v1.serviceAccount,
local servicePort = $.core.v1.servicePort,
local policyRule = $.rbac.v1.policyRule,
local podAntiAffinity = deployment.mixin.spec.template.spec.affinity.podAntiAffinity,
local pvc = $.core.v1.persistentVolumeClaim,
_images+:: {
rollout_operator: 'grafana/rollout-operator:v0.1.1',
},
local podAntiAffinity = deployment.mixin.spec.template.spec.affinity.podAntiAffinity,
_config+: {
_config+:: {
multi_zone_ingester_enabled: true,
multi_zone_ingester_migration_enabled: false,
multi_zone_ingester_replication_write_path_enabled: true,
@@ -25,6 +21,17 @@
multi_zone_ingester_max_unavailable: std.max(1, std.floor($._config.multi_zone_ingester_replicas / 9)),
multi_zone_default_ingester_zone: false,
multi_zone_ingester_exclude_default: false,
multi_zone_ingester_name_prefix: 'ingester-zone',
// If use_topology_spread is true, ingesters can run on nodes already running ingesters but will be
// spread through the available nodes using a TopologySpreadConstraints with a max skew
// of topology_spread_max_skew.
// See: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
// If use_topology_spread is false, ingesters will not be scheduled on nodes already running ingesters.
multi_zone_ingester_use_topology_spread: false,
multi_zone_ingester_topology_spread_max_skew: 1,
node_selector: null,
},
// Zone-aware replication.
@@ -49,16 +56,43 @@
ingester_zone_b_args:: {},
ingester_zone_c_args:: {},
// For migration purposes we need to be able to configure a zone for single ingester statefulset deployments.
ingester_container+:: if !$._config.multi_zone_default_ingester_zone then {} else
container.withArgs($.util.mapToFlags($.ingester_args {
'ingester.availability-zone': 'zone-default',
})),
newIngesterZoneContainer(zone, zone_args)::
local zone_name = 'zone-%s' % zone;
// remove after upstream PR is merged and is in a K release
// functions for k8s objects
newLokiPdb(deploymentName, maxUnavailable=1)::
local pdbName = '%s-pdb' % deploymentName;
podDisruptionBudget.new() +
podDisruptionBudget.mixin.metadata.withName(pdbName) +
podDisruptionBudget.mixin.metadata.withLabels({ name: pdbName }) +
podDisruptionBudget.mixin.spec.selector.withMatchLabels({ name: deploymentName }) +
podDisruptionBudget.mixin.spec.withMaxUnavailable(maxUnavailable),
newIngesterPdb(ingesterName)::
$.newLokiPdb(ingesterName),
newLokiStatefulSet(name, replicas, container, pvc, podManagementPolicy='Parallel')::
statefulSet.new(name, replicas, container, pvc) +
statefulSet.mixin.spec.withServiceName(name) +
statefulSet.mixin.spec.template.metadata.withLabels({ name: name }) +
statefulSet.mixin.spec.selector.withMatchLabels({ name: name }) +
statefulSet.mixin.spec.template.spec.securityContext.withFsGroup(10001) + // 10001 is the group ID assigned to Loki in the Dockerfile
statefulSet.mixin.spec.updateStrategy.withType('RollingUpdate') +
$.config_hash_mixin +
(if podManagementPolicy != null then statefulSet.mixin.spec.withPodManagementPolicy(podManagementPolicy) else {}) +
(if !std.isObject($._config.node_selector) then {} else statefulSet.mixin.spec.template.spec.withNodeSelectorMixin($._config.node_selector)) +
$.util.configVolumeMount('loki', '/etc/loki/config') +
$.util.configVolumeMount(
$._config.overrides_configmap_mount_name,
$._config.overrides_configmap_mount_path,
),
newIngesterZoneContainer(zone, zone_args)::
$.ingester_container +
container.withArgs($.util.mapToFlags(
$.ingester_args + zone_args + {
@@ -67,24 +101,31 @@
)),
newIngesterZoneStatefulSet(zone, container)::
local name = 'ingester-zone-%s' % zone;
local name = '%(prefix)s-%(zone)s' % { prefix: $._config.multi_zone_ingester_name_prefix, zone: zone };
// We can turn off anti-affinity for zone aware statefulsets since it's safe to
// deploy multiple ingesters from the same zone on the same node.
$.newIngesterStatefulSet(name, container, with_anti_affinity=false) +
self.newIngesterStatefulSet(name, container, with_anti_affinity=false) +
statefulSet.mixin.metadata.withLabels({ 'rollout-group': 'ingester' }) +
statefulSet.mixin.metadata.withAnnotations({ 'rollout-max-unavailable': std.toString($._config.multi_zone_ingester_max_unavailable) }) +
statefulSet.mixin.spec.template.metadata.withLabels({ name: name, 'rollout-group': 'ingester' }) +
statefulSet.mixin.spec.selector.withMatchLabels({ name: name, 'rollout-group': 'ingester' }) +
statefulSet.mixin.spec.updateStrategy.withType('OnDelete') +
statefulSet.mixin.spec.template.spec.withTerminationGracePeriodSeconds(4800) +
statefulSet.spec.withVolumeClaimTemplatesMixin($.ingester_wal_pvc) +
statefulSet.mixin.spec.withReplicas(std.ceil($._config.multi_zone_ingester_replicas / 3)) +
(
if $._config.multi_zone_ingester_use_topology_spread then
statefulSet.spec.template.spec.withTopologySpreadConstraints(
// Evenly spread ingesters among available nodes.
topologySpreadConstraints.labelSelector.withMatchLabels({ name: name }) +
topologySpreadConstraints.withTopologyKey('kubernetes.io/hostname') +
topologySpreadConstraints.withWhenUnsatisfiable('ScheduleAnyway') +
topologySpreadConstraints.withMaxSkew($._config.multi_zone_ingester_topology_spread_max_skew),
)
else {}
) +
(if !std.isObject($._config.node_selector) then {} else statefulSet.mixin.spec.template.spec.withNodeSelectorMixin($._config.node_selector)) +
if $._config.ingester_allow_multiple_replicas_on_same_node then {} else {
spec+:
// Allow to schedule 2+ ingesters in the same zone on the same node, but do not schedule 2+ ingesters in
// different zones on the samee node. In case of 1 node failure in the Kubernetes cluster, only ingesters
// different zones on the same node. In case of 1 node failure in the Kubernetes cluster, only ingesters
// in 1 zone will be affected.
podAntiAffinity.withRequiredDuringSchedulingIgnoredDuringExecution([
podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecutionType.new() +
@@ -104,40 +145,39 @@
$.util.serviceFor(sts, $._config.service_ignored_labels) +
service.mixin.spec.withClusterIp('None'), // Headless.
ingester_zone_a_container:: if !$._config.multi_zone_ingester_enabled then {} else
ingester_zone_a_container:: if !$._config.multi_zone_ingester_enabled then null else
self.newIngesterZoneContainer('a', $.ingester_zone_a_args),
ingester_zone_a_statefulset: if !$._config.multi_zone_ingester_enabled then {} else
self.newIngesterZoneStatefulSet('a', $.ingester_zone_a_container),
ingester_zone_a_service: if !$._config.multi_zone_ingester_enabled then {} else
ingester_zone_a_service: if !$._config.multi_zone_ingester_enabled then null else
$.newIngesterZoneService($.ingester_zone_a_statefulset),
ingester_zone_b_container:: if !$._config.multi_zone_ingester_enabled then {} else
ingester_zone_b_container:: if !$._config.multi_zone_ingester_enabled then null else
self.newIngesterZoneContainer('b', $.ingester_zone_b_args),
ingester_zone_b_statefulset: if !$._config.multi_zone_ingester_enabled then {} else
self.newIngesterZoneStatefulSet('b', $.ingester_zone_b_container),
ingester_zone_b_service: if !$._config.multi_zone_ingester_enabled then {} else
ingester_zone_b_service: if !$._config.multi_zone_ingester_enabled then null else
$.newIngesterZoneService($.ingester_zone_b_statefulset),
ingester_zone_c_container:: if !$._config.multi_zone_ingester_enabled then {} else
ingester_zone_c_container:: if !$._config.multi_zone_ingester_enabled then null else
self.newIngesterZoneContainer('c', $.ingester_zone_c_args),
ingester_zone_c_statefulset: if !$._config.multi_zone_ingester_enabled then {} else
self.newIngesterZoneStatefulSet('c', $.ingester_zone_c_container),
ingester_zone_c_service: if !$._config.multi_zone_ingester_enabled then {} else
ingester_zone_c_service: if !$._config.multi_zone_ingester_enabled then null else
$.newIngesterZoneService($.ingester_zone_c_statefulset),
ingester_rollout_pdb: if !$._config.multi_zone_ingester_enabled then {} else
ingester_rollout_pdb: if !$._config.multi_zone_ingester_enabled then null else
podDisruptionBudget.new('ingester-rollout-pdb') +
podDisruptionBudget.mixin.metadata.withLabels({ name: 'ingester-rollout-pdb' }) +
podDisruptionBudget.mixin.spec.selector.withMatchLabels({ 'rollout-group': 'ingester' }) +
podDisruptionBudget.mixin.spec.withMaxUnavailable(1),
// Single-zone ingesters shouldn't be configured when multi-zone is enabled.
ingester_statefulset:
// Remove the default "ingester" StatefulSet if multi-zone is enabled and no migration is in progress.
if $._config.multi_zone_ingester_enabled && !$._config.multi_zone_ingester_migration_enabled
@@ -147,7 +187,7 @@
ingester_service:
// Remove the default "ingester" service if multi-zone is enabled and no migration is in progress.
if $._config.multi_zone_ingester_enabled && !$._config.multi_zone_ingester_migration_enabled
then {}
then null
else super.ingester_service,
ingester_pdb:
@@ -158,65 +198,7 @@
else if $._config.multi_zone_ingester_migration_enabled
then super.ingester_pdb + podDisruptionBudget.mixin.spec.withMaxUnavailable(0)
// Remove it if multi-zone is enabled and no migration is in progress.
else {},
// Rollout operator.
local rollout_operator_enabled = $._config.multi_zone_ingester_enabled,
rollout_operator_args:: {
'kubernetes.namespace': $._config.namespace,
},
rollout_operator_container::
container.new('rollout-operator', $._images.rollout_operator) +
container.withArgsMixin($.util.mapToFlags($.rollout_operator_args)) +
container.withPorts([
$.core.v1.containerPort.new('http-metrics', 8001),
]) +
$.util.resourcesRequests('100m', '100Mi') +
$.util.resourcesLimits('1', '200Mi') +
container.mixin.readinessProbe.httpGet.withPath('/ready') +
container.mixin.readinessProbe.httpGet.withPort(8001) +
container.mixin.readinessProbe.withInitialDelaySeconds(5) +
container.mixin.readinessProbe.withTimeoutSeconds(1),
rollout_operator_deployment: if !rollout_operator_enabled then {} else
deployment.new('rollout-operator', 1, [$.rollout_operator_container]) +
deployment.mixin.metadata.withName('rollout-operator') +
deployment.mixin.spec.template.spec.withServiceAccountName('rollout-operator') +
// Ensure Kubernetes doesn't run 2 operators at the same time.
deployment.mixin.spec.strategy.rollingUpdate.withMaxSurge(0) +
deployment.mixin.spec.strategy.rollingUpdate.withMaxUnavailable(1),
rollout_operator_role: if !rollout_operator_enabled then {} else
role.new('rollout-operator-role') +
role.mixin.metadata.withNamespace($._config.namespace) +
role.withRulesMixin([
policyRule.withApiGroups('') +
policyRule.withResources(['pods']) +
policyRule.withVerbs(['list', 'get', 'watch', 'delete']),
policyRule.withApiGroups('apps') +
policyRule.withResources(['statefulsets']) +
policyRule.withVerbs(['list', 'get', 'watch']),
policyRule.withApiGroups('apps') +
policyRule.withResources(['statefulsets/status']) +
policyRule.withVerbs(['update']),
]),
rollout_operator_rolebinding: if !rollout_operator_enabled then {} else
roleBinding.new('rollout-operator-rolebinding') +
roleBinding.mixin.metadata.withNamespace($._config.namespace) +
roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
roleBinding.mixin.roleRef.withKind('Role') +
roleBinding.mixin.roleRef.withName('rollout-operator-role') +
roleBinding.withSubjectsMixin({
kind: 'ServiceAccount',
name: 'rollout-operator',
namespace: $._config.namespace,
}),
rollout_operator_service_account: if !rollout_operator_enabled then {} else
serviceAccount.new('rollout-operator'),
else null,
} + {
distributor_args+:: if $._config.multi_zone_ingester_exclude_default then {
'distributor.excluded-zones': 'zone-default',
@@ -225,4 +207,4 @@
ruler_args+:: if $._config.multi_zone_ingester_exclude_default then {
'distributor.excluded-zones': 'zone-default',
} else {},
}
} + rolloutOperator
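The rollout-operator objects now live in their own file and are pulled in through the trailing `+ rolloutOperator`, so enabling zone-aware ingesters remains a configuration toggle. A sketch of the relevant overrides (replica count and skew are illustrative):

{
  _config+:: {
    multi_zone_ingester_enabled: true,
    multi_zone_ingester_replicas: 9,  // total across all zones; 3 per zone
    multi_zone_ingester_use_topology_spread: true,
    multi_zone_ingester_topology_spread_max_skew: 1,
    multi_zone_ingester_name_prefix: 'ingester-zone',
  },
}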

production/ksonnet/loki/rollout-operator.libsonnet (new file)
@@ -0,0 +1,70 @@
{
local container = $.core.v1.container,
local deployment = $.apps.v1.deployment,
local policyRule = $.rbac.v1.policyRule,
local roleBinding = $.rbac.v1.roleBinding,
local role = $.rbac.v1.role,
local service = $.core.v1.service,
local serviceAccount = $.core.v1.serviceAccount,
_images+:: {
rollout_operator: 'grafana/rollout-operator:v0.1.1',
},
rollout_operator_args:: {
'kubernetes.namespace': $._config.namespace,
},
local rollout_operator_enabled = $._config.multi_zone_ingester_enabled,
rollout_operator_container::
container.new('rollout-operator', $._images.rollout_operator) +
container.withArgsMixin($.util.mapToFlags($.rollout_operator_args)) +
container.withPorts([
$.core.v1.containerPort.new('http-metrics', 8001),
]) +
$.util.resourcesRequests('100m', '100Mi') +
$.util.resourcesLimits('1', '200Mi') +
container.mixin.readinessProbe.httpGet.withPath('/ready') +
container.mixin.readinessProbe.httpGet.withPort(8001) +
container.mixin.readinessProbe.withInitialDelaySeconds(5) +
container.mixin.readinessProbe.withTimeoutSeconds(1),
rollout_operator_deployment: if !rollout_operator_enabled then {} else
deployment.new('rollout-operator', 1, [$.rollout_operator_container]) +
deployment.mixin.metadata.withName('rollout-operator') +
deployment.mixin.spec.template.spec.withServiceAccountName('rollout-operator') +
// Ensure Kubernetes doesn't run 2 operators at the same time.
deployment.mixin.spec.strategy.rollingUpdate.withMaxSurge(0) +
deployment.mixin.spec.strategy.rollingUpdate.withMaxUnavailable(1),
rollout_operator_role: if !rollout_operator_enabled then null else
role.new('rollout-operator-role') +
role.mixin.metadata.withNamespace($._config.namespace) +
role.withRulesMixin([
policyRule.withApiGroups('') +
policyRule.withResources(['pods']) +
policyRule.withVerbs(['list', 'get', 'watch', 'delete']),
policyRule.withApiGroups('apps') +
policyRule.withResources(['statefulsets']) +
policyRule.withVerbs(['list', 'get', 'watch', 'update', 'patch']),
policyRule.withApiGroups('apps') +
policyRule.withResources(['statefulsets/status']) +
policyRule.withVerbs(['update']),
]),
rollout_operator_rolebinding: if !rollout_operator_enabled then null else
roleBinding.new('rollout-operator-rolebinding') +
roleBinding.mixin.metadata.withNamespace($._config.namespace) +
roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
roleBinding.mixin.roleRef.withKind('Role') +
roleBinding.mixin.roleRef.withName('rollout-operator-role') +
roleBinding.withSubjectsMixin({
kind: 'ServiceAccount',
name: 'rollout-operator',
namespace: $._config.namespace,
}),
rollout_operator_service_account: if !rollout_operator_enabled then null else
serviceAccount.new('rollout-operator'),
}

production/ksonnet/loki/shipper.libsonnet
@@ -16,8 +16,6 @@
boltdb_shipper_shared_store: error 'must define boltdb_shipper_shared_store when using_boltdb_shipper=true. If this is not intentional, consider disabling it. shared_store is a backend key from the storage_config, such as (gcs) or (s3)',
tsdb_shipper_shared_store: error 'must define tsdb_shipper_shared_store when using_tsdb_shipper=true. If this is not intentional, consider disabling it. shared_store is a backend key from the storage_config, such as (gcs) or (s3)',
// run ingesters and queriers as statefulsets when using boltdb-shipper to avoid using node disk for storing the index.
stateful_ingesters: if self.using_shipper_store then true else super.stateful_ingesters,
stateful_queriers: if self.using_shipper_store && !self.use_index_gateway then true else super.stateful_queriers,
compactor_pvc_size: '10Gi',

production/ksonnet/loki/wal.libsonnet (deleted)
@@ -1,45 +0,0 @@
local k = import 'ksonnet-util/kausal.libsonnet';
{
local with(x) = if $._config.wal_enabled then x else {},
_config+:: {
stateful_ingesters: if $._config.wal_enabled then true else super.stateful_ingesters,
loki+: with({
ingester+: {
wal+: {
enabled: true,
dir: '/loki/wal',
replay_memory_ceiling: '7GB', // should be set upto ~50% of available memory
},
},
}),
},
local pvc = k.core.v1.persistentVolumeClaim,
ingester_wal_pvc:: with(
pvc.new('ingester-wal') +
pvc.mixin.spec.resources.withRequests({ storage: '150Gi' }) +
pvc.mixin.spec.withAccessModes(['ReadWriteOnce']) +
pvc.mixin.spec.withStorageClassName($._config.ingester_pvc_class)
),
local container = k.core.v1.container,
local volumeMount = k.core.v1.volumeMount,
ingester_container+:: with(
k.util.resourcesRequests('1', '7Gi') +
k.util.resourcesLimits('2', '14Gi') +
container.withVolumeMountsMixin([
volumeMount.new('ingester-wal', $._config.loki.ingester.wal.dir),
]),
),
local statefulSet = k.apps.v1.statefulSet,
ingester_statefulset+: with(
statefulSet.spec.withVolumeClaimTemplatesMixin($.ingester_wal_pvc),
),
}