From 1c954166b87a3b4920a39d9c8837fc3c93bd85bc Mon Sep 17 00:00:00 2001 From: Travis Patterson Date: Mon, 23 Jan 2023 00:31:18 -0700 Subject: [PATCH] Fix documentation linter errors (#8229) --- docs/sources/best-practices/_index.md | 13 ++-- docs/sources/clients/_index.md | 19 +++--- docs/sources/clients/aws/_index.md | 7 +- docs/sources/clients/aws/ec2/_index.md | 21 +++--- docs/sources/clients/aws/ecs/_index.md | 7 +- docs/sources/clients/aws/eks/_index.md | 7 +- docs/sources/clients/docker-driver/_index.md | 13 ++-- .../clients/docker-driver/configuration.md | 61 +++++++++--------- docs/sources/clients/fluentbit/_index.md | 9 +-- docs/sources/clients/fluentd/_index.md | 11 ++-- docs/sources/clients/k6/_index.md | 15 +++-- docs/sources/clients/k6/log-generation.md | 1 + docs/sources/clients/k6/query-scenario.md | 1 + docs/sources/clients/k6/write-scenario.md | 3 +- .../sources/clients/lambda-promtail/_index.md | 11 ++-- docs/sources/clients/logstash/_index.md | 9 +-- docs/sources/clients/promtail/_index.md | 19 +++--- .../sources/clients/promtail/configuration.md | 27 ++++---- docs/sources/clients/promtail/gcplog-cloud.md | 7 +- docs/sources/clients/promtail/installation.md | 3 +- .../{logrotation.md => logrotation/_index.md} | 11 ++-- .../logrotation-components.png | Bin .../logrotation-copy-and-truncate.png | Bin .../logrotation-rename-and-create.png | Bin docs/sources/clients/promtail/pipelines.md | 37 +++++------ docs/sources/clients/promtail/scraping.md | 9 +-- .../sources/clients/promtail/stages/_index.md | 45 ++++++------- docs/sources/clients/promtail/stages/cri.md | 3 +- .../clients/promtail/stages/decolorize.md | 3 +- .../sources/clients/promtail/stages/docker.md | 3 +- docs/sources/clients/promtail/stages/drop.md | 5 +- docs/sources/clients/promtail/stages/json.md | 5 +- .../clients/promtail/stages/labelallow.md | 3 +- .../clients/promtail/stages/labeldrop.md | 3 +- .../sources/clients/promtail/stages/labels.md | 3 +- docs/sources/clients/promtail/stages/limit.md | 7 +- .../sources/clients/promtail/stages/logfmt.md | 6 +- docs/sources/clients/promtail/stages/match.md | 7 +- .../clients/promtail/stages/metrics.md | 3 +- .../clients/promtail/stages/multiline.md | 5 +- .../sources/clients/promtail/stages/output.md | 3 +- docs/sources/clients/promtail/stages/pack.md | 5 +- docs/sources/clients/promtail/stages/regex.md | 3 +- .../clients/promtail/stages/replace.md | 3 +- .../clients/promtail/stages/static_labels.md | 3 +- .../clients/promtail/stages/template.md | 3 +- .../sources/clients/promtail/stages/tenant.md | 5 +- .../clients/promtail/stages/timestamp.md | 3 +- .../_index.md} | 13 ++-- .../{ => troubleshooting}/inspect.png | Bin docs/sources/community/_index.md | 7 +- docs/sources/community/contributing.md | 1 + docs/sources/community/getting-in-touch.md | 5 +- docs/sources/community/governance.md | 27 ++++---- docs/sources/configuration/query-frontend.md | 3 +- .../2020-02-Promtail-Push-API.md | 3 +- .../2020-09-Write-Ahead-Log.md | 3 +- .../2021-01-Ordering-Constraint-Removal.md | 3 +- docs/sources/design-documents/_index.md | 9 +-- docs/sources/design-documents/labels.md | 3 +- docs/sources/fundamentals/_index.md | 3 +- .../fundamentals/architecture/_index.md | 9 +-- .../{components.md => components/_index.md} | 13 ++-- .../loki_architecture_components.svg | 0 .../_index.md} | 11 ++-- .../microservices-mode.png | Bin .../monolithic-mode.png | Bin .../simple-scalable.png | Bin .../{rings.md => rings/_index.md} | 5 +- .../{ => rings}/ring-overview.png | Bin 
docs/sources/fundamentals/labels.md | 7 +- docs/sources/fundamentals/overview/_index.md | 5 +- docs/sources/getting-started/_index.md | 12 ++-- docs/sources/installation/_index.md | 13 ++-- docs/sources/installation/docker.md | 3 +- docs/sources/installation/helm/_index.md | 4 +- docs/sources/installation/helm/concepts.md | 4 +- .../helm/configure-storage/index.md | 6 +- .../helm/install-monolithic/index.md | 4 +- .../helm/install-scalable/index.md | 4 +- .../helm/migrate-from-distributed/index.md | 2 +- .../index.md | 2 +- .../installation/install-from-source.md | 1 + docs/sources/installation/istio.md | 5 ++ docs/sources/installation/local.md | 7 +- docs/sources/installation/sizing/index.md | 2 +- docs/sources/installation/tanka.md | 5 +- docs/sources/lids/0001-Introduction.md | 5 +- docs/sources/lids/_index.md | 3 +- docs/sources/lids/template.md | 5 +- docs/sources/logql/_index.md | 7 +- docs/sources/logql/analyzer.md | 4 +- docs/sources/logql/ip.md | 1 + .../{log_queries.md => log_queries/_index.md} | 17 ++--- .../{ => log_queries}/query_components.png | Bin docs/sources/logql/metric_queries.md | 7 +- docs/sources/logql/template_functions.md | 3 +- docs/sources/maintaining/_index.md | 3 +- .../maintaining/release-loki-build-image.md | 3 +- docs/sources/maintaining/release.md | 3 +- docs/sources/operations/_index.md | 1 + docs/sources/operations/authentication.md | 7 +- .../operations/automatic-stream-sharding.md | 2 +- docs/sources/operations/blocking-queries.md | 5 +- docs/sources/operations/grafana.md | 5 +- docs/sources/operations/multi-tenancy.md | 3 +- docs/sources/operations/observability.md | 7 +- docs/sources/operations/overrides-exporter.md | 3 +- docs/sources/operations/recording-rules.md | 11 ++-- .../request-validation-rate-limits.md | 29 +++++---- docs/sources/operations/scalability.md | 7 +- .../_index.md} | 2 +- .../shuffle-sharding-probability.png | Bin docs/sources/operations/storage/_index.md | 17 ++--- .../operations/storage/boltdb-shipper.md | 9 +-- docs/sources/operations/storage/filesystem.md | 5 +- .../operations/storage/logs-deletion.md | 4 +- docs/sources/operations/storage/retention.md | 19 +++--- .../storage/{schema.md => schema/_index.md} | 5 +- .../storage/{ => schema}/schema.png | Bin .../_index.md} | 27 ++++---- ...able-manager-active-vs-inactive-tables.png | Bin .../table-manager-periodic-tables.png | Bin .../table-manager-retention.png | Bin docs/sources/operations/storage/wal.md | 7 +- docs/sources/operations/troubleshooting.md | 9 +-- docs/sources/release-notes/_index.md | 11 ++-- docs/sources/release-notes/v2-3.md | 21 +++--- docs/sources/release-notes/v2-4.md | 19 +++--- docs/sources/release-notes/v2-5.md | 9 +-- docs/sources/release-notes/v2-6.md | 7 +- docs/sources/release-notes/v2-7.md | 7 +- docs/sources/rules/_index.md | 17 ++--- docs/sources/storage/_index.md | 13 ++-- docs/sources/tools/_index.md | 3 +- docs/sources/tools/logcli.md | 3 +- docs/sources/upgrading/_index.md | 17 ++--- 137 files changed, 569 insertions(+), 457 deletions(-) rename docs/sources/clients/promtail/{logrotation.md => logrotation/_index.md} (93%) rename docs/sources/clients/promtail/{ => logrotation}/logrotation-components.png (100%) rename docs/sources/clients/promtail/{ => logrotation}/logrotation-copy-and-truncate.png (100%) rename docs/sources/clients/promtail/{ => logrotation}/logrotation-rename-and-create.png (100%) rename docs/sources/clients/promtail/{troubleshooting.md => troubleshooting/_index.md} (91%) rename docs/sources/clients/promtail/{ => 
troubleshooting}/inspect.png (100%) rename docs/sources/fundamentals/architecture/{components.md => components/_index.md} (95%) rename docs/sources/fundamentals/architecture/{ => components}/loki_architecture_components.svg (100%) rename docs/sources/fundamentals/architecture/{deployment-modes.md => deployment-modes/_index.md} (90%) rename docs/sources/fundamentals/architecture/{ => deployment-modes}/microservices-mode.png (100%) rename docs/sources/fundamentals/architecture/{ => deployment-modes}/monolithic-mode.png (100%) rename docs/sources/fundamentals/architecture/{ => deployment-modes}/simple-scalable.png (100%) rename docs/sources/fundamentals/architecture/{rings.md => rings/_index.md} (94%) rename docs/sources/fundamentals/architecture/{ => rings}/ring-overview.png (100%) rename docs/sources/logql/{log_queries.md => log_queries/_index.md} (97%) rename docs/sources/logql/{ => log_queries}/query_components.png (100%) rename docs/sources/operations/{shuffle-sharding.md => shuffle-sharding/_index.md} (98%) rename docs/sources/operations/{ => shuffle-sharding}/shuffle-sharding-probability.png (100%) rename docs/sources/operations/storage/{schema.md => schema/_index.md} (96%) rename docs/sources/operations/storage/{ => schema}/schema.png (100%) rename docs/sources/operations/storage/{table-manager.md => table-manager/_index.md} (88%) rename docs/sources/operations/storage/{ => table-manager}/table-manager-active-vs-inactive-tables.png (100%) rename docs/sources/operations/storage/{ => table-manager}/table-manager-periodic-tables.png (100%) rename docs/sources/operations/storage/{ => table-manager}/table-manager-retention.png (100%) diff --git a/docs/sources/best-practices/_index.md b/docs/sources/best-practices/_index.md index 7838b925d0..6703e46dac 100644 --- a/docs/sources/best-practices/_index.md +++ b/docs/sources/best-practices/_index.md @@ -1,8 +1,9 @@ --- title: Best practices +description: Grafana Loki label best practices weight: 400 --- -# Grafana Loki label best practices +# Best practices Grafana Loki is under active development, and we are constantly working to improve performance. But here are some of the most current best practices for labels that will give you the best experience with Loki. @@ -22,7 +23,7 @@ This may seem surprising, but if applications have medium to low volume, that la Above, we mentioned not to add labels until you _need_ them, so when would you _need_ labels?? A little farther down is a section on `chunk_target_size`. If you set this to 1MB (which is reasonable), this will try to cut chunks at 1MB compressed size, which is about 5MB-ish of uncompressed logs (might be as much as 10MB depending on compression). If your logs have sufficient volume to write 5MB in less time than `max_chunk_age`, or **many** chunks in that timeframe, you might want to consider splitting it into separate streams with a dynamic label. -What you want to avoid is splitting a log file into streams, which result in chunks getting flushed because the stream is idle or hits the max age before being full. As of [Loki 1.4.0](https://grafana.com/blog/2020/04/01/loki-v1.4.0-released-with-query-statistics-and-up-to-300x-regex-optimization/), there is a metric which can help you understand why chunks are flushed `sum by (reason) (rate(loki_ingester_chunks_flushed_total{cluster="dev"}[1m]))`. +What you want to avoid is splitting a log file into streams, which result in chunks getting flushed because the stream is idle or hits the max age before being full. 
As of [Loki 1.4.0](/blog/2020/04/01/loki-v1.4.0-released-with-query-statistics-and-up-to-300x-regex-optimization/), there is a metric which can help you understand why chunks are flushed `sum by (reason) (rate(loki_ingester_chunks_flushed_total{cluster="dev"}[1m]))`. It’s not critical that every chunk be full when flushed, but it will improve many aspects of operation. As such, our current guidance here is to avoid dynamic labels as much as possible and instead favor filter expressions. For example, don’t add a `level` dynamic label, just `|= "level=debug"` instead. @@ -34,11 +35,11 @@ Try to keep values bounded to as small a set as possible. We don't have perfect ## Be aware of dynamic labels applied by clients -Loki has several client options: [Promtail](https://github.com/grafana/loki/tree/master/docs/sources/clients/promtail) (which also supports systemd journal ingestion and TCP-based syslog ingestion), [Fluentd](https://github.com/grafana/loki/tree/main/clients/cmd/fluentd), [Fluent Bit](https://github.com/grafana/loki/tree/main/clients/cmd/fluent-bit), a [Docker plugin](https://grafana.com/blog/2019/07/15/lokis-path-to-ga-docker-logging-driver-plugin-support-for-systemd/), and more! +Loki has several client options: [Promtail](/grafana/loki/tree/master/docs/sources/clients/promtail) (which also supports systemd journal ingestion and TCP-based syslog ingestion), [Fluentd](https://github.com/grafana/loki/tree/main/clients/cmd/fluentd), [Fluent Bit](https://github.com/grafana/loki/tree/main/clients/cmd/fluent-bit), a [Docker plugin](/blog/2019/07/15/lokis-path-to-ga-docker-logging-driver-plugin-support-for-systemd/), and more! Each of these come with ways to configure what labels are applied to create log streams. But be aware of what dynamic labels might be applied. Use the Loki series API to get an idea of what your log streams look like and see if there might be ways to reduce streams and cardinality. -Series information can be queried through the [Series API](https://grafana.com/docs/loki/latest/api/#series), or you can use [logcli](https://grafana.com/docs/loki/latest/getting-started/logcli/). +Series information can be queried through the [Series API](/docs/loki/latest/api/#series), or you can use [logcli](/docs/loki/latest/getting-started/logcli/). In Loki 1.6.0 and newer the logcli series command added the `--analyze-labels` flag specifically for debugging high cardinality labels: @@ -69,7 +70,7 @@ Loki can cache data at many levels, which can drastically improve performance. D ## Time ordering of logs -Loki [accepts out-of-order writes](../configuration/#accept-out-of-order-writes) _by default_. +Loki [accepts out-of-order writes]({{< relref "../configuration/#accept-out-of-order-writes" >}}) _by default_. This section identifies best practices when Loki is _not_ configured to accept out-of-order writes. One issue many people have with Loki is their client receiving errors for out of order log entries. This happens because of this hard and fast rule within Loki: @@ -101,7 +102,7 @@ What can we do about this? What if this was because the sources of these logs we {job="syslog", instance="host2"} 00:00:02 i'm a syslog! <- Accepted, still in order for stream 2 ``` -But what if the application itself generated logs that were out of order? Well, I'm afraid this is a problem. 
If you are extracting the timestamp from the log line with something like [the Promtail pipeline stage](https://grafana.com/docs/loki/latest/clients/promtail/stages/timestamp/), you could instead _not_ do this and let Promtail assign a timestamp to the log lines. Or you can hopefully fix it in the application itself. +But what if the application itself generated logs that were out of order? Well, I'm afraid this is a problem. If you are extracting the timestamp from the log line with something like [the Promtail pipeline stage](/docs/loki/latest/clients/promtail/stages/timestamp/), you could instead _not_ do this and let Promtail assign a timestamp to the log lines. Or you can hopefully fix it in the application itself. It's also worth noting that the batching nature of the Loki push API can lead to some instances of out of order errors being received which are really false positives. (Perhaps a batch partially succeeded and was present; or anything that previously succeeded would return an out of order entry; or anything new would be accepted.) diff --git a/docs/sources/clients/_index.md b/docs/sources/clients/_index.md index bd15056e2d..bd9b72d722 100644 --- a/docs/sources/clients/_index.md +++ b/docs/sources/clients/_index.md @@ -1,21 +1,22 @@ --- title: Clients +description: Grafana Loki clients weight: 600 --- -# Grafana Loki clients +# Clients Grafana Loki supports the following official clients for sending logs: -- [Promtail](promtail/) -- [Docker Driver](docker-driver/) -- [Fluentd](fluentd/) -- [Fluent Bit](fluentbit/) -- [Logstash](logstash/) -- [Lambda Promtail](lambda-promtail/) +- [Promtail]({{}}) +- [Docker Driver]({{}}) +- [Fluentd]({{}}) +- [Fluent Bit]({{}}) +- [Logstash]({{}}) +- [Lambda Promtail]({{}}) There are also a number of third-party clients, see [Unofficial clients](#unofficial-clients). -The [xk6-loki extension](https://github.com/grafana/xk6-loki) permits [load testing Loki](k6/). +The [xk6-loki extension](https://github.com/grafana/xk6-loki) permits [load testing Loki]({{}}). ## Picking a client @@ -58,7 +59,7 @@ By adding our output plugin you can quickly try Loki without doing big configura ### Lambda Promtail -This is a workflow combining the Promtail push-api [scrape config](promtail/configuration#loki_push_api_config) and the [lambda-promtail](lambda-promtail/) AWS Lambda function which pipes logs from Cloudwatch to Loki. +This is a workflow combining the Promtail push-api [scrape config]({{}}) and the [lambda-promtail]({{}}) AWS Lambda function which pipes logs from Cloudwatch to Loki. This is a good choice if you're looking to try out Loki in a low-footprint way or if you wish to monitor AWS lambda logs in Loki. 
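For reference alongside the push-api scrape config mentioned above, here is a minimal sketch of what a Promtail `loki_push_api` scrape config for this workflow could look like. The job name, listen ports, and `source` label are illustrative assumptions, not values mandated by the documentation:

```yaml
scrape_configs:
  - job_name: push                   # hypothetical job name; receives logs pushed by lambda-promtail
    loki_push_api:
      server:
        http_listen_port: 3500       # assumed port; any free port works
        grpc_listen_port: 3600
      labels:
        source: lambda-promtail      # example static label added to every pushed line
```

lambda-promtail (or any other Loki client) can then be pointed at this Promtail's push endpoint rather than directly at Loki.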
diff --git a/docs/sources/clients/aws/_index.md b/docs/sources/clients/aws/_index.md index 1c6bae40e4..ea71b88d74 100644 --- a/docs/sources/clients/aws/_index.md +++ b/docs/sources/clients/aws/_index.md @@ -1,10 +1,11 @@ --- title: AWS +description: AWS Clients weight: 30 --- Sending logs from AWS services to Grafana Loki is a little different depending on what AWS service you are using: -* [Elastic Compute Cloud (EC2)](ec2/) -* [Elastic Container Service (ECS)](ecs/) -* [Elastic Kubernetes Service (EKS)](eks/) +* [Elastic Compute Cloud (EC2)]({{}}) +* [Elastic Container Service (ECS)]({{}}) +* [Elastic Kubernetes Service (EKS)]({{}}) diff --git a/docs/sources/clients/aws/ec2/_index.md b/docs/sources/clients/aws/ec2/_index.md index 4a1fe76b87..b1f5aa1e63 100644 --- a/docs/sources/clients/aws/ec2/_index.md +++ b/docs/sources/clients/aws/ec2/_index.md @@ -1,13 +1,14 @@ --- title: EC2 +description: Running Promtail on AWS EC2 --- -# Running Promtail on AWS EC2 +# EC2 -In this tutorial we're going to setup [Promtail](../../promtail/) on an AWS EC2 instance and configure it to sends all its logs to a Grafana Loki instance. +In this tutorial we're going to setup [Promtail]({{< relref "../../promtail/" >}}) on an AWS EC2 instance and configure it to sends all its logs to a Grafana Loki instance. -- [Running Promtail on AWS EC2](#running-promtail-on-aws-ec2) +- [Running Promtail on AWS EC2](#ec2) - [Requirements](#requirements) - [Creating an EC2 instance](#creating-an-ec2-instance) - [Setting up Promtail](#setting-up-promtail) @@ -47,7 +48,7 @@ aws ec2 create-security-group --group-name promtail-ec2 --description "promtail } ``` -Now let's authorize inbound access for SSH and [Promtail](../../promtail/) server: +Now let's authorize inbound access for SSH and [Promtail]({{< relref "../../promtail/" >}}) server: ```bash aws ec2 authorize-security-group-ingress --group-id sg-02c489bbdeffdca1d --protocol tcp --port 22 --cidr 0.0.0.0/0 @@ -87,7 +88,7 @@ ssh ec2-user@ec2-13-59-62-37.us-east-2.compute.amazonaws.com ## Setting up Promtail First let's make sure we're running as root by using `sudo -s`. -Next we'll download, install and give executable right to [Promtail](../../promtail/). +Next we'll download, install and give executable right to [Promtail]({{< relref "../../promtail/" >}}). ```bash mkdir /opt/promtail && cd /opt/promtail @@ -96,7 +97,7 @@ unzip "promtail-linux-amd64.zip" chmod a+x "promtail-linux-amd64" ``` -Now we're going to download the [Promtail configuration](../../promtail/) file below and edit it, don't worry we will explain what those means. +Now we're going to download the [Promtail configuration]({{< relref "../../promtail/" >}}) file below and edit it, don't worry we will explain what those means. The file is also available as a gist at [cyriltovena/promtail-ec2.yaml][config gist]. ```bash @@ -139,11 +140,11 @@ scrape_configs: target_label: __host__ ``` -The **server** section indicates Promtail to bind his http server to 3100. Promtail serves HTTP pages for [troubleshooting](../../promtail/troubleshooting) service discovery and targets. +The **server** section indicates Promtail to bind his http server to 3100. Promtail serves HTTP pages for [troubleshooting]({{< relref "../../promtail/troubleshooting" >}}) service discovery and targets. The **clients** section allow you to target your loki instance, if you're using GrafanaCloud simply replace `` and `` with your credentials. 
Otherwise just replace the whole URL with your custom Loki instance (e.g. `http://my-loki-instance.my-org.com/loki/api/v1/push`). -[Promtail](../../promtail/) uses the same [Prometheus **scrape_configs**][prometheus scrape config]. This means if you already own a Prometheus instance the config will be very similar and easy to grasp. +[Promtail]({{< relref "../../promtail/" >}}) uses the same [Prometheus **scrape_configs**][prometheus scrape config]. This means if you already own a Prometheus instance the config will be very similar and easy to grasp. Since we're running on AWS EC2 we want to use EC2 service discovery; this will allow us to scrape metadata about the current instance (and even your custom tags) and attach those to our logs. This way, managing and querying logs will be much easier. @@ -234,7 +235,7 @@ Jul 08 15:48:57 ip-172-31-45-69.us-east-2.compute.internal promtail-linux-amd64[ Jul 08 15:48:57 ip-172-31-45-69.us-east-2.compute.internal promtail-linux-amd64[2732]: level=info ts=2020-07-08T15:48:57.56029474Z caller=main.go:67 msg="Starting Promtail" version="(version=1.6.0, branch=HEAD, revision=12c7eab8)" -You can now verify in Grafana that Loki has correctly received your instance logs by using the [LogQL](../../../logql/) query `{zone="us-east-2"}`. +You can now verify in Grafana that Loki has correctly received your instance logs by using the [LogQL]({{< relref "../../../logql/" >}}) query `{zone="us-east-2"}`. ![Grafana Loki logs][ec2 logs] @@ -263,7 +264,7 @@ Note that you can use [relabeling][relabeling] to convert systemd labels to matc That's it, save the config and you can `reboot` the machine (or simply restart the service `systemctl restart promtail.service`). -Let's head back to Grafana and verify that your Promtail logs are available in Grafana by using the [LogQL](../../../logql/) query `{unit="promtail.service"}` in Explore. Finally make sure to checkout [live tailing][live tailing] to see logs appearing as they are ingested in Loki. +Let's head back to Grafana and verify that your Promtail logs are available in Grafana by using the [LogQL]({{< relref "../../../logql/" >}}) query `{unit="promtail.service"}` in Explore. Finally, make sure to check out [live tailing][live tailing] to see logs appearing as they are ingested in Loki. [promtail]: ../../promtail/README [aws cli]: https://aws.amazon.com/cli/ diff --git a/docs/sources/clients/aws/ecs/_index.md b/docs/sources/clients/aws/ecs/_index.md index d1f94fb76d..52b6ca4b23 100644 --- a/docs/sources/clients/aws/ecs/_index.md +++ b/docs/sources/clients/aws/ecs/_index.md @@ -1,7 +1,8 @@ --- title: ECS +description: Sending Logs From AWS Elastic Container Service (ECS) --- -# Sending Logs From AWS Elastic Container Service (ECS) +# ECS [ECS][ECS] is the fully managed container orchestration service by Amazon. Combined with [Fargate][Fargate] you can run your container workload without the need to provision your own compute resources. In this tutorial we will see how you can leverage [Firelens][Firelens], an AWS log router, to forward all your logs and your workload metadata to a Grafana Loki instance.
@@ -9,7 +10,7 @@ After this tutorial you will able to query all your logs in one place using Graf -- [Sending Logs From AWS Elastic Container Service (ECS)](#sending-logs-from-aws-elastic-container-service-ecs) +- [Sending Logs From AWS Elastic Container Service (ECS)](#ecs) - [Requirements](#requirements) - [Setting up the ECS cluster](#setting-up-the-ecs-cluster) - [Creating your task definition](#creating-your-task-definition) @@ -73,7 +74,7 @@ aws iam create-role --role-name ecsTaskExecutionRole --assume-role-policy-docum Note down the [ARN][arn] of this new role, we'll use it later to create an ECS task. -Finally we'll give the [ECS task execution policy][ecs iam](`AmazonECSTaskExecutionRolePolicy`) to the created role, this will allows us to manage logs with [Firelens][Firelens]: +Finally we'll give the [ECS task execution policy][ecs iam] `AmazonECSTaskExecutionRolePolicy` to the created role, this will allows us to manage logs with [Firelens][Firelens]: ```bash aws iam attach-role-policy --role-name ecsTaskExecutionRole --policy-arn "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" diff --git a/docs/sources/clients/aws/eks/_index.md b/docs/sources/clients/aws/eks/_index.md index ebf18d52bb..3850259b4d 100644 --- a/docs/sources/clients/aws/eks/_index.md +++ b/docs/sources/clients/aws/eks/_index.md @@ -1,7 +1,8 @@ --- title: EKS +description: Sending logs from EKS with Promtail --- -# Sending logs from EKS with Promtail +# EKS In this tutorial we'll see how to set up Promtail on [EKS][eks]. Amazon Elastic Kubernetes Service (Amazon [EKS][eks]) is a fully managed Kubernetes service, using Promtail we'll get full visibility into our cluster logs. We'll start by forwarding pods logs then nodes services and finally Kubernetes events. @@ -9,7 +10,7 @@ After this tutorial you will able to query all your logs in one place using Graf -- [Sending logs from EKS with Promtail](#sending-logs-from-eks-with-promtail) +- [Sending logs from EKS with Promtail](#eks) - [Requirements](#requirements) - [Setting up the cluster](#setting-up-the-cluster) - [Adding Promtail DaemonSet](#adding-promtail-daemonset) @@ -51,7 +52,7 @@ Server Version: version.Info{Major:"1", Minor:"16+", GitVersion:"v1.16.8-eks-fd1 ## Adding Promtail DaemonSet -To ship all your pods logs we're going to set up [Promtail](../../promtail/) as a DaemonSet in our cluster. This means it will run on each nodes of the cluster, we will then configure it to find the logs of your containers on the host. +To ship all your pods logs we're going to set up [Promtail]({{< relref "../../promtail/" >}}) as a DaemonSet in our cluster. This means it will run on each nodes of the cluster, we will then configure it to find the logs of your containers on the host. What's nice about Promtail is that it uses the same [service discovery as Prometheus][prometheus conf], you should make sure the `scrape_configs` of Promtail matches the Prometheus one. Not only this is simpler to configure, but this also means Metrics and Logs will have the same metadata (labels) attached by the Prometheus service discovery. When querying Grafana you will be able to correlate metrics and logs very quickly, you can read more about this on our [blogpost][correlate]. 
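As a rough illustration of the Prometheus-style service discovery mentioned above, a Promtail `scrape_configs` entry for pod logs might look like the sketch below. The job name, label choices, and path relabeling are assumptions for illustration, not the exact configuration shipped with this tutorial:

```yaml
scrape_configs:
  - job_name: kubernetes-pods              # hypothetical job name
    kubernetes_sd_configs:
      - role: pod                          # discover targets from the Kubernetes API, like Prometheus
    relabel_configs:
      # Carry pod metadata over as Loki labels so logs and metrics share the same labels.
      - source_labels: [__meta_kubernetes_namespace]
        target_label: namespace
      - source_labels: [__meta_kubernetes_pod_name]
        target_label: pod
      # Point Promtail at the container log files on the node.
      - source_labels: [__meta_kubernetes_pod_uid, __meta_kubernetes_pod_container_name]
        separator: /
        target_label: __path__
        replacement: /var/log/pods/*$1/*.log
```

Because the discovery and relabeling mirror Prometheus, the same relabel rules can usually be shared between the two configurations.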
diff --git a/docs/sources/clients/docker-driver/_index.md b/docs/sources/clients/docker-driver/_index.md index c979b3a682..cbbb26cf53 100644 --- a/docs/sources/clients/docker-driver/_index.md +++ b/docs/sources/clients/docker-driver/_index.md @@ -1,18 +1,19 @@ --- title: Docker driver +description: Docker driver client weight: 40 --- -# Docker Driver Client +# Docker driver Grafana Loki officially supports a Docker plugin that will read logs from Docker containers and ship them to Loki. The plugin can be configured to send the logs -to a private Loki instance or [Grafana Cloud](https://grafana.com/oss/loki). +to a private Loki instance or [Grafana Cloud](/oss/loki). > Docker plugins are not yet supported on Windows; see the > [Docker Engine managed plugin system](https://docs.docker.com/engine/extend) documentation for more information. Documentation on configuring the Loki Docker Driver can be found on the -[configuration page](./configuration). +[configuration page]({{}}). If you have any questions or issues using the Docker plugin feel free to open an issue in this [repository](https://github.com/grafana/loki/issues). @@ -36,7 +37,7 @@ ID NAME DESCRIPTION ENABLED ac720b8fcfdb loki Loki Logging Driver true ``` -Once the plugin is installed it can be [configured](./configuration). +Once the plugin is installed it can be [configured]({{}}). ## Upgrading @@ -59,8 +60,8 @@ docker plugin disable loki --force docker plugin rm loki ``` -# Know Issues +## Known Issues The driver keeps all logs in memory and will drop log entries if Loki is not reachable and if the quantity of `max_retries` has been exceeded. To avoid the dropping of log entries, setting `max_retries` to zero allows unlimited retries; the drive will continue trying forever until Loki is again reachable. Trying forever may have undesired consequences, because the Docker daemon will wait for the Loki driver to process all logs of a container, until the container is removed. Thus, the Docker daemon might wait forever if the container is stuck. -Use Promtail's [Docker target](../promtail/configuration/#docker) or [Docker service discovery](../promtail/configuration/#docker_sd_config) to avoid this issue. +Use Promtail's [Docker target]({{}}) or [Docker service discovery]({{}}) to avoid this issue. diff --git a/docs/sources/clients/docker-driver/configuration.md b/docs/sources/clients/docker-driver/configuration.md index 98da9cef4d..3e8abf1293 100644 --- a/docs/sources/clients/docker-driver/configuration.md +++ b/docs/sources/clients/docker-driver/configuration.md @@ -1,14 +1,15 @@ --- title: Configuration +description: Configuring the Docker Driver --- -# Configuring the Docker Driver +# Configuration The Docker daemon on each machine has a default logging driver and each container will use the default driver unless configured otherwise. ## Installation -Before configuring the plugin, [install or upgrade the Grafana Loki Docker Driver Client](../../docker-driver/) +Before configuring the plugin, [install or upgrade the Grafana Loki Docker Driver Client]({{}}) ## Change the logging driver for a container @@ -103,7 +104,7 @@ Once deployed, the Grafana service will send its logs to Loki. ## Labels -Loki can received a set of labels along with log line. These labels are used to index log entries and query back logs using [LogQL stream selector](../../../logql/#log-stream-selector). +Loki can received a set of labels along with log line. These labels are used to index log entries and query back logs using [LogQL stream selector]({{}}). 
By default, the Docker driver will add the following labels to each log line: @@ -196,33 +197,33 @@ services: To specify additional logging driver options, you can use the --log-opt NAME=VALUE flag. -| Option | Required? | Default Value | Description | -|---------------------------------|:---------:|:--------------------------:|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `loki-url` | Yes | | Loki HTTP push endpoint. | -| `loki-external-labels` | No | `container_name={{.Name}}` | Additional label value pair separated by `,` to send with logs. The value is expanded with the [Docker tag template format](https://docs.docker.com/config/containers/logging/log_tags/). (eg: `container_name={{.ID}}.{{.Name}},cluster=prod`) | -| `loki-timeout` | No | `10s` | The timeout to use when sending logs to the Loki instance. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | -| `loki-batch-wait` | No | `1s` | The amount of time to wait before sending a log batch complete or not. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | -| `loki-batch-size` | No | `1048576` | The maximum size of a log batch to send. | -| `loki-min-backoff` | No | `500ms` | The minimum amount of time to wait before retrying a batch. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | -| `loki-max-backoff` | No | `5m` | The maximum amount of time to wait before retrying a batch. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | -| `loki-retries` | No | `10` | The maximum amount of retries for a log batch. Setting it to `0` will retry indefinitely. | -| `loki-pipeline-stage-file` | No | | The location of a pipeline stage configuration file ([example](https://github.com/grafana/loki/blob/main/clients/cmd/docker-driver/pipeline-example.yaml)). Pipeline stages allows to parse log lines to extract more labels, [see associated documentation](../../promtail/stages/). | -| `loki-pipeline-stages` | No | | The pipeline stage configuration provided as a string [see pipeline stages](#pipeline-stages) and [associated documentation](../../promtail/stages/). | -| `loki-relabel-config` | No | | A [Prometheus relabeling configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) allowing you to rename labels [see relabeling](#relabeling). | -| `loki-tenant-id` | No | | Set the tenant id (http header`X-Scope-OrgID`) when sending logs to Loki. It can be overridden by a pipeline stage. | -| `loki-tls-ca-file` | No | | Set the path to a custom certificate authority. | -| `loki-tls-cert-file` | No | | Set the path to a client certificate file. | -| `loki-tls-key-file` | No | | Set the path to a client key. | -| `loki-tls-server-name` | No | | Name used to validate the server certificate. | -| `loki-tls-insecure-skip-verify` | No | `false` | Allow to skip tls verification. | -| `loki-proxy-url` | No | | Proxy URL use to connect to Loki. | -| `no-file` | No | `false` | This indicates the driver to not create log files on disk, however this means you won't be able to use `docker logs` on the container anymore. You can use this if you don't need to use `docker logs` and you run with limited disk space. 
(By default files are created) | -| `keep-file` | No | `false` | This indicates the driver to keep json log files once the container is stopped. By default files are removed, this means you won't be able to use `docker logs` once the container is stopped. | -| `max-size` | No | -1 | The maximum size of the log before it is rolled. A positive integer plus a modifier representing the unit of measure (k, m, or g). Defaults to -1 (unlimited). This is used by json-log required to keep the `docker log` command working. | -| `max-file` | No | 1 | The maximum number of log files that can be present. If rolling the logs creates excess files, the oldest file is removed. Only effective when max-size is also set. A positive integer. Defaults to 1. | -| `labels` | No | | Comma-separated list of keys of labels, which should be included in message, if these labels are specified for container. | -| `env` | No | | Comma-separated list of keys of environment variables to be included in message if they specified for a container. | -| `env-regex` | No | | A regular expression to match logging-related environment variables. Used for advanced log label options. If there is collision between the label and env keys, the value of the env takes precedence. Both options add additional fields to the labels of a logging message. | +| Option | Required? | Default Value | Description | +|---------------------------------|:---------:|:--------------------------:|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `loki-url` | Yes | | Loki HTTP push endpoint. | +| `loki-external-labels` | No | `container_name={{.Name}}` | Additional label value pair separated by `,` to send with logs. The value is expanded with the [Docker tag template format](https://docs.docker.com/config/containers/logging/log_tags/). (eg: `container_name={{.ID}}.{{.Name}},cluster=prod`) | +| `loki-timeout` | No | `10s` | The timeout to use when sending logs to the Loki instance. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | +| `loki-batch-wait` | No | `1s` | The amount of time to wait before sending a log batch complete or not. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | +| `loki-batch-size` | No | `1048576` | The maximum size of a log batch to send. | +| `loki-min-backoff` | No | `500ms` | The minimum amount of time to wait before retrying a batch. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | +| `loki-max-backoff` | No | `5m` | The maximum amount of time to wait before retrying a batch. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | +| `loki-retries` | No | `10` | The maximum amount of retries for a log batch. Setting it to `0` will retry indefinitely. | +| `loki-pipeline-stage-file` | No | | The location of a pipeline stage configuration file ([example](/grafana/loki/blob/main/clients/cmd/docker-driver/pipeline-example.yaml)). Pipeline stages allows to parse log lines to extract more labels, [see associated documentation]({{}}). | +| `loki-pipeline-stages` | No | | The pipeline stage configuration provided as a string [see pipeline stages](#pipeline-stages) and [associated documentation]({{}}). 
| +| `loki-relabel-config` | No | | A [Prometheus relabeling configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) allowing you to rename labels [see relabeling](#relabeling). | +| `loki-tenant-id` | No | | Set the tenant id (http header`X-Scope-OrgID`) when sending logs to Loki. It can be overridden by a pipeline stage. | +| `loki-tls-ca-file` | No | | Set the path to a custom certificate authority. | +| `loki-tls-cert-file` | No | | Set the path to a client certificate file. | +| `loki-tls-key-file` | No | | Set the path to a client key. | +| `loki-tls-server-name` | No | | Name used to validate the server certificate. | +| `loki-tls-insecure-skip-verify` | No | `false` | Allow to skip tls verification. | +| `loki-proxy-url` | No | | Proxy URL use to connect to Loki. | +| `no-file` | No | `false` | This indicates the driver to not create log files on disk, however this means you won't be able to use `docker logs` on the container anymore. You can use this if you don't need to use `docker logs` and you run with limited disk space. (By default files are created) | +| `keep-file` | No | `false` | This indicates the driver to keep json log files once the container is stopped. By default files are removed, this means you won't be able to use `docker logs` once the container is stopped. | +| `max-size` | No | -1 | The maximum size of the log before it is rolled. A positive integer plus a modifier representing the unit of measure (k, m, or g). Defaults to -1 (unlimited). This is used by json-log required to keep the `docker log` command working. | +| `max-file` | No | 1 | The maximum number of log files that can be present. If rolling the logs creates excess files, the oldest file is removed. Only effective when max-size is also set. A positive integer. Defaults to 1. | +| `labels` | No | | Comma-separated list of keys of labels, which should be included in message, if these labels are specified for container. | +| `env` | No | | Comma-separated list of keys of environment variables to be included in message if they specified for a container. | +| `env-regex` | No | | A regular expression to match logging-related environment variables. Used for advanced log label options. If there is collision between the label and env keys, the value of the env takes precedence. Both options add additional fields to the labels of a logging message. | ## Troubleshooting diff --git a/docs/sources/clients/fluentbit/_index.md b/docs/sources/clients/fluentbit/_index.md index 923482e0c2..24416e8f84 100644 --- a/docs/sources/clients/fluentbit/_index.md +++ b/docs/sources/clients/fluentbit/_index.md @@ -1,8 +1,9 @@ --- title: Fluent Bit +description: Fluent Bit Loki Output weight: 50 --- -# Fluent Bit Loki Output +# Fluent Bit [Fluent Bit](https://fluentbit.io/) is a fast and lightweight logs and metrics processor and forwarder that can be configured with the [Grafana Loki output plugin](https://docs.fluentbit.io/manual/pipeline/outputs/loki) to ship logs to Loki. You can define which log files you want to collect using the [`Tail`](https://docs.fluentbit.io/manual/pipeline/inputs/tail) or [`Stdin`](https://docs.fluentbit.io/manual/pipeline/inputs/standard-input) data pipeline input. Additionally, Fluent Bit supports multiple `Filter` and `Parser` plugins (`Kubernetes`, `JSON`, etc.) to structure and alter log lines. 
@@ -43,7 +44,7 @@ helm upgrade --install loki-stack grafana/loki-stack \ ### AWS Elastic Container Service (ECS) You can use fluent-bit Loki Docker image as a Firelens log router in AWS ECS. -For more information about this see our [AWS documentation](../aws/ecs) +For more information about this see our [AWS documentation]({{}}) ### Local @@ -91,7 +92,7 @@ You can also adapt your plugins.conf, removing the need to change the command li ### Labels -Labels are used to [query logs](../../logql) `{container_name="nginx", cluster="us-west1"}`, they are usually metadata about the workload producing the log stream (`instance`, `container_name`, `region`, `cluster`, `level`). In Loki labels are indexed consequently you should be cautious when choosing them (high cardinality label values can have performance drastic impact). +Labels are used to [query logs]({{}}) `{container_name="nginx", cluster="us-west1"}`, they are usually metadata about the workload producing the log stream (`instance`, `container_name`, `region`, `cluster`, `level`). In Loki labels are indexed consequently you should be cautious when choosing them (high cardinality label values can have performance drastic impact). You can use `Labels`, `RemoveKeys` , `LabelKeys` and `LabelMapPath` to how the output plugin will perform labels extraction. @@ -150,7 +151,7 @@ Buffering refers to the ability to store the records somewhere, and while they a The blocking state with some of the input plugins is not acceptable, because it can have an undesirable side effect on the part that generates the logs. Fluent Bit implements a buffering mechanism that is based on parallel processing. Therefore, it cannot send logs in order. There are two ways of handling the out-of-order logs: -- Configure Loki to [accept out-of-order writes](../../configuration/#accept-out-of-order-writes). +- Configure Loki to [accept out-of-order writes]({{}}). - Configure the Loki output plugin to use the buffering mechanism based on [`dque`](https://github.com/joncrlsn/dque), which is compatible with the Loki server strict time ordering: diff --git a/docs/sources/clients/fluentd/_index.md b/docs/sources/clients/fluentd/_index.md index 76a252d163..aa59f7fbdf 100644 --- a/docs/sources/clients/fluentd/_index.md +++ b/docs/sources/clients/fluentd/_index.md @@ -1,12 +1,13 @@ --- title: Fluentd +description: Fluentd Loki Output Plugin weight: 60 --- -# Fluentd Loki Output Plugin +# Fluentd Grafana Loki has a [Fluentd](https://www.fluentd.org/) output plugin called `fluent-plugin-grafana-loki` that enables shipping logs to a private Loki -instance or [Grafana Cloud](https://grafana.com/products/cloud/). +instance or [Grafana Cloud](/products/cloud/). The plugin source code is in the [fluentd directory of the repository](https://github.com/grafana/loki/tree/main/clients/cmd/fluentd). @@ -26,7 +27,7 @@ The Docker image `grafana/fluent-plugin-loki:master` contains [default configura This image also uses `LOKI_URL`, `LOKI_USERNAME`, and `LOKI_PASSWORD` environment variables to specify the Loki's endpoint, user, and password (you can leave the USERNAME and PASSWORD blank if they're not used). -This image will start an instance of Fluentd to forward incoming logs to the specified Loki URL. As an alternate, containerized applications can also use [docker driver plugin](../docker-driver/) to ship logs without needing Fluentd. +This image will start an instance of Fluentd to forward incoming logs to the specified Loki URL. 
As an alternate, containerized applications can also use [docker driver plugin]({{}}) to ship logs without needing Fluentd. ### Example @@ -146,7 +147,7 @@ Use with the `remove_keys kubernetes` option to eliminate metadata from the log. ### Multi-worker usage -Out-of-order inserts are enabled by default in Loki; refer to [accept out-of-order writes](../../configuration/#accept-out-of-order-writes). +Out-of-order inserts are enabled by default in Loki; refer to [accept out-of-order writes]({{}}). If out-of-order inserts are _disabled_, attempting to insert a log entry with an earlier timestamp after a log entry with identical labels but a later timestamp, the insert will fail with `HTTP status code: 500, message: rpc error: code = Unknown desc = Entry out of order`. Therefore, in order to use this plugin in a multi worker Fluentd setup, you'll need to include the worker ID in the labels or otherwise [ensure log streams are always sent to the same worker](https://docs.fluentd.org/deployment/multi-process-workers#less-than-worker-n-greater-than-directive). For example, using [fluent-plugin-record-modifier](https://github.com/repeatedly/fluent-plugin-record-modifier): @@ -182,7 +183,7 @@ This plugin automatically adds a `fluentd_thread` label with the name of the buf ### `url` The URL of the Loki server to send logs to. When sending data, the publish path (`../api/loki/v1/push`) will automatically be appended. -By default the url is set to `https://logs-prod-us-central1.grafana.net`, the url of the Grafana Labs [hosted Loki](https://grafana.com/products/cloud/) service. +By default the url is set to `https://logs-prod-us-central1.grafana.net`, the url of the Grafana Labs [hosted Loki](/products/cloud/) service. #### Proxy Support diff --git a/docs/sources/clients/k6/_index.md b/docs/sources/clients/k6/_index.md index b36d3f0d08..c0d90e778c 100644 --- a/docs/sources/clients/k6/_index.md +++ b/docs/sources/clients/k6/_index.md @@ -1,9 +1,10 @@ --- title: k6 load testing +description: k6 Loki extension load testing weight: 90 --- -# k6 Loki extension load testing +# k6 load testing Grafana [k6](https://k6.io) is a modern load-testing tool. 
Its clean and approachable scripting [API](https://k6.io/docs/javascript-api/) @@ -80,12 +81,12 @@ The `Client` class exposes the following instance methods: | method | description | | ------ | ----------- | | `push()` | shortcut for `pushParameterized(5, 800*1024, 1024*1024)` | -| `pushParameterized(streams, minSize, maxSize)` | execute push request ([POST /loki/api/v1/push]({{< relref "../../api/_index.md#post-lokiapiv1push" >}})) | -| `instantQuery(query, limit)` | execute instant query ([GET /loki/api/v1/query]({{< relref "../../api/_index.md#get-lokiapiv1query" >}})) | -| `client.rangeQuery(query, duration, limit)` | execute range query ([GET /loki/api/v1/query_range]({{< relref "../../api/_index.md#get-lokiapiv1query_range" >}})) | -| `client.labelsQuery(duration)` | execute labels query ([GET /loki/api/v1/labels]({{< relref "../../api/_index.md#get-lokiapiv1labels" >}})) | -| `client.labelValuesQuery(label, duration)` | execute label values query ([GET /loki/api/v1/label/\/values]({{< relref "../../api/_index.md#get-lokiapiv1labelnamevalues" >}})) | -| `client.seriesQuery(matchers, duration)` | execute series query ([GET /loki/api/v1/series]({{< relref "../../api/_index.md#series" >}})) | +| `pushParameterized(streams, minSize, maxSize)` | execute push request ([POST /loki/api/v1/push]({{< relref "../../api/#push-log-entries-to-loki" >}})) | +| `instantQuery(query, limit)` | execute instant query ([GET /loki/api/v1/query]({{< relref "../../api/#query-loki" >}})) | +| `client.rangeQuery(query, duration, limit)` | execute range query ([GET /loki/api/v1/query_range]({{< relref "../../api/#query-loki-over-a-range-of-time" >}})) | +| `client.labelsQuery(duration)` | execute labels query ([GET /loki/api/v1/labels]({{< relref "../../api/#list-labels-within-a-range-of-time" >}})) | +| `client.labelValuesQuery(label, duration)` | execute label values query ([GET /loki/api/v1/label/\/values]({{< relref "../../api/#list-label-values-within-a-range-of-time" >}})) | +| `client.seriesQuery(matchers, duration)` | execute series query ([GET /loki/api/v1/series]({{< relref "../../api/#list-series" >}})) | **Javascript load test example:** diff --git a/docs/sources/clients/k6/log-generation.md b/docs/sources/clients/k6/log-generation.md index 24d3ed8bbd..fbd4a3fef1 100644 --- a/docs/sources/clients/k6/log-generation.md +++ b/docs/sources/clients/k6/log-generation.md @@ -1,5 +1,6 @@ --- title: Log generation +description: Log generation with K6 weight: 10 --- # Log generation diff --git a/docs/sources/clients/k6/query-scenario.md b/docs/sources/clients/k6/query-scenario.md index 03abd3be01..6615af57ee 100644 --- a/docs/sources/clients/k6/query-scenario.md +++ b/docs/sources/clients/k6/query-scenario.md @@ -1,5 +1,6 @@ --- title: Query testing +description: Query testing with K6 weight: 30 --- # Query testing diff --git a/docs/sources/clients/k6/write-scenario.md b/docs/sources/clients/k6/write-scenario.md index 5d4373dc7e..0ecc0b4180 100644 --- a/docs/sources/clients/k6/write-scenario.md +++ b/docs/sources/clients/k6/write-scenario.md @@ -1,8 +1,9 @@ --- title: Write path testing +description: Write path testing with K6 weight: 20 --- -# Write path load testing +# Write path testing There are multiple considerations when load testing a Loki cluster's write path. 
diff --git a/docs/sources/clients/lambda-promtail/_index.md b/docs/sources/clients/lambda-promtail/_index.md index 8c02a07060..6b2fed605f 100644 --- a/docs/sources/clients/lambda-promtail/_index.md +++ b/docs/sources/clients/lambda-promtail/_index.md @@ -1,10 +1,11 @@ --- title: Lambda Promtail +description: Lambda Promtail weight: 20 --- # Lambda Promtail -Grafana Loki includes [Terraform](https://www.terraform.io/) and [CloudFormation](https://aws.amazon.com/cloudformation/) for shipping Cloudwatch and loadbalancer logs to Loki via a [lambda function](https://aws.amazon.com/lambda/). This is done via [lambda-promtail](https://github.com/grafana/loki/tree/master/tools/lambda-promtail) which processes cloudwatch events and propagates them to Loki (or a Promtail instance) via the push-api [scrape config](../promtail/configuration#loki_push_api_config). +Grafana Loki includes [Terraform](https://www.terraform.io/) and [CloudFormation](https://aws.amazon.com/cloudformation/) for shipping Cloudwatch and loadbalancer logs to Loki via a [lambda function](https://aws.amazon.com/lambda/). This is done via [lambda-promtail](https://github.com/grafana/loki/tree/master/tools/lambda-promtail) which processes cloudwatch events and propagates them to Loki (or a Promtail instance) via the push-api [scrape config]({{}}). ## Deployment @@ -55,7 +56,7 @@ To add tenant id add `-var "tenant_id=value"`. Note that the creation of a subscription filter on Cloudwatch in the provided Terraform file only accepts an array of log group names. It does **not** accept strings for regex filtering on the logs contents via the subscription filters. We suggest extending the Terraform file to do so. -Or, have lambda-promtail write to Promtail and use [pipeline stages](https://grafana.com/docs/loki/latest/clients/promtail/stages/drop/). +Or, have lambda-promtail write to Promtail and use [pipeline stages](/docs/loki/latest/clients/promtail/stages/drop/). CloudFormation: ``` @@ -84,7 +85,7 @@ To modify an existing CloudFormation stack, use [update-stack](https://docs.aws. ### Ephemeral Jobs -This workflow is intended to be an effective approach for monitoring ephemeral jobs such as those run on AWS Lambda which are otherwise hard/impossible to monitor via one of the other Loki [clients](../). +This workflow is intended to be an effective approach for monitoring ephemeral jobs such as those run on AWS Lambda which are otherwise hard/impossible to monitor via one of the other Loki [clients]({{}}). Ephemeral jobs can quite easily run afoul of cardinality best practices. During high request load, an AWS lambda function might balloon in concurrency, creating many log streams in Cloudwatch. For this reason lambda-promtail defaults to **not** keeping the log stream value as a label when propagating the logs to Loki. This is only possible because new versions of Loki no longer have an ingestion ordering constraint on logs within a single stream. @@ -110,7 +111,7 @@ Cloudfront [real-time logs](https://docs.aws.amazon.com/AmazonCloudFront/latest/ ## Propagated Labels -Incoming logs can have seven special labels assigned to them which can be used in [relabeling](../promtail/configuration/#relabel_config) or later stages in a Promtail [pipeline](../promtail/pipelines/): +Incoming logs can have seven special labels assigned to them which can be used in [relabeling]({{}}) or later stages in a Promtail [pipeline]({{}}): - `__aws_log_type`: Where this log came from (Cloudwatch, Kinesis or S3). 
- `__aws_cloudwatch_log_group`: The associated Cloudwatch Log Group for this log. @@ -196,4 +197,4 @@ Instead we can pipeline Cloudwatch logs to a set of Promtails, which can mitigat 1) Using Promtail's push api along with the `use_incoming_timestamp: false` config, we let Promtail determine the timestamp based on when it ingests the logs, not the timestamp assigned by cloudwatch. Obviously, this means that we lose the origin timestamp because Promtail now assigns it, but this is a relatively small difference in a real time ingestion system like this. 2) In conjunction with (1), Promtail can coalesce logs across Cloudwatch log streams because it's no longer susceptible to out-of-order errors when combining multiple sources (lambda invocations). -One important aspect to keep in mind when running with a set of Promtails behind a load balancer is that we're effectively moving the cardinality problems from the number of log streams -> number of Promtails. If you have not configured Loki to [accept out-of-order writes](../../configuration#accept-out-of-order-writes), you'll need to assign a Promtail-specific label on each Promtail so that you don't run into out-of-order errors when the Promtails send data for the same log groups to Loki. This can easily be done via a configuration like `--client.external-labels=promtail=${HOSTNAME}` passed to Promtail. +One important aspect to keep in mind when running with a set of Promtails behind a load balancer is that we're effectively moving the cardinality problems from the number of log streams -> number of Promtails. If you have not configured Loki to [accept out-of-order writes]({{}}), you'll need to assign a Promtail-specific label on each Promtail so that you don't run into out-of-order errors when the Promtails send data for the same log groups to Loki. This can easily be done via a configuration like `--client.external-labels=promtail=${HOSTNAME}` passed to Promtail. diff --git a/docs/sources/clients/logstash/_index.md b/docs/sources/clients/logstash/_index.md index 7bdf2305ba..a96fa4b6a3 100644 --- a/docs/sources/clients/logstash/_index.md +++ b/docs/sources/clients/logstash/_index.md @@ -1,12 +1,13 @@ --- title: Logstash +description: Logstash weight: 70 --- # Logstash Grafana Loki has a [Logstash](https://www.elastic.co/logstash) output plugin called `logstash-output-loki` that enables shipping logs to a Loki -instance or [Grafana Cloud](https://grafana.com/products/cloud/). +instance or [Grafana Cloud](/products/cloud/). ## Installation @@ -105,7 +106,7 @@ Contains a `message` and `@timestamp` fields, which are respectively used to for > You can use a different property for the log line by using the configuration property [`message_field`](#message_field). If you also need to change the timestamp value use the Logstash `date` filter to change the `@timestamp` field. -All other fields (except nested fields) will form the label set (key value pairs) attached to the log line. [This means you're responsible for mutating and dropping high cardinality labels](https://grafana.com/blog/2020/04/21/how-labels-in-loki-can-make-log-queries-faster-and-easier/) such as client IPs. +All other fields (except nested fields) will form the label set (key value pairs) attached to the log line. [This means you're responsible for mutating and dropping high cardinality labels](/blog/2020/04/21/how-labels-in-loki-can-make-log-queries-faster-and-easier/) such as client IPs. 
You can usually do so by using a [`mutate`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-mutate.html) filter. **Note:** In version 1.1.0 and greater of this plugin you can also specify a list of labels to allowlist via the `include_fields` configuration. @@ -197,12 +198,12 @@ filter { The url of the Loki server to send logs to. When sending data the push path need to also be provided e.g. `http://localhost:3100/loki/api/v1/push`. -If you want to send to [GrafanaCloud](https://grafana.com/products/cloud/) you would use `https://logs-prod-us-central1.grafana.net/loki/api/v1/push`. +If you want to send to [GrafanaCloud](/products/cloud/) you would use `https://logs-prod-us-central1.grafana.net/loki/api/v1/push`. #### username / password Specify a username and password if the Loki server requires basic authentication. -If using the [GrafanaLab's hosted Loki](https://grafana.com/products/cloud/), the username needs to be set to your instance/user id and the password should be a Grafana.com api key. +If using the [GrafanaLab's hosted Loki](/products/cloud/), the username needs to be set to your instance/user id and the password should be a Grafana.com api key. #### message_field diff --git a/docs/sources/clients/promtail/_index.md b/docs/sources/clients/promtail/_index.md index 964939663c..f723211a24 100644 --- a/docs/sources/clients/promtail/_index.md +++ b/docs/sources/clients/promtail/_index.md @@ -1,11 +1,12 @@ --- title: Promtail +description: Promtail weight: 10 --- # Promtail Promtail is an agent which ships the contents of local logs to a private Grafana Loki -instance or [Grafana Cloud](https://grafana.com/oss/loki). It is usually +instance or [Grafana Cloud](/oss/loki). It is usually deployed to every machine that has applications needed to be monitored. It primarily: @@ -34,7 +35,7 @@ Kubernetes API server while `static` usually covers all other use cases. Just like Prometheus, `promtail` is configured using a `scrape_configs` stanza. `relabel_configs` allows for fine-grained control of what to ingest, what to drop, and the final metadata to attach to the log line. Refer to the docs for -[configuring Promtail](configuration/) for more details. +[configuring Promtail]({{}}) for more details. ### Support for compressed files @@ -67,8 +68,8 @@ parsed data to Loki. Important details are: to resume work from the last scraped line and process the rest of the remaining 55%. * Since decompression and pushing can be very fast, depending on the size of your compressed file Loki will rate-limit your ingestion. In that case you - might configure Promtail's [`limits` stage](https://grafana.com/docs/loki/latest/clients/promtail/stages/limit/) to slow the pace or increase - [ingestion limits on Loki](https://grafana.com/docs/loki/latest/configuration/#limits_config). + might configure Promtail's [`limits` stage](/docs/loki/latest/clients/promtail/stages/limit/) to slow the pace or increase + [ingestion limits on Loki](/docs/loki/latest/configuration/#limits_config). * Log rotations **aren't supported as of now**, mostly because it requires us modifying Promtail to rely on file inodes instead of file names. If you'd like to see support for it, please create a new issue on Github asking for it and explaining your use case. @@ -78,7 +79,7 @@ parsed data to Loki. 
Important details are: ## Loki Push API -Promtail can also be configured to receive logs from another Promtail or any Loki client by exposing the [Loki Push API](../../api#post-lokiapiv1push) with the [loki_push_api](configuration#loki_push_api_config) scrape config. +Promtail can also be configured to receive logs from another Promtail or any Loki client by exposing the [Loki Push API]({{}}) with the [loki_push_api]({{}}) scrape config. There are a few instances where this might be helpful: @@ -88,12 +89,12 @@ There are a few instances where this might be helpful: ## Receiving logs From Syslog -When the [Syslog Target](configuration#syslog_config) is being used, logs +When the [Syslog Target]({{}}) is being used, logs can be written with the syslog protocol to the configured port. ## AWS -If you need to run Promtail on Amazon Web Services EC2 instances, you can use our [detailed tutorial](../aws/ec2/). +If you need to run Promtail on Amazon Web Services EC2 instances, you can use our [detailed tutorial]({{}}). ## Labeling and parsing @@ -106,7 +107,7 @@ To allow more sophisticated filtering afterwards, Promtail allows to set labels not only from service discovery, but also based on the contents of each log line. The `pipeline_stages` can be used to add or update labels, correct the timestamp, or re-write log lines entirely. Refer to the documentation for -[pipelines](pipelines/) for more details. +[pipelines]({{}}) for more details. ## Shipping @@ -132,7 +133,7 @@ This endpoint returns 200 when Promtail is up and running, and there's at least ### `GET /metrics` This endpoint returns Promtail metrics for Prometheus. Refer to -[Observing Grafana Loki](../../operations/observability/) for the list +[Observing Grafana Loki]({{}}) for the list of exported metrics. ### Promtail web server config diff --git a/docs/sources/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md index 4b22fc41f0..1db87d6059 100644 --- a/docs/sources/clients/promtail/configuration.md +++ b/docs/sources/clients/promtail/configuration.md @@ -1,7 +1,8 @@ --- title: Configuration +description: Configuring Promtail --- -# Configuring Promtail +# Configuration Promtail is configured in a YAML file (usually referred to as `config.yaml`) which contains information on the Promtail server, where positions are stored, @@ -34,8 +35,8 @@ defined by the schema below. Brackets indicate that a parameter is optional. For non-list parameters the value is set to the specified default. For more detailed information on configuring how to discover and scrape logs from -targets, see [Scraping](../scraping/). For more information on transforming logs -from scraped targets, see [Pipelines](../pipelines/). +targets, see [Scraping]({{}}). For more information on transforming logs +from scraped targets, see [Pipelines]({{}}). ### Use environment variables in the configuration @@ -394,7 +395,7 @@ docker_sd_configs: ### pipeline_stages -[Pipeline](../pipelines/) stages are used to transform log entries and their labels. The pipeline is executed after the discovery process finishes. The `pipeline_stages` object consists of a list of stages which correspond to the items listed below. +[Pipeline]({{}}) stages are used to transform log entries and their labels. The pipeline is executed after the discovery process finishes. The `pipeline_stages` object consists of a list of stages which correspond to the items listed below. In most cases, you extract data from logs with `regex` or `json` stages.
The extracted data is transformed into a temporary map object. The data can then be used by Promtail e.g. as values for `labels` or as an `output`. Additionally any other stage aside from `docker` and `cri` can access the extracted data. @@ -540,7 +541,7 @@ template: #### match The match stage conditionally executes a set of stages when a log entry matches -a configurable [LogQL](../../../logql/) stream selector. +a configurable [LogQL]({{}}) stream selector. ```yaml match: @@ -806,8 +807,8 @@ Promtail needs to wait for the next message to catch multi-line messages, therefore delays between messages can occur. See recommended output configurations for -[syslog-ng](../scraping#syslog-ng-output-configuration) and -[rsyslog](../scraping#rsyslog-output-configuration). Both configurations enable +[syslog-ng]({{}}) and +[rsyslog]({{}}). Both configurations enable IETF Syslog with octet-counting. You may need to increase the open files limit for the Promtail process @@ -861,7 +862,7 @@ max_message_length: ### loki_push_api -The `loki_push_api` block configures Promtail to expose a [Loki push API](../../../api#post-lokiapiv1push) server. +The `loki_push_api` block configures Promtail to expose a [Loki push API]({{}}) server. Each job configured with a `loki_push_api` will expose this API and will require a separate port. @@ -990,7 +991,7 @@ labels: ### Available Labels -When Promtail receives GCP logs, various internal labels are made available for [relabeling](#relabeling). This depends on the subscription type chosen. +When Promtail receives GCP logs, various internal labels are made available for [relabeling](#relabel_configs). This depends on the subscription type chosen. **Internal labels available for pull** @@ -1120,7 +1121,7 @@ Each GELF message received will be encoded in JSON as the log line. For example: {"version":"1.1","host":"example.org","short_message":"A short message","timestamp":1231231123,"level":5,"_some_extra":"extra"} ``` -You can leverage [pipeline stages](pipeline_stages) with the GELF target, +You can leverage [pipeline stages]({{}}) with the GELF target, if for example, you want to parse the log line and extract more labels or change the log line format. ```yaml @@ -1276,7 +1277,7 @@ All Cloudflare logs are in JSON. Here is an example: } ``` -You can leverage [pipeline stages](pipeline_stages) if, for example, you want to parse the JSON log line and extract more labels or change the log line format. +You can leverage [pipeline stages]({{}}) if, for example, you want to parse the JSON log line and extract more labels or change the log line format. ### heroku_drain @@ -1455,7 +1456,7 @@ As a fallback, the file contents are also re-read periodically at the specified refresh interval. Each target has a meta label `__meta_filepath` during the -[relabeling phase](#relabel_config). Its value is set to the +[relabeling phase](#relabel_configs). Its value is set to the filepath from which the target was extracted. ```yaml @@ -1978,7 +1979,7 @@ The `tracing` block configures tracing for Jaeger. Currently, limited to configu ## Example Docker Config -It's fairly difficult to tail Docker files on a standalone machine because they are in different locations for every OS. We recommend the [Docker logging driver](../../docker-driver/) for local Docker installs or Docker Compose. +It's fairly difficult to tail Docker files on a standalone machine because they are in different locations for every OS. 
We recommend the [Docker logging driver]({{}}) for local Docker installs or Docker Compose. If running in a Kubernetes environment, you should look at the defined configs which are in [helm](https://github.com/grafana/helm-charts/blob/main/charts/promtail/templates/configmap.yaml) and [jsonnet](https://github.com/grafana/loki/tree/master/production/ksonnet/promtail/scrape_config.libsonnet), these leverage the prometheus service discovery libraries (and give Promtail it's name) for automatically finding and tailing pods. The jsonnet config explains with comments what each section is for. diff --git a/docs/sources/clients/promtail/gcplog-cloud.md b/docs/sources/clients/promtail/gcplog-cloud.md index 901946859d..c72efc973b 100644 --- a/docs/sources/clients/promtail/gcplog-cloud.md +++ b/docs/sources/clients/promtail/gcplog-cloud.md @@ -1,7 +1,8 @@ --- title: Cloud setup GCP Logs +description: Cloud setup GCP logs --- -# Cloud setup GCP logs +# Cloud setup GCP Logs This document explain how one can setup Google Cloud Platform to forward its cloud resource logs from a particular GCP project into Google Pubsub topic so that is available for Promtail to consume. @@ -123,7 +124,7 @@ gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \ --role='roles/iam.serviceAccountTokenCreator' ``` -Having configured Promtail with the [GCP Logs Push target](./#push), hosted in an internet-facing and HTTPS enabled deployment, we can continue with creating +Having configured Promtail with the [GCP Logs Push target](#push), hosted in an internet-facing and HTTPS enabled deployment, we can continue with creating the push subscription. ```bash @@ -230,7 +231,7 @@ We need a service account with following permissions. This enables Promtail to read log entries from the pubsub subscription created before. -you can find example for Promtail scrape config for `gcplog` [here](../scraping/#gcplog-scraping) +you can find example for Promtail scrape config for `gcplog` [here]({{}}) If you are scraping logs from multiple GCP projects, then this serviceaccount should have above permissions in all the projects you are tyring to scrape. diff --git a/docs/sources/clients/promtail/installation.md b/docs/sources/clients/promtail/installation.md index a19d72456f..005a34a3dd 100644 --- a/docs/sources/clients/promtail/installation.md +++ b/docs/sources/clients/promtail/installation.md @@ -1,7 +1,8 @@ --- title: Installation +description: Install Promtail --- -# Install Promtail +# Installation Promtail is distributed as a binary, in a Docker container, or there is a Helm chart to install it in a Kubernetes cluster. diff --git a/docs/sources/clients/promtail/logrotation.md b/docs/sources/clients/promtail/logrotation/_index.md similarity index 93% rename from docs/sources/clients/promtail/logrotation.md rename to docs/sources/clients/promtail/logrotation/_index.md index 2207c9f531..fc8b697394 100644 --- a/docs/sources/clients/promtail/logrotation.md +++ b/docs/sources/clients/promtail/logrotation/_index.md @@ -1,12 +1,13 @@ --- title: Promtail and Log Rotation +description: Promtail and Log Rotation --- # Promtail and Log Rotation ## Why does log rotation matters? At any point in time, there may be three processes working on a log file as shown in the image below. -![block_diagram](../logrotation-components.png) +![block_diagram](./logrotation-components.png) 1. Appender - A writer that keeps appending to a log file. This can be your application or some system daemons like Syslog, Docker log driver or Kubelet, etc. 2. 
Tailer - A reader that reads log lines as they are appended, for example, agents like Promtail. @@ -28,10 +29,10 @@ In both cases, after log rotation, all new log lines are written to the original These two methods of log rotation are shown in the following images. ### Copy and Truncate -![block_diagram](../logrotation-copy-and-truncate.png) +![block_diagram](./logrotation-copy-and-truncate.png) ### Rename and Create -![block_diagram](../logrotation-rename-and-create.png) +![block_diagram](./logrotation-rename-and-create.png) Both types of log rotation seem to give the same result. However, there are some subtle differences. @@ -81,7 +82,7 @@ Here `create` mode works like (2) explained above. The `create` mode is optional ### Kubernetes -[Kubernetes Service Discovery in Promtail]({{}}#kubernetes-discovery) also uses file-based scraping. Meaning, logs from your pods are stored on the nodes and Promtail scrapes the pod logs from the node files. +[Kubernetes Service Discovery in Promtail]({{}}) also uses file-based scraping. Meaning, logs from your pods are stored on the nodes and Promtail scrapes the pod logs from the node files. You can [configure](https://kubernetes.io/docs/concepts/cluster-administration/logging/#log-rotation) the `kubelet` process running on each node to manage log rotation via two configuration settings. @@ -138,4 +139,4 @@ If neither `kubelet` nor `CRI` is configured for rotating logs, then the `logrot Promtail uses `polling` to watch for file changes. A `polling` mechanism combined with a [copy and truncate](#copy-and-truncate) log rotation may result in losing some logs. As explained earlier in this topic, this happens when the file is truncated before Promtail reads all the log lines from such a file. -Therefore, for a long-term solution, we strongly recommend changing the log rotation strategy to [rename and create](#rename-and-create). Alternatively, as a workaround in the short term, you can tweak the promtail client's `batchsize` [config]({{}}/#clients) to set higher values (like 5M or 8M). This gives Promtail more room to read loglines without frequently waiting for push responses from the Loki server. +Therefore, for a long-term solution, we strongly recommend changing the log rotation strategy to [rename and create](#rename-and-create). Alternatively, as a workaround in the short term, you can tweak the promtail client's `batchsize` [config]({{}}) to set higher values (like 5M or 8M). This gives Promtail more room to read loglines without frequently waiting for push responses from the Loki server. 
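For illustration, a minimal sketch of what raising `batchsize` in the Promtail client configuration might look like (the push URL and the 5 MiB value are placeholders, not tuned recommendations):

```yaml
clients:
  - url: http://loki:3100/loki/api/v1/push
    # The default batch size is 1 MiB (1048576 bytes). A larger batch lets
    # Promtail read more log lines between waits for push responses.
    batchsize: 5242880
```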
diff --git a/docs/sources/clients/promtail/logrotation-components.png b/docs/sources/clients/promtail/logrotation/logrotation-components.png similarity index 100% rename from docs/sources/clients/promtail/logrotation-components.png rename to docs/sources/clients/promtail/logrotation/logrotation-components.png diff --git a/docs/sources/clients/promtail/logrotation-copy-and-truncate.png b/docs/sources/clients/promtail/logrotation/logrotation-copy-and-truncate.png similarity index 100% rename from docs/sources/clients/promtail/logrotation-copy-and-truncate.png rename to docs/sources/clients/promtail/logrotation/logrotation-copy-and-truncate.png diff --git a/docs/sources/clients/promtail/logrotation-rename-and-create.png b/docs/sources/clients/promtail/logrotation/logrotation-rename-and-create.png similarity index 100% rename from docs/sources/clients/promtail/logrotation-rename-and-create.png rename to docs/sources/clients/promtail/logrotation/logrotation-rename-and-create.png diff --git a/docs/sources/clients/promtail/pipelines.md b/docs/sources/clients/promtail/pipelines.md index 3eec9f760d..1fb80ea4ae 100644 --- a/docs/sources/clients/promtail/pipelines.md +++ b/docs/sources/clients/promtail/pipelines.md @@ -1,5 +1,6 @@ --- title: Pipelines +description: Pipelines --- # Pipelines @@ -25,13 +26,13 @@ stages: condition. Typical pipelines will start with a parsing stage (such as a -[regex](../stages/regex/) or [json](../stages/json/) stage) to extract data +[regex]({{}}) or [json]({{}}) stage) to extract data from the log line. Then, a series of action stages will be present to do something with that extracted data. The most common action stage will be a -[labels](../stages/labels/) stage to turn extracted data into a label. +[labels]({{}}) stage to turn extracted data into a label. -A common stage will also be the [match](../stages/match/) stage to selectively -apply stages or drop entries based on a [LogQL stream selector and filter expressions](../../../logql/). +A common stage will also be the [match]({{}}) stage to selectively +apply stages or drop entries based on a [LogQL stream selector and filter expressions]({{}}). Note that pipelines can not currently be used to deduplicate logs; Grafana Loki will receive the same log line multiple times if, for example: @@ -199,26 +200,26 @@ given log entry. Parsing stages: - - [docker](../stages/docker/): Extract data by parsing the log line using the standard Docker format. - - [cri](../stages/cri/): Extract data by parsing the log line using the standard CRI format. - - [regex](../stages/regex/): Extract data using a regular expression. - - [json](../stages/json/): Extract data by parsing the log line as JSON. + - [docker]({{}}): Extract data by parsing the log line using the standard Docker format. + - [cri]({{}}): Extract data by parsing the log line using the standard CRI format. + - [regex]({{}}): Extract data using a regular expression. + - [json]({{}}): Extract data by parsing the log line as JSON. Transform stages: - - [multiline](../stages/multiline/): Merges multiple lines, e.g. stack traces, into multiline blocks. - - [template](../stages/template/): Use Go templates to modify extracted data. + - [multiline]({{}}): Merges multiple lines, e.g. stack traces, into multiline blocks. + - [template]({{}}): Use Go templates to modify extracted data. Action stages: - - [timestamp](../stages/timestamp/): Set the timestamp value for the log entry. - - [output](../stages/output/): Set the log line text. 
- - [labels](../stages/labels/): Update the label set for the log entry. - - [metrics](../stages/metrics/): Calculate metrics based on extracted data. - - [tenant](../stages/tenant/): Set the tenant ID value to use for the log entry. + - [timestamp]({{}}): Set the timestamp value for the log entry. + - [output]({{}}): Set the log line text. + - [labels]({{}}): Update the label set for the log entry. + - [metrics]({{}}): Calculate metrics based on extracted data. + - [tenant]({{}}): Set the tenant ID value to use for the log entry. Filtering stages: - - [match](../stages/match/): Conditionally run stages based on the label set. - - [drop](../stages/drop/): Conditionally drop log lines based on several options. - - [limit](../stages/limit/): Conditionally rate limit log lines based on several options. + - [match]({{}}): Conditionally run stages based on the label set. + - [drop]({{}}): Conditionally drop log lines based on several options. + - [limit]({{}}): Conditionally rate limit log lines based on several options. diff --git a/docs/sources/clients/promtail/scraping.md b/docs/sources/clients/promtail/scraping.md index 8405459838..f6017e21ff 100644 --- a/docs/sources/clients/promtail/scraping.md +++ b/docs/sources/clients/promtail/scraping.md @@ -1,7 +1,8 @@ --- title: Scraping +description: Promtail Scraping (Service Discovery) --- -# Promtail Scraping (Service Discovery) +# Scraping ## File Target Discovery @@ -222,7 +223,7 @@ Here `project_id` and `subscription` are the only required fields. - `project_id` is the GCP project id. - `subscription` is the GCP pubsub subscription where Promtail can consume log entries from. -Before using `gcplog` target, GCP should be [configured](../gcplog-cloud) with pubsub subscription to receive logs from. +Before using `gcplog` target, GCP should be [configured]({{}}) with pubsub subscription to receive logs from. It also supports `relabeling` and `pipeline` stages just like other targets. @@ -256,7 +257,7 @@ section. This server exposes the single endpoint `POST /gcp/api/v1/push`, respon For Google's PubSub to be able to send logs, **Promtail server must be publicly accessible, and support HTTPS**. For that, Promtail can be deployed as part of a larger orchestration service like Kubernetes, which can handle HTTPS traffic through an ingress, or it can be hosted behind -a proxy/gateway, offloading the HTTPS to that component and routing the request to Promtail. Once that's solved, GCP can be [configured](../gcplog-cloud) +a proxy/gateway, offloading the HTTPS to that component and routing the request to Promtail. Once that's solved, GCP can be [configured]({{}}) to send logs to Promtail. It also supports `relabeling` and `pipeline` stages. @@ -558,5 +559,5 @@ clients: - [ ] ``` -Refer to [`client_config`]({{< relref "configuration#client_config" >}}) from the Promtail +Refer to [`client_config`]({{< relref "configuration#clients" >}}) from the Promtail Configuration reference for all available options. diff --git a/docs/sources/clients/promtail/stages/_index.md b/docs/sources/clients/promtail/stages/_index.md index a20cd1c284..bdb2c1e488 100644 --- a/docs/sources/clients/promtail/stages/_index.md +++ b/docs/sources/clients/promtail/stages/_index.md @@ -1,40 +1,41 @@ --- title: Stages +description: Stages --- # Stages This section is a collection of all stages Promtail supports in a -[Pipeline](../pipelines/). +[Pipeline]({{}}). Parsing stages: - - [docker](docker/): Extract data by parsing the log line using the standard Docker format. 
- - [cri](cri/): Extract data by parsing the log line using the standard CRI format. - - [regex](regex/): Extract data using a regular expression. - - [json](json/): Extract data by parsing the log line as JSON. - - [logfmt](logfmt/): Extract data by parsing the log line as logfmt. - - [replace](replace/): Replace data using a regular expression. - - [multiline](multiline/): Merge multiple lines into a multiline block. + - [docker]({{}}): Extract data by parsing the log line using the standard Docker format. + - [cri]({{}}): Extract data by parsing the log line using the standard CRI format. + - [regex]({{}}): Extract data using a regular expression. + - [json]({{}}): Extract data by parsing the log line as JSON. + - [logfmt]({{}}): Extract data by parsing the log line as logfmt. + - [replace]({{}}): Replace data using a regular expression. + - [multiline]({{}}): Merge multiple lines into a multiline block. Transform stages: - - [template](template/): Use Go templates to modify extracted data. - - [pack](pack/): Packs a log line in a JSON object allowing extracted values and labels to be placed inside the log line. - - [decolorize](decolorize/): Strips ANSI color sequences from the log line. + - [template]({{}}): Use Go templates to modify extracted data. + - [pack]({{}}): Packs a log line in a JSON object allowing extracted values and labels to be placed inside the log line. + - [decolorize]({{}}): Strips ANSI color sequences from the log line. Action stages: - - [timestamp](timestamp/): Set the timestamp value for the log entry. - - [output](output/): Set the log line text. - - [labeldrop](labeldrop/): Drop label set for the log entry. - - [labelallow](labelallow/): Allow label set for the log entry. - - [labels](labels/): Update the label set for the log entry. - - [limit](limit/): Limit the rate lines will be sent to Loki. - - [static_labels](static_labels/): Add static-labels to the log entry. - - [metrics](metrics/): Calculate metrics based on extracted data. - - [tenant](tenant/): Set the tenant ID value to use for the log entry. + - [timestamp]({{}}): Set the timestamp value for the log entry. + - [output]({{}}): Set the log line text. + - [labeldrop]({{}}): Drop label set for the log entry. + - [labelallow]({{}}): Allow label set for the log entry. + - [labels]({{}}): Update the label set for the log entry. + - [limit]({{}}): Limit the rate lines will be sent to Loki. + - [static_labels]({{}}): Add static-labels to the log entry. + - [metrics]({{}}): Calculate metrics based on extracted data. + - [tenant]({{}}): Set the tenant ID value to use for the log entry. Filtering stages: - - [match](match/): Conditionally run stages based on the label set. - - [drop](drop/): Conditionally drop log lines based on several options. + - [match]({{}}): Conditionally run stages based on the label set. + - [drop]({{}}): Conditionally drop log lines based on several options. diff --git a/docs/sources/clients/promtail/stages/cri.md b/docs/sources/clients/promtail/stages/cri.md index 8f13825682..1df68af1d5 100644 --- a/docs/sources/clients/promtail/stages/cri.md +++ b/docs/sources/clients/promtail/stages/cri.md @@ -1,7 +1,8 @@ --- title: cri +description: cri stage --- -# `cri` stage +# cri The `cri` stage is a parsing stage that reads the log line using the standard CRI logging format. 
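As a rough sketch, the `cri` stage usually needs no options and is enabled simply by listing it in `pipeline_stages` (the empty mapping below is illustrative of the common form):

```yaml
pipeline_stages:
  # Parse each line as CRI-formatted output (timestamp, stream, flags, content).
  - cri: {}
```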
diff --git a/docs/sources/clients/promtail/stages/decolorize.md b/docs/sources/clients/promtail/stages/decolorize.md index dfb5908876..af015262cb 100644 --- a/docs/sources/clients/promtail/stages/decolorize.md +++ b/docs/sources/clients/promtail/stages/decolorize.md @@ -1,7 +1,8 @@ --- title: decolorize +description: decolorize stage --- -# `decolorize` stage +# decolorize The `decolorize` stage is a transform stage that lets you strip ANSI color codes from the log line, thus making it easier to diff --git a/docs/sources/clients/promtail/stages/docker.md b/docs/sources/clients/promtail/stages/docker.md index 5e93064611..40adafcdcf 100644 --- a/docs/sources/clients/promtail/stages/docker.md +++ b/docs/sources/clients/promtail/stages/docker.md @@ -1,7 +1,8 @@ --- title: docker +description: docker stage --- -# `docker` stage +# docker The `docker` stage is a parsing stage that reads log lines in the standard format of Docker log files. diff --git a/docs/sources/clients/promtail/stages/drop.md b/docs/sources/clients/promtail/stages/drop.md index ef90ce8560..1728156210 100644 --- a/docs/sources/clients/promtail/stages/drop.md +++ b/docs/sources/clients/promtail/stages/drop.md @@ -1,7 +1,8 @@ --- title: drop +description: drop stage --- -# `drop` stage +# drop The `drop` stage is a filtering stage that lets you drop logs based on several options. @@ -106,7 +107,7 @@ Would drop this log line: #### Drop old log lines -**NOTE** For `older_than` to work, you must be using the [timestamp](../timestamp) stage to set the timestamp from the ingested log line _before_ applying the `drop` stage. +**NOTE** For `older_than` to work, you must be using the [timestamp]({{}}) stage to set the timestamp from the ingested log line _before_ applying the `drop` stage. Given the pipeline: diff --git a/docs/sources/clients/promtail/stages/json.md b/docs/sources/clients/promtail/stages/json.md index 5754c5baae..d6a75201e6 100644 --- a/docs/sources/clients/promtail/stages/json.md +++ b/docs/sources/clients/promtail/stages/json.md @@ -1,7 +1,8 @@ --- title: json +description: json stage --- -# `json` stage +# json The `json` stage is a parsing stage that reads the log line as JSON and accepts [JMESPath](http://jmespath.org/) expressions to extract data. @@ -32,7 +33,7 @@ This stage uses the Go JSON unmarshaler, which means non-string types like numbers or booleans will be unmarshaled into those types. The extracted data can hold non-string values and this stage does not do any type conversions; downstream stages will need to perform correct type conversion of these values -as necessary. Please refer to the [the `template` stage](../template/) for how +as necessary. Please refer to the [the `template` stage]({{}}) for how to do this. If the value extracted is a complex type, such as an array or a JSON object, it diff --git a/docs/sources/clients/promtail/stages/labelallow.md b/docs/sources/clients/promtail/stages/labelallow.md index 27b0c8bbea..3143c96861 100644 --- a/docs/sources/clients/promtail/stages/labelallow.md +++ b/docs/sources/clients/promtail/stages/labelallow.md @@ -1,7 +1,8 @@ --- title: labelallow +description: labelallow stage --- -# `labelallow` stage +# labelallow The labelallow stage is an action stage that allows only the provided labels to be included in the label set that is sent to Loki with the log entry. 
diff --git a/docs/sources/clients/promtail/stages/labeldrop.md b/docs/sources/clients/promtail/stages/labeldrop.md index e5b08be2e1..be768eb7af 100644 --- a/docs/sources/clients/promtail/stages/labeldrop.md +++ b/docs/sources/clients/promtail/stages/labeldrop.md @@ -1,7 +1,8 @@ --- title: labeldrop +description: labeldrop stage --- -# `labeldrop` stage +# labeldrop The labeldrop stage is an action stage that drops labels from the label set that is sent to Loki with the log entry. diff --git a/docs/sources/clients/promtail/stages/labels.md b/docs/sources/clients/promtail/stages/labels.md index 9d4cf2a1f0..57fd896ea4 100644 --- a/docs/sources/clients/promtail/stages/labels.md +++ b/docs/sources/clients/promtail/stages/labels.md @@ -1,7 +1,8 @@ --- title: labels +description: labels stage --- -# `labels` stage +# labels The labels stage is an action stage that takes data from the extracted map and modifies the label set that is sent to Loki with the log entry. diff --git a/docs/sources/clients/promtail/stages/limit.md b/docs/sources/clients/promtail/stages/limit.md index b98f565e2b..6a78d8417a 100644 --- a/docs/sources/clients/promtail/stages/limit.md +++ b/docs/sources/clients/promtail/stages/limit.md @@ -1,14 +1,15 @@ --- title: limit +description: limit stage --- -# `limit` stage +# limit The `limit` stage is a rate-limiting stage that throttles logs based on several options. ## Limit stage schema This pipeline stage places limits on the rate or burst quantity of log lines that Promtail pushes to Loki. -The concept of having distinct burst and rate limits mirrors the approach to limits that can be set for Loki's distributor component: `ingestion_rate_mb` and `ingestion_burst_size_mb`, as defined in [limits_config](../../../../configuration/#limits_config). +The concept of having distinct burst and rate limits mirrors the approach to limits that can be set for Loki's distributor component: `ingestion_rate_mb` and `ingestion_burst_size_mb`, as defined in [limits_config]({{}}). ```yaml limit: @@ -77,4 +78,4 @@ Given the pipeline: ``` Would ratelimit messages originating from each namespace independently. -Any message without namespace label will not be ratelimited. \ No newline at end of file +Any message without namespace label will not be ratelimited. diff --git a/docs/sources/clients/promtail/stages/logfmt.md b/docs/sources/clients/promtail/stages/logfmt.md index 7788ad2c77..2ed790b8dc 100644 --- a/docs/sources/clients/promtail/stages/logfmt.md +++ b/docs/sources/clients/promtail/stages/logfmt.md @@ -3,7 +3,7 @@ title: logfmt menuTitle: logfmt description: The logfmt parsing stage reads logfmt log lines and extracts the data into labels. --- -# `logfmt` stage +# logfmt The `logfmt` stage is a parsing stage that reads the log line as [logfmt](https://brandur.org/logfmt) and allows extraction of data into labels. @@ -25,7 +25,7 @@ This stage uses the [go-logfmt](https://github.com/go-logfmt/logfmt) unmarshaler numbers or booleans will be unmarshaled into those types. The extracted data can hold non-string values, and this stage does not do any type conversions; downstream stages will need to perform correct type conversion of these values -as necessary. Please refer to the [`template` stage](../template/) for how +as necessary. Please refer to the [`template` stage]({{}}) for how to do this. If the value extracted is a complex type, its value is extracted as a string. 
@@ -85,4 +85,4 @@ extracted data: The second stage will parse the value of `extra` from the extracted data as logfmt and append the following key-value pairs to the set of extracted data: -- `user`: `foo` \ No newline at end of file +- `user`: `foo` diff --git a/docs/sources/clients/promtail/stages/match.md b/docs/sources/clients/promtail/stages/match.md index cf4aa35a54..1f10057c2e 100644 --- a/docs/sources/clients/promtail/stages/match.md +++ b/docs/sources/clients/promtail/stages/match.md @@ -1,10 +1,11 @@ --- title: match +description: match stage --- -# `match` stage +# match The match stage is a filtering stage that conditionally applies a set of stages -or drop entries when a log entry matches a configurable [LogQL](../../../../logql/) +or drop entries when a log entry matches a configurable [LogQL]({{}}) stream selector and filter expressions. ## Schema @@ -47,7 +48,7 @@ match: ] ``` -Refer to the [Promtail Configuration Reference](../../configuration/) for the +Refer to the [Promtail Configuration Reference]({{}}) for the schema on the various other stages referenced here. ### Example diff --git a/docs/sources/clients/promtail/stages/metrics.md b/docs/sources/clients/promtail/stages/metrics.md index 5658e96ba2..c384e10d79 100644 --- a/docs/sources/clients/promtail/stages/metrics.md +++ b/docs/sources/clients/promtail/stages/metrics.md @@ -1,7 +1,8 @@ --- title: metrics +description: metrics stage --- -# `metrics` stage +# metrics The `metrics` stage is an action stage that allows for defining and updating metrics based on data from the extracted map. Note that created metrics are not diff --git a/docs/sources/clients/promtail/stages/multiline.md b/docs/sources/clients/promtail/stages/multiline.md index 7926137067..b46a5e7d0e 100644 --- a/docs/sources/clients/promtail/stages/multiline.md +++ b/docs/sources/clients/promtail/stages/multiline.md @@ -1,8 +1,9 @@ --- -title: multiline +title: multiline +description: multiline stage --- -# `multiline` stage +# multiline The `multiline` stage merges multiple lines into a multiline block before passing it on to the next stage in the pipeline. diff --git a/docs/sources/clients/promtail/stages/output.md b/docs/sources/clients/promtail/stages/output.md index c57d937d89..143d9a08f4 100644 --- a/docs/sources/clients/promtail/stages/output.md +++ b/docs/sources/clients/promtail/stages/output.md @@ -1,7 +1,8 @@ --- title: output +description: output stage --- -# `output` stage +# output The `output` stage is an action stage that takes data from the extracted map and changes the log line that will be sent to Loki. diff --git a/docs/sources/clients/promtail/stages/pack.md b/docs/sources/clients/promtail/stages/pack.md index 0456da931d..d2371ec6ed 100644 --- a/docs/sources/clients/promtail/stages/pack.md +++ b/docs/sources/clients/promtail/stages/pack.md @@ -1,7 +1,8 @@ --- title: pack +description: pack stage --- -# `pack` stage +# pack The `pack` stage is a transform stage which lets you embed extracted values and labels into the log line by packing the log line and labels inside a JSON object. 
@@ -57,7 +58,7 @@ This would create a log line } ``` -**Loki 2.2 also includes a new [`unpack` parser]({{< relref "../../../logql/log_queries.md#unpack" >}}) to work with the pack stage.** +**Loki 2.2 also includes a new [`unpack` parser]({{< relref "../../../logql/log_queries/#unpack" >}}) to work with the pack stage.** For example: diff --git a/docs/sources/clients/promtail/stages/regex.md b/docs/sources/clients/promtail/stages/regex.md index e81913e81a..fb585bacc5 100644 --- a/docs/sources/clients/promtail/stages/regex.md +++ b/docs/sources/clients/promtail/stages/regex.md @@ -1,7 +1,8 @@ --- title: regex +description: regex stage --- -# `regex` stage +# regex The `regex` stage is a parsing stage that parses a log line using a regular expression. Named capture groups in the regex support adding data into the diff --git a/docs/sources/clients/promtail/stages/replace.md b/docs/sources/clients/promtail/stages/replace.md index 931dedcb16..d3bc77de9a 100644 --- a/docs/sources/clients/promtail/stages/replace.md +++ b/docs/sources/clients/promtail/stages/replace.md @@ -1,7 +1,8 @@ --- title: replace +description: replace stage --- -# `replace` stage +# replace The `replace` stage is a parsing stage that parses a log line using a regular expression and replaces the log line. Named capture groups in the regex support adding data into the diff --git a/docs/sources/clients/promtail/stages/static_labels.md b/docs/sources/clients/promtail/stages/static_labels.md index 1d36f5debf..72c1bd472f 100644 --- a/docs/sources/clients/promtail/stages/static_labels.md +++ b/docs/sources/clients/promtail/stages/static_labels.md @@ -1,7 +1,8 @@ --- title: static_labels +description: static_labels stage --- -# `static_labels` stage +# static_labels The static_labels stage is an action stage that adds static-labels to the label set that is sent to Loki with the log entry. diff --git a/docs/sources/clients/promtail/stages/template.md b/docs/sources/clients/promtail/stages/template.md index 1f8ec0aec0..2fa30ec9b5 100644 --- a/docs/sources/clients/promtail/stages/template.md +++ b/docs/sources/clients/promtail/stages/template.md @@ -1,7 +1,8 @@ --- title: template +description: template stage --- -# `template` stage +# template The `template` stage is a transform stage that lets use manipulate the values in the extracted map using [Go's template diff --git a/docs/sources/clients/promtail/stages/tenant.md b/docs/sources/clients/promtail/stages/tenant.md index e6dfb6b354..4791e8413e 100644 --- a/docs/sources/clients/promtail/stages/tenant.md +++ b/docs/sources/clients/promtail/stages/tenant.md @@ -1,11 +1,12 @@ --- title: tenant +description: tenant stage --- -# `tenant` stage +# tenant The tenant stage is an action stage that sets the tenant ID for the log entry picking it from a field in the extracted data map. If the field is missing, the -default promtail client [`tenant_id`](../../configuration#client_config) will +default promtail client [`tenant_id`]({{}}) will be used. diff --git a/docs/sources/clients/promtail/stages/timestamp.md b/docs/sources/clients/promtail/stages/timestamp.md index 4d1dedd7e6..e774526494 100644 --- a/docs/sources/clients/promtail/stages/timestamp.md +++ b/docs/sources/clients/promtail/stages/timestamp.md @@ -1,7 +1,8 @@ --- title: timestamp +description: timestamp stage --- -# `timestamp` stage +# timestamp The `timestamp` stage is an action stage that can change the timestamp of a log line before it is sent to Loki. 
When a `timestamp` stage is not present, the diff --git a/docs/sources/clients/promtail/troubleshooting.md b/docs/sources/clients/promtail/troubleshooting/_index.md similarity index 91% rename from docs/sources/clients/promtail/troubleshooting.md rename to docs/sources/clients/promtail/troubleshooting/_index.md index 7c36f56743..e4176b095d 100644 --- a/docs/sources/clients/promtail/troubleshooting.md +++ b/docs/sources/clients/promtail/troubleshooting/_index.md @@ -1,7 +1,8 @@ --- title: Troubleshooting +description: Troubleshooting Promtail --- -# Troubleshooting Promtail +# Troubleshooting This document describes known failure modes of Promtail on edge cases and the adopted trade-offs. @@ -11,7 +12,7 @@ adopted trade-offs. Promtail can be configured to print log stream entries instead of sending them to Loki. This can be used in combination with [piping data](#pipe-data-to-promtail) to debug or troubleshoot Promtail log parsing. -In dry run mode, Promtail still support reading from a [positions](../configuration#position_config) file however no update will be made to the targeted file, this is to ensure you can easily retry the same set of lines. +In dry run mode, Promtail still support reading from a [positions]({{}}) file however no update will be made to the targeted file, this is to ensure you can easily retry the same set of lines. To start Promtail in dry run mode use the flag `--dry-run` as shown in the example below: @@ -45,7 +46,7 @@ Enable the inspection output using the `--inspect` command-line option. The `--i cat my.log | promtail --stdin --dry-run --inspect --client.url http://127.0.0.1:3100/loki/api/v1/push ``` -![screenshot](../inspect.png) +![screenshot](./inspect.png) The output uses color to highlight changes. Additions are in green, modifications in yellow, and removals in red. @@ -74,9 +75,9 @@ This will add labels `k1` and `k2` with respective values `v1` and `v2`. In pipe mode Promtail also support file configuration using `--config.file`, however do note that positions config is not used and only **the first scrape config is used**. -[`static_configs:`](../configuration) can be used to provide static labels, although the targets property is ignored. +[`static_configs:`]({{}}) can be used to provide static labels, although the targets property is ignored. -If you don't provide any [`scrape_config:`](../configuration#scrape_config) a default one is used which will automatically adds the following default labels: `{job="stdin",hostname=""}`. +If you don't provide any [`scrape_config:`]({{}}) a default one is used which will automatically adds the following default labels: `{job="stdin",hostname=""}`. For example you could use this config below to parse and add the label `level` on all your piped logs: @@ -196,7 +197,7 @@ from there. This means that if new log entries have been read and pushed to the ingester between the last sync period and the crash, these log entries will be sent again to the ingester on Promtail restart. -If Loki is not configured to [accept out-of-order writes](../../../configuration/#accept-out-of-order-writes), Loki will reject all log lines received in +If Loki is not configured to [accept out-of-order writes]({{}}), Loki will reject all log lines received in what it perceives is out of order. If Promtail happens to crash, it may re-send log lines that were sent prior to the crash. 
The default diff --git a/docs/sources/clients/promtail/inspect.png b/docs/sources/clients/promtail/troubleshooting/inspect.png similarity index 100% rename from docs/sources/clients/promtail/inspect.png rename to docs/sources/clients/promtail/troubleshooting/inspect.png diff --git a/docs/sources/community/_index.md b/docs/sources/community/_index.md index c316e80fb9..478192db35 100644 --- a/docs/sources/community/_index.md +++ b/docs/sources/community/_index.md @@ -1,9 +1,10 @@ --- title: Community +description: Community weight: 1100 --- # Community -1. [Governance](governance/) -1. [Getting in Touch](getting-in-touch/) -1. [Contributing](contributing/) +1. [Governance]({{}}) +1. [Getting in Touch]({{}}) +1. [Contributing]({{}}) diff --git a/docs/sources/community/contributing.md b/docs/sources/community/contributing.md index 7ef2a2fb31..182af5de63 100644 --- a/docs/sources/community/contributing.md +++ b/docs/sources/community/contributing.md @@ -1,5 +1,6 @@ --- title: Contributing to Loki +description: Contributing to Loki --- # Contributing to Loki diff --git a/docs/sources/community/getting-in-touch.md b/docs/sources/community/getting-in-touch.md index 49c495ad75..12c0843b9a 100644 --- a/docs/sources/community/getting-in-touch.md +++ b/docs/sources/community/getting-in-touch.md @@ -1,13 +1,14 @@ --- title: Contacting the Loki Team +description: Contacting the Loki Team --- # Contacting the Loki Team For questions regarding Loki: - Open source Loki users are welcome to post technical questions on the Grafana Labs Community Forums under the Grafana Loki category at [community.grafana.com](https://community.grafana.com). Please be mindful that this is a community-driven support channel moderated by Grafana Labs staff where Loki maintainers and community members answer questions when bandwidth allows. Be sure to review the [Community Guidelines](https://community.grafana.com/guidelines) before posting. -- Users deploying Loki via [Grafana Cloud](https://grafana.com/products/cloud/) can submit support tickets via the [Grafana.com Account Portal](https://grafana.com/login). -- For questions regarding Enterprise support for Loki, you can get in touch with the Grafana Labs team [here](https://grafana.com/contact?pg=docs). +- Users deploying Loki via [Grafana Cloud](/products/cloud/) can submit support tickets via the [Grafana.com Account Portal](/login). +- For questions regarding Enterprise support for Loki, you can get in touch with the Grafana Labs team [here](/contact?pg=docs). Your feedback is always welcome! To submit feedback or a report a potential bug: diff --git a/docs/sources/community/governance.md b/docs/sources/community/governance.md index 5c11281d60..0c35b3b604 100644 --- a/docs/sources/community/governance.md +++ b/docs/sources/community/governance.md @@ -1,5 +1,6 @@ --- title: Governance +description: Governance --- # Governance @@ -50,24 +51,24 @@ In case a member leaves, the [offboarding](#offboarding) procedure is applied. 
The current team members are: - Aditya C S - [adityacs](https://github.com/adityacs) -- Cyril Tovena - [cyriltovena](https://github.com/cyriltovena) ([Grafana Labs](https://grafana.com/)) -- Danny Kopping - [dannykopping](https://github.com/dannykopping) ([Grafana Labs](https://grafana.com/)) -- David Kaltschmidt - [davkal](https://github.com/davkal) ([Grafana Labs](https://grafana.com/)) -- Edward Welch - [slim-bean](https://github.com/slim-bean) ([Grafana Labs](https://grafana.com/)) -- Goutham Veeramachaneni - [gouthamve](https://github.com/gouthamve) ([Grafana Labs](https://grafana.com/)) -- Joe Elliott - [joe-elliott](https://github.com/joe-elliott) ([Grafana Labs](https://grafana.com/)) -- Karsten Jeschkies - [jeschkies](https://github.com/jeschkies) ([Grafana Labs](https://grafana.com/)) -- Kaviraj Kanagaraj - [kavirajk](https://github.com/kavirajk) ([Grafana Labs](https://grafana.com/)) +- Cyril Tovena - [cyriltovena](https://github.com/cyriltovena) ([Grafana Labs](/)) +- Danny Kopping - [dannykopping](https://github.com/dannykopping) ([Grafana Labs](/)) +- David Kaltschmidt - [davkal](https://github.com/davkal) ([Grafana Labs](/)) +- Edward Welch - [slim-bean](https://github.com/slim-bean) ([Grafana Labs](/)) +- Goutham Veeramachaneni - [gouthamve](https://github.com/gouthamve) ([Grafana Labs](/)) +- Joe Elliott - [joe-elliott](https://github.com/joe-elliott) ([Grafana Labs](/)) +- Karsten Jeschkies - [jeschkies](https://github.com/jeschkies) ([Grafana Labs](/)) +- Kaviraj Kanagaraj - [kavirajk](https://github.com/kavirajk) ([Grafana Labs](/)) - Li Guozhong - [liguozhong](https://github.com/liguozhong) ([Alibaba Cloud](https://alibabacloud.com/)) -- Owen Diehl - [owen-d](https://github.com/owen-d) ([Grafana Labs](https://grafana.com/)) +- Owen Diehl - [owen-d](https://github.com/owen-d) ([Grafana Labs](/)) - Periklis Tsirakidis - [periklis](https://github.com/periklis) ([Red Hat](https://www.redhat.com/)) -- Sandeep Sukhani - [sandeepsukhani](https://github.com/sandeepsukhani) ([Grafana Labs](https://grafana.com/)) -- Tom Braack - [sh0rez](https://github.com/sh0rez) ([Grafana Labs](https://grafana.com/)) -- Tom Wilkie - [tomwilkie](https://github.com/tomwilkie) ([Grafana Labs](https://grafana.com/)) +- Sandeep Sukhani - [sandeepsukhani](https://github.com/sandeepsukhani) ([Grafana Labs](/)) +- Tom Braack - [sh0rez](https://github.com/sh0rez) ([Grafana Labs](/)) +- Tom Wilkie - [tomwilkie](https://github.com/tomwilkie) ([Grafana Labs](/)) The current Loki SIG Operator team members are: - Brett Jones - [blockloop](https://github.com/blockloop/) ([InVision](https://www.invisionapp.com/)) -- Cyril Tovena - [cyriltovena](https://github.com/cyriltovena) ([Grafana Labs](https://grafana.com/)) +- Cyril Tovena - [cyriltovena](https://github.com/cyriltovena) ([Grafana Labs](/)) - Gerard Vanloo - [Red-GV](https://github.com/Red-GV) ([IBM](https://www.ibm.com)) - Periklis Tsirakidis - [periklis](https://github.com/periklis) ([Red Hat](https://www.redhat.com)) - Sashank Agrawal - [sasagarw](https://github.com/sasagarw/) ([Red Hat](https://www.redhat.com)) diff --git a/docs/sources/configuration/query-frontend.md b/docs/sources/configuration/query-frontend.md index 477822bd42..d944b85635 100644 --- a/docs/sources/configuration/query-frontend.md +++ b/docs/sources/configuration/query-frontend.md @@ -1,7 +1,8 @@ --- title: Query Frontend +description: Kubernetes Query Frontend Example --- -# Kubernetes Query Frontend Example +# Query Frontend ## Disclaimer diff --git 
a/docs/sources/design-documents/2020-02-Promtail-Push-API.md b/docs/sources/design-documents/2020-02-Promtail-Push-API.md index e0e95ba0e5..babe4ef9de 100644 --- a/docs/sources/design-documents/2020-02-Promtail-Push-API.md +++ b/docs/sources/design-documents/2020-02-Promtail-Push-API.md @@ -1,5 +1,6 @@ --- title: Promtail Push API +description: Promtail Push API weight: 20 --- # Promtail Push API @@ -63,7 +64,7 @@ rejected pushes. Users are recommended to do one of the following: ## Implementation As discussed in this document, this feature will be implemented by copying the -existing [Loki Push API](https://grafana.com/docs/loki/latest/api/#post-lokiapiv1push) +existing [Loki Push API](/docs/loki/latest/api/#post-lokiapiv1push) and exposing it via Promtail. ## Considered Alternatives diff --git a/docs/sources/design-documents/2020-09-Write-Ahead-Log.md b/docs/sources/design-documents/2020-09-Write-Ahead-Log.md index 8de0d870bc..438249d614 100644 --- a/docs/sources/design-documents/2020-09-Write-Ahead-Log.md +++ b/docs/sources/design-documents/2020-09-Write-Ahead-Log.md @@ -1,10 +1,11 @@ --- title: Write-Ahead Logs +description: Write-Ahead Logs weight: 30 --- ## Write-Ahead Logs -Author: Owen Diehl - [owen-d](https://github.com/owen-d) ([Grafana Labs](https://grafana.com/)) +Author: Owen Diehl - [owen-d](https://github.com/owen-d) ([Grafana Labs](/)) Date: 30/09/2020 diff --git a/docs/sources/design-documents/2021-01-Ordering-Constraint-Removal.md b/docs/sources/design-documents/2021-01-Ordering-Constraint-Removal.md index c0775903b5..f14a6ce772 100644 --- a/docs/sources/design-documents/2021-01-Ordering-Constraint-Removal.md +++ b/docs/sources/design-documents/2021-01-Ordering-Constraint-Removal.md @@ -1,10 +1,11 @@ --- title: Ordering Constraint Removal +description: Ordering Constraint Removal weight: 40 --- ## Ordering Constraint Removal -Author: Owen Diehl - [owen-d](https://github.com/owen-d) ([Grafana Labs](https://grafana.com/)) +Author: Owen Diehl - [owen-d](https://github.com/owen-d) ([Grafana Labs](/)) Date: 28/01/2021 diff --git a/docs/sources/design-documents/_index.md b/docs/sources/design-documents/_index.md index 40954f0e4f..310b8257d1 100644 --- a/docs/sources/design-documents/_index.md +++ b/docs/sources/design-documents/_index.md @@ -1,10 +1,11 @@ --- title: Design documents +description: Design documents weight: 1300 --- # Design documents -- [Labels from Logs](labels/) -- [Promtail Push API](2020-02-promtail-push-api/) -- [Write-Ahead Logs](2020-09-write-ahead-log/) -- [Ordering Constraint Removal](2021-01-ordering-constraint-removal/) \ No newline at end of file +- [Labels from Logs]({{}}) +- [Promtail Push API]({{}}) +- [Write-Ahead Logs]({{}}) +- [Ordering Constraint Removal]({{}}) diff --git a/docs/sources/design-documents/labels.md b/docs/sources/design-documents/labels.md index ca22fda776..9994c651c5 100644 --- a/docs/sources/design-documents/labels.md +++ b/docs/sources/design-documents/labels.md @@ -1,8 +1,9 @@ --- title: Labels +description: Labels from Logs weight: 10 --- -# Labels from Logs +# Labels Author: Ed Welch Date: February 2019 diff --git a/docs/sources/fundamentals/_index.md b/docs/sources/fundamentals/_index.md index f87ed6b9a7..364d247fe8 100644 --- a/docs/sources/fundamentals/_index.md +++ b/docs/sources/fundamentals/_index.md @@ -1,8 +1,9 @@ --- title: Fundamentals +description: Grafana Loki Fundamentals weight: 150 --- -# Grafana Loki Fundamentals +# Fundamentals This section explains fundamental concepts about Grafana Loki: diff --git 
a/docs/sources/fundamentals/architecture/_index.md b/docs/sources/fundamentals/architecture/_index.md index 76bc961c79..8b3c08118b 100644 --- a/docs/sources/fundamentals/architecture/_index.md +++ b/docs/sources/fundamentals/architecture/_index.md @@ -1,10 +1,11 @@ --- title: Architecture +description: Grafana Loki's Architecture weight: 200 aliases: - /docs/loki/latest/architecture/ --- -# Grafana Loki's Architecture +# Architecture ## Multi-tenancy @@ -74,7 +75,7 @@ bytes of the log entry. ### Single Store -Loki stores all data in a single object storage backend. This mode of operation became generally available with Loki 2.0 and is fast, cost-effective, and simple, not to mention where all current and future development lies. This mode uses an adapter called [`boltdb_shipper`](../../operations/storage/boltdb-shipper) to store the `index` in object storage (the same way we store `chunks`). +Loki stores all data in a single object storage backend. This mode of operation became generally available with Loki 2.0 and is fast, cost-effective, and simple, not to mention where all current and future development lies. This mode uses an adapter called [`boltdb_shipper`]({{}}) to store the `index` in object storage (the same way we store `chunks`). ### Deprecated: Multi-store @@ -95,7 +96,7 @@ maintenance tasks. It consists of: > Unlike the other core components of Loki, the chunk store is not a separate > service, job, or process, but rather a library embedded in the two services -> that need to access Loki data: the [ingester](#ingester) and [querier](#querier). +> that need to access Loki data: the [ingester]({{}}) and [querier]({{}}). The chunk store relies on a unified interface to the "[NoSQL](https://en.wikipedia.org/wiki/NoSQL)" stores (DynamoDB, Bigtable, and @@ -135,7 +136,7 @@ To summarize, the read path works as follows: ## Write Path -![chunk_diagram](chunks_diagram.png) +![chunk_diagram](./chunks_diagram.png) To summarize, the write path works as follows: diff --git a/docs/sources/fundamentals/architecture/components.md b/docs/sources/fundamentals/architecture/components/_index.md similarity index 95% rename from docs/sources/fundamentals/architecture/components.md rename to docs/sources/fundamentals/architecture/components/_index.md index a74a0218b2..6aca2dcf14 100644 --- a/docs/sources/fundamentals/architecture/components.md +++ b/docs/sources/fundamentals/architecture/components/_index.md @@ -1,10 +1,11 @@ --- title: Components +description: Components weight: 30 --- # Components -![components_diagram](../loki_architecture_components.svg) +![components_diagram](./loki_architecture_components.svg) ## Distributor @@ -31,7 +32,7 @@ Currently the only way the distributor mutates incoming data is by normalizing l The distributor can also rate limit incoming logs based on the maximum per-tenant bitrate. It does this by checking a per tenant limit and dividing it by the current number of distributors. This allows the rate limit to be specified per tenant at the cluster level and enables us to scale the distributors up or down and have the per-distributor limit adjust accordingly. For instance, say we have 10 distributors and tenant A has a 10MB rate limit. Each distributor will allow up to 1MB/second before limiting. Now, say another large tenant joins the cluster and we need to spin up 10 more distributors. The now 20 distributors will adjust their rate limits for tenant A to `(10MB / 20 distributors) = 500KB/s`! 
This is how global limits allow much simpler and safer operation of the Loki cluster. -**Note: The distributor uses the `ring` component under the hood to register itself amongst it's peers and get the total number of active distributors. This is a different "key" than the ingesters use in the ring and comes from the distributor's own [ring configuration](../../../configuration#distributor_config).** +**Note: The distributor uses the `ring` component under the hood to register itself amongst it's peers and get the total number of active distributors. This is a different "key" than the ingesters use in the ring and comes from the distributor's own [ring configuration]({{}}).** ### Forwarding @@ -138,7 +139,7 @@ deduplicated. ### Timestamp Ordering -Loki can be configured to [accept out-of-order writes](../../configuration/#accept-out-of-order-writes). +Loki can be configured to [accept out-of-order writes]({{}}). When not configured to accept out-of-order writes, the ingester validates that ingested log lines are in order. When an ingester receives a log line that doesn't follow the expected order, the line @@ -153,7 +154,7 @@ Logs from each unique set of labels are built up into "chunks" in memory and then flushed to the backing storage backend. If an ingester process crashes or exits abruptly, all the data that has not yet -been flushed could be lost. Loki is usually configured with a [Write Ahead Log](../../operations/storage/wal) which can be _replayed_ on restart as well as with a `replication_factor` (usually 3) of each log to mitigate this risk. +been flushed could be lost. Loki is usually configured with a [Write Ahead Log]({{}}) which can be _replayed_ on restart as well as with a `replication_factor` (usually 3) of each log to mitigate this risk. When not configured to accept out-of-order writes, all lines pushed to Loki for a given stream (unique combination of @@ -169,7 +170,7 @@ nanosecond timestamps: different content, the log line is accepted. This means it is possible to have two different log lines for the same timestamp. -### Handoff - Deprecated in favor of the [WAL](../../operations/storage/wal) +### Handoff - Deprecated in favor of the [WAL]({{}}) By default, when an ingester is shutting down and tries to leave the hash ring, it will wait to see if a new ingester tries to enter before flushing and will @@ -223,7 +224,7 @@ Caching log (filter, regexp) queries are under active development. ## Querier -The **querier** service handles queries using the [LogQL](../../logql/) query +The **querier** service handles queries using the [LogQL]({{}}) query language, fetching logs both from the ingesters and from long-term storage. 
Queriers query all ingesters for in-memory data before falling back to diff --git a/docs/sources/fundamentals/architecture/loki_architecture_components.svg b/docs/sources/fundamentals/architecture/components/loki_architecture_components.svg similarity index 100% rename from docs/sources/fundamentals/architecture/loki_architecture_components.svg rename to docs/sources/fundamentals/architecture/components/loki_architecture_components.svg diff --git a/docs/sources/fundamentals/architecture/deployment-modes.md b/docs/sources/fundamentals/architecture/deployment-modes/_index.md similarity index 90% rename from docs/sources/fundamentals/architecture/deployment-modes.md rename to docs/sources/fundamentals/architecture/deployment-modes/_index.md index 151d31a7cd..81dafba98a 100644 --- a/docs/sources/fundamentals/architecture/deployment-modes.md +++ b/docs/sources/fundamentals/architecture/deployment-modes/_index.md @@ -1,5 +1,6 @@ --- title: Deployment modes +description: Deployment modes weight: 20 --- # Deployment modes @@ -28,14 +29,14 @@ This is monolithic mode; it runs all of Loki’s microservice components inside a single process as a single binary or Docker image. -![monolithic mode diagram](../monolithic-mode.png) +![monolithic mode diagram](./monolithic-mode.png) Monolithic mode is useful for getting started quickly to experiment with Loki, as well as for small read/write volumes of up to approximately 100GB per day. Horizontally scale up a monolithic mode deployment to more instances by using a shared object store, and by configuring the -[`memberlist_config` section](../../../configuration/#memberlist_config) +[`ring` section]({{}}) to share state between all instances. High availability can be configured by running two Loki instances @@ -54,11 +55,11 @@ Loki provides the simple scalable deployment mode. This deployment mode can scale to several TBs of logs per day and more. Consider the microservices mode approach for very large Loki installations. -![simple scalable deployment mode diagram](../simple-scalable.png) +![simple scalable deployment mode diagram](./simple-scalable.png) In this mode the component microservices of Loki are bundled into two targets: `-target=read` and `-target=write`. -The BoltDB [compactor](../../../operations/storage/boltdb-shipper/#compactor) +The BoltDB [compactor]({{}}) service will run as part of the read target. There are advantages to separating the read and write paths: @@ -89,7 +90,7 @@ Each process is invoked specifying its `target`: * ruler * compactor -![microservices mode diagram](../microservices-mode.png) +![microservices mode diagram](./microservices-mode.png) Running components as individual microservices allows scaling up by increasing the quantity of microservices. 
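As a rough sketch of how several instances can share ring state through memberlist (the hostnames are placeholders; 7946 is the usual memberlist gossip port):

```yaml
memberlist:
  join_members:
    # Addresses of the other Loki instances to gossip with.
    - loki-1:7946
    - loki-2:7946
    - loki-3:7946

common:
  ring:
    kvstore:
      store: memberlist
```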
diff --git a/docs/sources/fundamentals/architecture/microservices-mode.png b/docs/sources/fundamentals/architecture/deployment-modes/microservices-mode.png similarity index 100% rename from docs/sources/fundamentals/architecture/microservices-mode.png rename to docs/sources/fundamentals/architecture/deployment-modes/microservices-mode.png diff --git a/docs/sources/fundamentals/architecture/monolithic-mode.png b/docs/sources/fundamentals/architecture/deployment-modes/monolithic-mode.png similarity index 100% rename from docs/sources/fundamentals/architecture/monolithic-mode.png rename to docs/sources/fundamentals/architecture/deployment-modes/monolithic-mode.png diff --git a/docs/sources/fundamentals/architecture/simple-scalable.png b/docs/sources/fundamentals/architecture/deployment-modes/simple-scalable.png similarity index 100% rename from docs/sources/fundamentals/architecture/simple-scalable.png rename to docs/sources/fundamentals/architecture/deployment-modes/simple-scalable.png diff --git a/docs/sources/fundamentals/architecture/rings.md b/docs/sources/fundamentals/architecture/rings/_index.md similarity index 94% rename from docs/sources/fundamentals/architecture/rings.md rename to docs/sources/fundamentals/architecture/rings/_index.md index aa8d89fba8..41da771fb6 100644 --- a/docs/sources/fundamentals/architecture/rings.md +++ b/docs/sources/fundamentals/architecture/rings/_index.md @@ -1,5 +1,6 @@ --- title: Consistent Hash Rings +description: Consistent Hash Rings weight: 40 --- # Consistent Hash Rings @@ -34,7 +35,7 @@ These components can optionally be connected into a hash ring: In an architecture that has three distributors and three ingestors defined, the hash rings for these components connect the instances of same-type components. -![distributor and ingester rings](../ring-overview.png) +![distributor and ingester rings](./ring-overview.png) Each node in the ring represents an instance of a component. Each node has a key-value store that holds communication information @@ -49,7 +50,7 @@ For each node, the key-value store holds: ## Configuring rings -Define [ring configuration](../../../configuration/#ring_config) within the `common.ring_config` block. +Define [ring configuration]({{}}) within the `common.ring_config` block. Use the default `memberlist` key-value store type unless there is a compelling reason to use a different key-value store type. diff --git a/docs/sources/fundamentals/architecture/ring-overview.png b/docs/sources/fundamentals/architecture/rings/ring-overview.png similarity index 100% rename from docs/sources/fundamentals/architecture/ring-overview.png rename to docs/sources/fundamentals/architecture/rings/ring-overview.png diff --git a/docs/sources/fundamentals/labels.md b/docs/sources/fundamentals/labels.md index bf92f6dd62..e542e038e8 100644 --- a/docs/sources/fundamentals/labels.md +++ b/docs/sources/fundamentals/labels.md @@ -1,5 +1,6 @@ --- title: Labels +description: Labels weight: 300 aliases: - /docs/loki/latest/getting-started/labels/ @@ -8,7 +9,7 @@ aliases: Labels are key value pairs and can be defined as anything! We like to refer to them as metadata to describe a log stream. If you are familiar with Prometheus, there are a few labels you are used to seeing like `job` and `instance`, and I will use those in the coming examples. -The scrape configs we provide with Grafana Loki define these labels, too. 
If you are using Prometheus, having consistent labels between Loki and Prometheus is one of Loki's superpowers, making it incredibly [easy to correlate your application metrics with your log data](https://grafana.com/blog/2019/05/06/how-loki-correlates-metrics-and-logs--and-saves-you-money/). +The scrape configs we provide with Grafana Loki define these labels, too. If you are using Prometheus, having consistent labels between Loki and Prometheus is one of Loki's superpowers, making it incredibly [easy to correlate your application metrics with your log data](/blog/2019/05/06/how-loki-correlates-metrics-and-logs--and-saves-you-money/). ## How Loki uses labels @@ -145,7 +146,7 @@ The two previous examples use statically defined labels with a single value; how __path__: /var/log/apache.log ``` -This regex matches every component of the log line and extracts the value of each component into a capture group. Inside the pipeline code, this data is placed in a temporary data structure that allows using it for several purposes during the processing of that log line (at which point that temp data is discarded). Much more detail about this can be found in the [Promtail pipelines](../../clients/promtail/pipelines/) documentation. +This regex matches every component of the log line and extracts the value of each component into a capture group. Inside the pipeline code, this data is placed in a temporary data structure that allows using it for several purposes during the processing of that log line (at which point that temp data is discarded). Much more detail about this can be found in the [Promtail pipelines]({{}}) documentation. From that regex, we will be using two of the capture groups to dynamically set two labels based on content from the log line itself: @@ -201,7 +202,7 @@ Now let's talk about Loki, where the index is typically an order of magnitude sm Loki will effectively keep your static costs as low as possible (index size and memory requirements as well as static log storage) and make the query performance something you can control at runtime with horizontal scaling. -To see how this works, let's look back at our example of querying your access log data for a specific IP address. We don't want to use a label to store the IP address. Instead we use a [filter expression](../../logql/log_queries#line-filter-expression) to query for it: +To see how this works, let's look back at our example of querying your access log data for a specific IP address. We don't want to use a label to store the IP address. Instead we use a [filter expression]({{}}) to query for it: ``` {job="apache"} |= "11.11.11.11" diff --git a/docs/sources/fundamentals/overview/_index.md b/docs/sources/fundamentals/overview/_index.md index 06f9f5dd6f..2a60bb9096 100644 --- a/docs/sources/fundamentals/overview/_index.md +++ b/docs/sources/fundamentals/overview/_index.md @@ -1,5 +1,6 @@ --- title: Overview +description: Overview weight: 100 aliases: - /docs/loki/latest/overview/ @@ -21,7 +22,7 @@ An agent (also called a client) acquires logs, turns the logs into streams, and pushes the streams to Loki through an HTTP API. The Promtail agent is designed for Loki installations, -but many other [Agents](../../clients/) seamlessly integrate with Loki. +but many other [Agents]({{}}) seamlessly integrate with Loki. ![Loki agent interaction](loki-overview-2.png) @@ -30,7 +31,7 @@ Each stream identifies a set of logs associated with a unique set of labels. 
A quality set of labels is key to the creation of an index that is both compact and allows for efficient query execution. -[LogQL](../../logql) is the query language for Loki. +[LogQL]({{}}) is the query language for Loki. ## Loki features diff --git a/docs/sources/getting-started/_index.md b/docs/sources/getting-started/_index.md index 22f4e8305a..1df05642ec 100644 --- a/docs/sources/getting-started/_index.md +++ b/docs/sources/getting-started/_index.md @@ -6,7 +6,7 @@ aliases: - /docs/loki/latest/getting-started/get-logs-into-loki/ --- -# Getting started with Grafana Loki +# Getting started This guide assists the reader to create and use a simple Loki cluster. The cluster is intended for testing, development, and evaluation; @@ -22,7 +22,7 @@ Grafana provides a way to pose queries against the logs stored in Loki and visua The test environment uses Docker compose to instantiate these parts, each in its own container: -- One [single scalable deployment](../fundamentals/architecture/deployment-modes/) mode **Loki** instance has: +- One [single scalable deployment]({{}}) mode **Loki** instance has: - One Loki read component - One Loki write component - **Minio** is Loki's storage back end in the test environment. @@ -62,10 +62,10 @@ The write component returns `ready` when you point a web browser at http://local ## Use Grafana and the test environment -Use [Grafana](https://grafana.com/docs/grafana/latest/) to query and observe the log lines captured in the Loki cluster by navigating a browser to http://localhost:3000. -The Grafana instance has Loki configured as a [datasource](https://grafana.com/docs/grafana/latest/datasources/loki/). +Use [Grafana](/docs/grafana/latest/) to query and observe the log lines captured in the Loki cluster by navigating a browser to http://localhost:3000. +The Grafana instance has Loki configured as a [datasource](/docs/grafana/latest/datasources/loki/). -Click on the Grafana instance's [Explore](https://grafana.com/docs/grafana/latest/explore/) icon to bring up the explore pane. +Click on the Grafana instance's [Explore](/docs/grafana/latest/explore/) icon to bring up the explore pane. Use the Explore dropdown menu to choose the Loki datasource and bring up the Loki query browser. @@ -97,7 +97,7 @@ To see every log line other than those that contain the value 401: {container="evaluate-loki_flog_1"} != "401" ``` -Refer to [query examples](../logql/query_examples/) for more examples. +Refer to [query examples]({{}}) for more examples. ## Stop and clean up the test environment diff --git a/docs/sources/installation/_index.md b/docs/sources/installation/_index.md index cdd4ce8f1d..d0fa6120bb 100644 --- a/docs/sources/installation/_index.md +++ b/docs/sources/installation/_index.md @@ -1,5 +1,6 @@ --- title: Installation +description: Installation weight: 200 --- @@ -7,13 +8,13 @@ weight: 200 There are several methods of installing Loki and Promtail: -- [Install using Tanka (recommended)](tanka/) -- [Install using Helm](helm/) -- [Install through Docker or Docker Compose](docker/) -- [Install and run locally](local/) -- [Install from source](install-from-source/) +- [Install using Tanka (recommended)]({{}}) +- [Install using Helm]({{}}) +- [Install through Docker or Docker Compose]({{}}) +- [Install and run locally]({{}}) +- [Install from source]({{}}) -The [Sizing Tool](sizing/) can be used to determine the proper cluster sizing +The [Sizing Tool]({{}}) can be used to determine the proper cluster sizing given an expected ingestion rate and query performance. 
It targets the Helm installation on Kubernetes. diff --git a/docs/sources/installation/docker.md b/docs/sources/installation/docker.md index b22be133fc..1a06b31904 100644 --- a/docs/sources/installation/docker.md +++ b/docs/sources/installation/docker.md @@ -1,5 +1,6 @@ --- -title: Docker +title: Install Grafana Loki with Docker or Docker Compose +description: Docker weight: 30 --- # Install Grafana Loki with Docker or Docker Compose diff --git a/docs/sources/installation/helm/_index.md b/docs/sources/installation/helm/_index.md index 954167a43a..41bf10cff7 100644 --- a/docs/sources/installation/helm/_index.md +++ b/docs/sources/installation/helm/_index.md @@ -12,7 +12,7 @@ keywords: - installation --- -# Install Loki using Helm +# Install Grafana Loki with Helm The [Helm](https://helm.sh/) chart allows you to configure, install, and upgrade Grafana Loki within a Kubernetes cluster. @@ -22,4 +22,4 @@ This guide references the Loki Helm chart version 3.0 or greater and contains th ## Reference -[Values reference](reference) +[Values reference]({{}}) diff --git a/docs/sources/installation/helm/concepts.md b/docs/sources/installation/helm/concepts.md index 9a2191640d..95b1cc6152 100644 --- a/docs/sources/installation/helm/concepts.md +++ b/docs/sources/installation/helm/concepts.md @@ -11,7 +11,7 @@ keywords: - caching --- -# Components +# Helm Chart Components This section describes the components installed by the Helm Chart. @@ -25,7 +25,7 @@ This chart includes dashboards for monitoring Loki. These require the scrape con ## Canary -This chart installs the [canary](../../../operations/loki-canary) and its alerts by default. This is another tool to verify the Loki deployment is in a healthy state. It can be disabled with `monitoring.lokiCanary.enabled=false`. +This chart installs the [canary]({{}}) and its alerts by default. This is another tool to verify the Loki deployment is in a healthy state. It can be disabled with `monitoring.lokiCanary.enabled=false`. ## Gateway diff --git a/docs/sources/installation/helm/configure-storage/index.md b/docs/sources/installation/helm/configure-storage/index.md index 8e0e9aa543..5f75e27ce1 100644 --- a/docs/sources/installation/helm/configure-storage/index.md +++ b/docs/sources/installation/helm/configure-storage/index.md @@ -11,9 +11,9 @@ keywords: - minio --- -# Configure Loki's storage +# Configure storage -The [scalable](../install-scalable/) installation requires a managed object store such as AWS S3 or Google Cloud Storage or a self-hosted store such as Minio. The [single binary](../install-monolithic/) installation can only use the filesystem for storage. +The [scalable]({{}}) installation requires a managed object store such as AWS S3 or Google Cloud Storage or a self-hosted store such as Minio. The [single binary]({{}}) installation can only use the filesystem for storage. This guide assumes Loki will be installed in one of the modes above and that a `values.yaml` has been created. @@ -37,7 +37,7 @@ This guide assumes Loki will be installed in on of the modes above and that a `v **To grant access to S3 via an IAM role without providing credentials:** -1. Provision an IAM role, policy and S3 bucket as described in [Storage](../../../storage/#aws-deployment-s3-single-store). - If the Terraform module was used note the annotation emitted by `terraform output -raw annotation`. +1. Provision an IAM role, policy and S3 bucket as described in [Storage]({{}}). - If the Terraform module was used note the annotation emitted by `terraform output -raw annotation`. 2.
Add the IAM role annotation to the service account in `values.yaml`: diff --git a/docs/sources/installation/helm/install-monolithic/index.md b/docs/sources/installation/helm/install-monolithic/index.md index 8880345979..ebb3e7427f 100644 --- a/docs/sources/installation/helm/install-monolithic/index.md +++ b/docs/sources/installation/helm/install-monolithic/index.md @@ -8,11 +8,11 @@ weight: 100 keywords: [] --- -# Install the single binary Helm Chart +# Install the Single Binary Helm Chart This Helm Chart installation runs the Grafana Loki *single binary* within a Kubernetes cluster. -If the storage type is set to `filesystem`, this chart configures Loki to run the `all` target in a [monolithic mode](../../../../fundamentals/architecture/deployment-modes/#monolithic-mode), designed to work with a filesystem storage. It will also configure meta-monitoring of metrics and logs. +If the storage type is set to `filesystem`, this chart configures Loki to run the `all` target in a [monolithic mode]({{}}), designed to work with a filesystem storage. It will also configure meta-monitoring of metrics and logs. It is not possible to install the single binary with a different storage type. diff --git a/docs/sources/installation/helm/install-scalable/index.md b/docs/sources/installation/helm/install-scalable/index.md index 77a1b86c6c..210d775ab0 100644 --- a/docs/sources/installation/helm/install-scalable/index.md +++ b/docs/sources/installation/helm/install-scalable/index.md @@ -14,7 +14,7 @@ keywords: [] This Helm Chart installation runs the Grafana Loki cluster within a Kubernetes cluster. -If object storge is configured, this chart configures Loki to run `read` and `write` targets in a [scalable mode](../../../../fundamentals/architecture/deployment-modes/#simple-scalable-deployment-mode), highly available architecture (3 replicas of each) designed to work with AWS S3 object storage. It will also configure meta-monitoring of metrics and logs. +If object storage is configured, this chart configures Loki to run `read` and `write` targets in a [scalable mode]({{}}), highly available architecture (3 replicas of each) designed to work with AWS S3 object storage. It will also configure meta-monitoring of metrics and logs. It is not possible to run the scalable mode with the `filesystem` storage. @@ -58,7 +58,7 @@ It is not possible to run the scalable mode with the `filesystem` storage. insecure: false ``` - Consult the [Reference](../reference) for configuring other storage providers. + Consult the [Reference]({{}}) for configuring other storage providers. - Define the AWS S3 credentials in the file. diff --git a/docs/sources/installation/helm/migrate-from-distributed/index.md b/docs/sources/installation/helm/migrate-from-distributed/index.md index efe001e27b..cb106c58e3 100644 --- a/docs/sources/installation/helm/migrate-from-distributed/index.md +++ b/docs/sources/installation/helm/migrate-from-distributed/index.md @@ -11,7 +11,7 @@ keywords: - distributed --- -# Migrating from `loki-distributed` +# Migrate from `loki-distributed` Helm Chart This guide will walk you through migrating to the `loki` Helm Chart, v3.0 or higher, from the `loki-distributed` Helm Chart (v0.63.2 at time of writing). The process consists of deploying the new `loki` Helm Chart alongside the existing `loki-distributed` installation. By joining the new cluster to the existing cluster's ring, you will create one large cluster.
This will allow you to manually bring down the `loki-distributed` components in a safe way to avoid any data loss. diff --git a/docs/sources/installation/helm/migrate-to-three-scalable-targets/index.md b/docs/sources/installation/helm/migrate-to-three-scalable-targets/index.md index 942f6648d0..215c693690 100644 --- a/docs/sources/installation/helm/migrate-to-three-scalable-targets/index.md +++ b/docs/sources/installation/helm/migrate-to-three-scalable-targets/index.md @@ -12,7 +12,7 @@ keywords: - simple --- -# Migrating to Three Scalable Targets +# Migrate To Three Scalable Targets This guide will walk you through migrating from the old, two target, scalable configuration to the new, three target, scalable configuration. This new configuration introduces a `backend` component, and reduces the `read` component to running just a `Querier` and `QueryFrontend`, allowing it to be run as a kubernetes `Deployment` rather than a `StatefulSet`. diff --git a/docs/sources/installation/install-from-source.md b/docs/sources/installation/install-from-source.md index 0613c6bbac..429c02ab40 100644 --- a/docs/sources/installation/install-from-source.md +++ b/docs/sources/installation/install-from-source.md @@ -1,5 +1,6 @@ --- title: Build from source +description: Build from source weight: 50 --- # Build from source diff --git a/docs/sources/installation/istio.md b/docs/sources/installation/istio.md index a2237e9828..7cd409aa9d 100644 --- a/docs/sources/installation/istio.md +++ b/docs/sources/installation/istio.md @@ -1,3 +1,8 @@ +--- +title: Installation instructions for Istio +description: Installation instructions for Istio +weight: 50 +--- # Installation instructions for Istio The ingestor, querier, etc. might start, but if those changes are not made, you will see logs like diff --git a/docs/sources/installation/local.md b/docs/sources/installation/local.md index 007665f53a..1004898370 100644 --- a/docs/sources/installation/local.md +++ b/docs/sources/installation/local.md @@ -1,8 +1,9 @@ --- title: Local +description: Install and run Grafana Loki locally weight: 40 --- -# Install and run Grafana Loki locally +# Local In order to log events with Grafana Loki, download and install both Promtail and Loki. - Loki is the logging engine. @@ -15,7 +16,7 @@ The configuration specifies running Loki as a single binary. 1. Navigate to the [release page](https://github.com/grafana/loki/releases/). 2. Scroll down to the Assets section under the version that you want to install. 3. Download the Loki and Promtail .zip files that correspond to your system. - **Note:** Do not download LogCLI or Loki Canary at this time. [LogCLI](../../getting-started/logcli/) allows you to run Loki queries in a command line interface. [Loki Canary](../../operations/loki-canary/) is a tool to audit Loki performance. + **Note:** Do not download LogCLI or Loki Canary at this time. `LogCLI` allows you to run Loki queries in a command line interface. [Loki Canary]({{}}) is a tool to audit Loki performance. 4. Unzip the package contents into the same directory. This is where the two programs will run. 5. In the command line, change directory (`cd` on most systems) to the directory with Loki and Promtail. Copy and paste the commands below into your command line to download generic configuration files. **Note:** Use the corresponding Git refs that match your downloaded Loki version to get the correct configuration file. 
For example, if you are using Loki version 2.6.1, you need to use the `https://raw.githubusercontent.com/grafana/loki/v2.6.1/cmd/loki/loki-local-config.yaml` URL to download the configuration file that corresponds to the Loki version you aim to run. @@ -40,7 +41,7 @@ The configuration specifies running Loki as a single binary. Loki runs and displays Loki logs in your command line and on http://localhost:3100/metrics. The next step will be running an agent to send logs to Loki. -To do so with Promtail, refer to [get logs into Loki](../../getting-started/get-logs-into-loki/). +To do so with Promtail, refer to the [Promtail configuration]({{}}). ## Release binaries - openSUSE Linux only diff --git a/docs/sources/installation/sizing/index.md b/docs/sources/installation/sizing/index.md index 0217a5cd48..c31eec734b 100644 --- a/docs/sources/installation/sizing/index.md +++ b/docs/sources/installation/sizing/index.md @@ -16,7 +16,7 @@ keywords: [] This tool helps to generate a Helm Chart's `values.yaml` file based on specified expected ingestion, retention rate and node type. It will always configure a - [scalable](../../fundamentals/architecture/deployment-modes/#simple-scalable-deployment-mode) deployment. The storage needs to be configured after generation. + [scalable]({{}}) deployment. The storage needs to be configured after generation.
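The sizing tool leaves object storage unset in the generated `values.yaml`. A minimal sketch of what the storage block might look like for S3 (bucket names and region are placeholders, and the exact key names can differ between Loki Helm chart versions):

```yaml
# Sketch only: bucket names and region are placeholders; key names may vary
# between Loki Helm chart versions.
loki:
  storage:
    type: s3
    bucketNames:
      chunks: my-loki-chunks
      ruler: my-loki-ruler
    s3:
      region: us-east-1
```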
diff --git a/docs/sources/installation/tanka.md b/docs/sources/installation/tanka.md index 51f837a2de..486a92bbcf 100644 --- a/docs/sources/installation/tanka.md +++ b/docs/sources/installation/tanka.md @@ -1,8 +1,9 @@ --- title: Tanka +description: Install Grafana Loki with Tanka weight: 10 --- -# Install Grafana Loki with Tanka +# Tanka [Tanka](https://tanka.dev) is a reimplementation of [Ksonnet](https://ksonnet.io) that Grafana Labs created after Ksonnet was @@ -42,7 +43,7 @@ jb install github.com/grafana/loki/production/ksonnet/promtail@main Revise the YAML contents of `environments/loki/main.jsonnet`, updating these variables: - Update the `username`, `password`, and the relevant `htpasswd` variable values. -- Update the S3 or GCS variable values, depending on your object storage type. See [storage_config](https://grafana.com/docs/loki/latest/configuration/#storage_config) for more configuration details. +- Update the S3 or GCS variable values, depending on your object storage type. See [storage_config](/docs/loki/latest/configuration/#storage_config) for more configuration details. - Remove from the configuration the S3 or GCS object storage variables that are not part of your setup. - Update the value of `boltdb_shipper_shared_store` to the type of object storage you are using. Options are `gcs`, `s3`, `azure`, or `filesystem`. Update the `object_store` variable under the `schema_config` section to the same value. - Update the Promtail configuration `container_root_path` variable's value to reflect your root path for the Docker daemon. Run `docker info | grep "Root Dir"` to acquire your root path. diff --git a/docs/sources/lids/0001-Introduction.md b/docs/sources/lids/0001-Introduction.md index a3ad630f61..3531bcb58c 100644 --- a/docs/sources/lids/0001-Introduction.md +++ b/docs/sources/lids/0001-Introduction.md @@ -1,8 +1,9 @@ --- title: "0001: Introducing LIDs" +description: "0001: Introducing LIDs" --- -# Introduction of LIDs +# 0001: Introducing LIDs **Author:** Danny Kopping (danny.kopping@grafana.com) @@ -50,4 +51,4 @@ Inspired by Python's [PEP](https://peps.python.org/pep-0001/) and Kafka's [KIP]( Google Docs were considered for this, but they are less useful because: - they would need to be owned by the Grafana Labs organisation, so that they remain viewable even if the author closes their account -- we already have previous [design documents](../design-documents) in our documentation and, in a recent ([5th Jan 2023](https://docs.google.com/document/d/1MNjiHQxwFukm2J4NJRWyRgRIiK7VpokYyATzJ5ce-O8/edit#heading=h.78vexgrrtw5a)) community call, the community expressed a preference for this type of approach \ No newline at end of file +- we already have previous [design documents]({{}}) in our documentation and, in a recent ([5th Jan 2023](https://docs.google.com/document/d/1MNjiHQxwFukm2J4NJRWyRgRIiK7VpokYyATzJ5ce-O8/edit#heading=h.78vexgrrtw5a)) community call, the community expressed a preference for this type of approach diff --git a/docs/sources/lids/_index.md b/docs/sources/lids/_index.md index 7fad2c9d38..f50a093a44 100644 --- a/docs/sources/lids/_index.md +++ b/docs/sources/lids/_index.md @@ -1,5 +1,6 @@ --- title: Loki Improvement Documents (LIDs) +description: Loki Improvement Documents (LIDs) weight: 1400 --- @@ -36,4 +37,4 @@ Once a PR is submitted, it will be reviewed by the sponsor, as well as intereste - A LID is considered completed once it is either rejected or the improvement has been included in a release. 
- `CHANGELOG` entries should reference LIDs where applicable. - Significant changes to the LID process should be proposed [with a LID](https://www.google.com/search?q=recursion). -- LIDs should be shared with the community on the [`#loki` channel on Slack](https://slack.grafana.com) for comment, and the sponsor should wait **at least 2 weeks** before accepting a proposal. \ No newline at end of file +- LIDs should be shared with the community on the [`#loki` channel on Slack](https://slack.grafana.com) for comment, and the sponsor should wait **at least 2 weeks** before accepting a proposal. diff --git a/docs/sources/lids/template.md b/docs/sources/lids/template.md index f689998fd7..d9cdffeb8f 100644 --- a/docs/sources/lids/template.md +++ b/docs/sources/lids/template.md @@ -1,8 +1,9 @@ --- title: "XXXX: Template" +description: "Template" --- -# Title +# XXXX: Template > _NOTE: the file should be named `_DRAFT_.md` and be placed in the `docs/sources/lids` directory. Once accepted, it will be assigned a LID number and the file will be renamed by the sponsor.
@@ -56,4 +57,4 @@ _Describe the first proposal, what are the benefits and trade-offs that this app _Describe the nth proposal(s), what are the benefits and trade-offs that these approaches have?_ -## Other Notes \ No newline at end of file +## Other Notes diff --git a/docs/sources/logql/_index.md b/docs/sources/logql/_index.md index e3266f9da4..98dd7f1718 100644 --- a/docs/sources/logql/_index.md +++ b/docs/sources/logql/_index.md @@ -1,5 +1,6 @@ --- -title: LogQL +title: "LogQL: Log query language" +description: "LogQL: Log query language" weight: 700 --- # LogQL: Log query language @@ -10,8 +11,8 @@ LogQL uses labels and operators for filtering. There are two types of LogQL queries: -- [Log queries](log_queries/) return the contents of log lines. -- [Metric queries](metric_queries/) extend log queries to calculate values +- [Log queries]({{}}) return the contents of log lines. +- [Metric queries]({{}}) extend log queries to calculate values based on query results. ## Binary operators diff --git a/docs/sources/logql/analyzer.md b/docs/sources/logql/analyzer.md index 694d5c5dc3..0d4cbf77de 100644 --- a/docs/sources/logql/analyzer.md +++ b/docs/sources/logql/analyzer.md @@ -1,5 +1,5 @@ --- -title: LoqQL Analyzer +title: LogQL Analyzer menuTitle: LogQL Analyzer description: The LogQL Analyzer is an inline educational tool for experimenting with writing LogQL queries. weight: 60 @@ -18,7 +18,7 @@ A set of example log lines are included for each format. Use the provided example log lines, or copy and paste your own log lines into the example log lines box. Use the provided example query, or enter your own query. -The [log stream selector](../log_queries/#log-stream-selector) remains fixed for all possible example queries. +The [log stream selector]({{}}) remains fixed for all possible example queries. Modify the remainder of the log line and click on the **Run query** button to run the entered query against the example log lines. diff --git a/docs/sources/logql/ip.md b/docs/sources/logql/ip.md index c0005950cf..e04aab121e 100644 --- a/docs/sources/logql/ip.md +++ b/docs/sources/logql/ip.md @@ -1,5 +1,6 @@ --- title: Matching IP addresses +description: Matching IP addresses weight: 40 --- diff --git a/docs/sources/logql/log_queries.md b/docs/sources/logql/log_queries/_index.md similarity index 97% rename from docs/sources/logql/log_queries.md rename to docs/sources/logql/log_queries/_index.md index f2c78eb5c5..e41c6141e3 100644 --- a/docs/sources/logql/log_queries.md +++ b/docs/sources/logql/log_queries/_index.md @@ -1,12 +1,13 @@ --- title: Log queries +description: Log queries weight: 10 --- # Log queries All LogQL queries contain a **log stream selector**. -![parts of a query](../query_components.png) +![parts of a query](./query_components.png) Optionally, the log stream selector can be followed by a **log pipeline**. A log pipeline is a set of stage expressions that are chained together and applied to the selected log streams. Each expression can filter out, parse, or mutate log lines and their respective labels. @@ -194,7 +195,7 @@ will always run faster than Line filter expressions are the fastest way to filter logs once the log stream selectors have been applied. -Line filter expressions have support matching IP addresses. See [Matching IP addresses](../ip/) for details. +Line filter expressions have support for matching IP addresses. See [Matching IP addresses]({{}}) for details.
### Removing color codes @@ -234,7 +235,7 @@ Using Duration, Number and Bytes will convert the label value prior to comparisi For instance, `logfmt | duration > 1m and bytes_consumed > 20MB` -If the conversion of the label value fails, the log line is not filtered and an `__error__` label is added. To filters those errors see the [pipeline errors](../#pipeline-errors) section. +If the conversion of the label value fails, the log line is not filtered and an `__error__` label is added. To filter those errors, see the [pipeline errors]({{}}) section. You can chain multiple predicates using `and` and `or` which respectively express the `and` and `or` binary operations. `and` can be equivalently expressed by a comma, a space or another pipe. Label filters can be placed anywhere in a log pipeline. @@ -265,11 +266,11 @@ To evaluate the logical `and` first, use parenthesis, as in this example: > Label filter expressions are the only expression allowed after the unwrap expression. This is mainly to allow filtering errors from the metric extraction. -Label filter expressions have support matching IP addresses. See [Matching IP addresses](../ip/) for details. +Label filter expressions have support for matching IP addresses. See [Matching IP addresses]({{}}) for details. ### Parser expression -Parser expression can parse and extract labels from the log content. Those extracted labels can then be used for filtering using [label filter expressions](#label-filter-expression) or for [metric aggregations](../metric_queries). +Parser expressions can parse and extract labels from the log content. Those extracted labels can then be used for filtering using [label filter expressions](#label-filter-expression) or for [metric aggregations]({{}}). Extracted label keys are automatically sanitized by all parsers, to follow Prometheus metric name convention. (They can only contain ASCII letters and digits, as well as underscores and colons. They cannot start with a digit.) @@ -289,7 +290,7 @@ If an extracted label key name already exists in the original log stream, the ex Loki supports [JSON](#json), [logfmt](#logfmt), [pattern](#pattern), [regexp](#regular-expression) and [unpack](#unpack) parsers. It's easier to use the predefined parsers `json` and `logfmt` when you can. If you can't, the `pattern` and `regexp` parsers can be used for log lines with an unusual structure. The `pattern` parser is easier and faster to write; it also outperforms the `regexp` parser. -Multiple parsers can be used by a single log pipeline. This is useful for parsing complex logs. There are examples in [Multiple parsers](#multiple-parsers). +Multiple parsers can be used by a single log pipeline. This is useful for parsing complex logs. There are examples in [Multiple parsers]({{}}). #### JSON @@ -499,7 +500,7 @@ those labels: #### unpack -The `unpack` parser parses a JSON log line, unpacking all embedded labels from Promtail's [`pack` stage]({{< relref "../clients/promtail/stages/pack.md" >}}). +The `unpack` parser parses a JSON log line, unpacking all embedded labels from Promtail's [`pack` stage]({{< relref "../../clients/promtail/stages/pack.md" >}}). **A special property `_entry` will also be used to replace the original log line**.
For example, using `| unpack` with the log line: @@ -541,7 +542,7 @@ If we have the following labels `ip=1.1.1.1`, `status=200` and `duration=3000`(m The above query will give us the `line` as `1.1.1.1 200 3` -See [template functions](../template_functions/) to learn about available functions in the template format. +See [template functions]({{}}) to learn about available functions in the template format. ### Labels format expression diff --git a/docs/sources/logql/query_components.png b/docs/sources/logql/log_queries/query_components.png similarity index 100% rename from docs/sources/logql/query_components.png rename to docs/sources/logql/log_queries/query_components.png diff --git a/docs/sources/logql/metric_queries.md b/docs/sources/logql/metric_queries.md index 4e7fc0cbc2..68dedb1798 100644 --- a/docs/sources/logql/metric_queries.md +++ b/docs/sources/logql/metric_queries.md @@ -1,5 +1,6 @@ --- title: Metric queries +description: Metric queries weight: 20 --- @@ -55,7 +56,7 @@ Examples: ### Unwrapped range aggregations -Unwrapped ranges uses extracted labels as sample values instead of log lines. However to select which label will be used within the aggregation, the log query must end with an unwrap expression and optionally a label filter expression to discard [errors](../#pipeline-errors). +Unwrapped ranges use extracted labels as sample values instead of log lines. However, to select which label will be used within the aggregation, the log query must end with an unwrap expression and optionally a label filter expression to discard [errors]({{}}). The unwrap expression is noted `| unwrap label_identifier` where the label identifier is the label name to use for extracting sample values. @@ -91,7 +92,7 @@ Which can be used to aggregate over distinct labels dimensions by including a `w `without` removes the listed labels from the result vector, while all other labels are preserved in the output. `by` does the opposite and drops labels that are not listed in the `by` clause, even if their label values are identical between all elements of the vector. -See [Unwrap examples](../query_examples/#unwrap-examples) for query examples that use the unwrap expression. +See [Unwrap examples]({{}}) for query examples that use the unwrap expression. ## Built-in aggregation operators @@ -122,7 +123,7 @@ The aggregation operators can either be used to aggregate over all label values The `without` clause removes the listed labels from the resulting vector, keeping all others. The `by` clause does the opposite, dropping labels that are not listed in the clause, even if their label values are identical between all elements of the vector. -See [vector aggregation examples](../query_examples/#vector-aggregation-examples) for query examples that use vector aggregation expressions. +See [vector aggregation examples]({{}}) for query examples that use vector aggregation expressions.
## Functions diff --git a/docs/sources/logql/template_functions.md b/docs/sources/logql/template_functions.md index 25119eb9de..4a4d853cf0 100644 --- a/docs/sources/logql/template_functions.md +++ b/docs/sources/logql/template_functions.md @@ -1,5 +1,6 @@ --- title: Template functions +description: Template functions weight: 30 --- @@ -714,4 +715,4 @@ Examples: Example of a query to print how many times XYZ occurs in a line: ```logql {job="xyzlog"} | line_format `{{ __line__ | count "XYZ"}}` -``` \ No newline at end of file +``` diff --git a/docs/sources/maintaining/_index.md b/docs/sources/maintaining/_index.md index 1c837eec93..7ea68d3141 100644 --- a/docs/sources/maintaining/_index.md +++ b/docs/sources/maintaining/_index.md @@ -1,8 +1,9 @@ --- title: Maintaining +description: Grafana Loki Maintainers' Guide weight: 1200 --- -# Grafana Loki Maintainers' Guide +# Maintaining This section details information for maintainers of Grafana Loki. diff --git a/docs/sources/maintaining/release-loki-build-image.md b/docs/sources/maintaining/release-loki-build-image.md index 35b96bcd82..0c3d666a9a 100644 --- a/docs/sources/maintaining/release-loki-build-image.md +++ b/docs/sources/maintaining/release-loki-build-image.md @@ -1,7 +1,8 @@ --- title: Releasing Loki Build Image +description: Releasing Loki Build Image --- -# Releasing `loki-build-image` +# Releasing Loki Build Image The [`loki-build-image`](https://github.com/grafana/loki/tree/master/loki-build-image) is the Docker image used to run tests and build Grafana Loki binaries in CI. diff --git a/docs/sources/maintaining/release.md b/docs/sources/maintaining/release.md index 5b77d0305c..aeee359cc8 100644 --- a/docs/sources/maintaining/release.md +++ b/docs/sources/maintaining/release.md @@ -1,5 +1,6 @@ --- -title: Releasing Loki +title: Releasing Grafana Loki +description: Releasing Grafana Loki --- # Releasing Grafana Loki diff --git a/docs/sources/operations/_index.md b/docs/sources/operations/_index.md index f0267cb4ee..eb163112d4 100644 --- a/docs/sources/operations/_index.md +++ b/docs/sources/operations/_index.md @@ -1,5 +1,6 @@ --- title: Operations +description: Operations weight: 800 --- diff --git a/docs/sources/operations/authentication.md b/docs/sources/operations/authentication.md index 6642bab358..c1e28a00ff 100644 --- a/docs/sources/operations/authentication.md +++ b/docs/sources/operations/authentication.md @@ -1,8 +1,9 @@ --- title: Authentication +description: Authentication weight: 10 --- -# Authentication with Grafana Loki +# Authentication Grafana Loki does not come with any included authentication layer. Operators are expected to run an authenticating reverse proxy in front of your services, such @@ -11,7 +12,7 @@ as NGINX using basic auth or an OAuth2 proxy. Note that when using Loki in multi-tenant mode, Loki requires the HTTP header `X-Scope-OrgID` to be set to a string identifying the tenant; the responsibility of populating this value should be handled by the authenticating reverse proxy. -Read the [multi-tenancy](../multi-tenancy/) documentation for more information. +Read the [multi-tenancy]({{}}) documentation for more information. For information on authenticating Promtail, please see the docs for [how to -configure Promtail](../../clients/promtail/configuration/). +configure Promtail]({{}}). 
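As a concrete illustration of the Promtail side of this, a minimal sketch of a client entry that sends basic-auth credentials and sets the tenant (the `tenant_id` value is what populates the `X-Scope-OrgID` header); the URL, tenant ID, and credential path are placeholders:

```yaml
# Sketch only: URL, tenant ID, and credential path are placeholders.
clients:
  - url: https://loki.example.com/loki/api/v1/push
    tenant_id: team-a                      # sent as the X-Scope-OrgID header
    basic_auth:
      username: promtail
      password_file: /etc/promtail/secrets/password
```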
diff --git a/docs/sources/operations/automatic-stream-sharding.md b/docs/sources/operations/automatic-stream-sharding.md index ebbb27dfd0..b93f164e0b 100644 --- a/docs/sources/operations/automatic-stream-sharding.md +++ b/docs/sources/operations/automatic-stream-sharding.md @@ -1,5 +1,5 @@ --- -title: Automatic Stream Sharding +title: Automatic stream sharding menuTitle: Automatic stream sharding description: Automatic stream sharding can control issues around the per-stream rate limit weight: 110 diff --git a/docs/sources/operations/blocking-queries.md b/docs/sources/operations/blocking-queries.md index 15f18b2ad8..c9d421b346 100644 --- a/docs/sources/operations/blocking-queries.md +++ b/docs/sources/operations/blocking-queries.md @@ -1,5 +1,6 @@ --- title: Blocking Queries +description: Blocking Queries weight: 60 --- # Blocking Queries @@ -8,7 +9,7 @@ In certain situations, you may not be able to control the queries being sent to may be intentionally or unintentionally expensive to run, and they may affect the overall stability or cost of running your service. -You can block queries using [per-tenant overrides](../configuration/#runtime-configuration-file), like so: +You can block queries using [per-tenant overrides]({{}}), like so: ```yaml overrides: @@ -44,4 +45,4 @@ Blocked queries are logged, as well as counted in the `loki_blocked_queries` met ## Scope -Queries received via the API and executed as [alerting/recording rules](../rules/) will be blocked. \ No newline at end of file +Queries received via the API and executed as [alerting/recording rules]({{}}) will be blocked. diff --git a/docs/sources/operations/grafana.md b/docs/sources/operations/grafana.md index e00c46f4df..22632201bc 100644 --- a/docs/sources/operations/grafana.md +++ b/docs/sources/operations/grafana.md @@ -1,14 +1,15 @@ --- title: Loki in Grafana +description: Loki in Grafana weight: 15 aliases: - /docs/loki/latest/getting-started/grafana/ --- # Loki in Grafana -[Grafana 6.0](https://grafana.com/grafana/download/6.0.0) and more recent +[Grafana 6.0](/grafana/download/6.0.0) and more recent versions have built-in support for Grafana Loki. -Use [Grafana 6.3](https://grafana.com/grafana/download/6.3.0) or a more +Use [Grafana 6.3](/grafana/download/6.3.0) or a more recent version to take advantage of [LogQL]({{< relref "../logql/_index.md" >}}) functionality. 1. Log into your Grafana instance. If this is your first time running diff --git a/docs/sources/operations/multi-tenancy.md b/docs/sources/operations/multi-tenancy.md index 0a57d8b704..dd0b6cf692 100644 --- a/docs/sources/operations/multi-tenancy.md +++ b/docs/sources/operations/multi-tenancy.md @@ -1,8 +1,9 @@ --- title: Multi-tenancy +description: Multi-tenancy weight: 50 --- -# Grafana Loki Multi-Tenancy +# Multi-tenancy Grafana Loki is a multi-tenant system; requests and data for tenant A are isolated from tenant B. Requests to the Loki API should include an HTTP header diff --git a/docs/sources/operations/observability.md b/docs/sources/operations/observability.md index 6a97a7da20..c3954bf814 100644 --- a/docs/sources/operations/observability.md +++ b/docs/sources/operations/observability.md @@ -1,8 +1,9 @@ --- title: Observability +description: Observing Grafana Loki weight: 20 --- -# Observing Grafana Loki +# Observability Both Grafana Loki and Promtail expose a `/metrics` endpoint that expose Prometheus metrics (the default port is 3100 for Loki and 80 for Promtail). 
You will need @@ -90,10 +91,10 @@ Most of these metrics are counters and should continuously increase during norma If Promtail uses any pipelines with metrics stages, those metrics will also be exposed by Promtail at its `/metrics` endpoint. See Promtail's documentation on -[Pipelines](../../clients/promtail/pipelines/) for more information. +[Pipelines]({{}}) for more information. An example Grafana dashboard was built by the community and is available as -dashboard [10004](https://grafana.com/dashboards/10004). +dashboard [10004](/dashboards/10004). ## Metrics cardinality diff --git a/docs/sources/operations/overrides-exporter.md b/docs/sources/operations/overrides-exporter.md index fc9c51aa19..f2858cbbdd 100644 --- a/docs/sources/operations/overrides-exporter.md +++ b/docs/sources/operations/overrides-exporter.md @@ -1,5 +1,6 @@ --- -title: "Overrides Exporter" +title: "Overrides exporter" +description: "Overrides Exporter" weight: 20 --- diff --git a/docs/sources/operations/recording-rules.md b/docs/sources/operations/recording-rules.md index 03a9d0553b..ee86bdf451 100644 --- a/docs/sources/operations/recording-rules.md +++ b/docs/sources/operations/recording-rules.md @@ -1,12 +1,13 @@ --- title: Recording Rules +description: Recording Rules --- # Recording Rules Recording rules are evaluated by the `ruler` component. Each `ruler` acts as its own `querier`, in the sense that it executes queries against the store without using the `query-frontend` or `querier` components. It will respect all query -[limits](https://grafana.com/docs/loki/latest/configuration/#limits_config) put in place for the `querier`. +[limits](/docs/loki/latest/configuration/#limits_config) put in place for the `querier`. Loki's implementation of recording rules largely reuses Prometheus' code. @@ -48,7 +49,7 @@ excessively large due to truncation. ## Scaling -See Mimir's guide for [configuring Grafana Mimir hash rings](https://grafana.com/docs/mimir/latest/operators-guide/configuring/configuring-hash-rings/) for scaling the ruler using a ring. +See Mimir's guide for [configuring Grafana Mimir hash rings](/docs/mimir/latest/operators-guide/configuring/configuring-hash-rings/) for scaling the ruler using a ring. Note: the `ruler` shards by rule _group_, not by individual rules. This is an artifact of the fact that Prometheus recording rules need to run in order since one recording rule can reuse another - but this is not possible in Loki. @@ -69,8 +70,8 @@ so a `Persistent Volume` should be utilised. ### Per-Tenant Limits Remote-write can be configured at a global level in the base configuration, and certain parameters tuned specifically on -a per-tenant basis. Most of the configuration options [defined here](../../configuration/#ruler) -have [override options](../../configuration/#limits_config) (which can be also applied at runtime!). +a per-tenant basis. Most of the configuration options [defined here]({{}}) +have [override options]({{}}) (which can be also applied at runtime!). ### Tuning @@ -122,7 +123,7 @@ aware that if the remote storage is down for longer than `ruler.wal.max-age`, da In cases 2 & 3, you should consider [tuning](#tuning) remote-write appropriately. -Further reading: see [this blog post](https://grafana.com/blog/2021/04/12/how-to-troubleshoot-remote-write-issues-in-prometheus/) +Further reading: see [this blog post](/blog/2021/04/12/how-to-troubleshoot-remote-write-issues-in-prometheus/) by Prometheus maintainer Callum Styan. 
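To make the remote-write discussion above more concrete, here is a minimal sketch of a base `ruler` block with a WAL directory and a single remote-write client; the endpoint is a placeholder, and exact field names can vary between Loki versions:

```yaml
# Sketch only: the remote-write endpoint is a placeholder, and exact field
# names can vary between Loki versions.
ruler:
  wal:
    dir: /loki/ruler-wal
  remote_write:
    enabled: true
    client:
      url: https://prometheus.example.com/api/v1/write
```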
### Appender Not Ready diff --git a/docs/sources/operations/request-validation-rate-limits.md b/docs/sources/operations/request-validation-rate-limits.md index 110c0b9c04..224eb7d348 100644 --- a/docs/sources/operations/request-validation-rate-limits.md +++ b/docs/sources/operations/request-validation-rate-limits.md @@ -1,5 +1,6 @@ --- title: Request Validation & Rate-Limit Errors +description: Request Validation & Rate-Limit Errors weight: 30 --- @@ -26,11 +27,11 @@ Rate-limits are enforced when Loki cannot handle more requests from a tenant. This rate-limit is enforced when a tenant has exceeded their configured log ingestion rate-limit. -One solution if you're seeing samples dropped due to `rate_limited` is simply to increase the rate limits on your Loki cluster. These limits can be modified globally in the [`limits_config`](https://grafana.com/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](https://grafana.com/docs/loki/latest/configuration/#runtime-configuration-file) file. The config options to use are `ingestion_rate_mb` and `ingestion_burst_size_mb`. +One solution if you're seeing samples dropped due to `rate_limited` is simply to increase the rate limits on your Loki cluster. These limits can be modified globally in the [`limits_config`](/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki/latest/configuration/#runtime-configuration-file) file. The config options to use are `ingestion_rate_mb` and `ingestion_burst_size_mb`. Note that you'll want to make sure your Loki cluster has sufficient resources provisioned to be able to accommodate these higher limits. Otherwise your cluster may experience performance degradation as it tries to handle this higher volume of log lines to ingest. - Another option to address samples being dropped due to `rate_limits` is simply to decrease the rate of log lines being sent to your Loki cluster. Consider collecting logs from fewer targets or setting up `drop` stages in Promtail to filter out certain log lines. Promtail's [limits configuration](https://grafana.com/docs/loki/latest/clients/promtail/configuration/#limits_config) also gives you the ability to control the volume of logs Promtail remote writes to your Loki cluster. + Another option to address samples being dropped due to `rate_limits` is simply to decrease the rate of log lines being sent to your Loki cluster. Consider collecting logs from fewer targets or setting up `drop` stages in Promtail to filter out certain log lines. Promtail's [limits configuration](/docs/loki/latest/clients/promtail/configuration/#limits_config) also gives you the ability to control the volume of logs Promtail remote writes to your Loki cluster. | Property | Value | @@ -48,9 +49,9 @@ This limit is enforced when a single stream reaches its rate-limit. Each stream has a rate-limit applied to it to prevent individual streams from overwhelming the set of ingesters it is distributed to (the size of that set is equal to the `replication_factor` value). -This value can be modified globally in the [`limits_config`](https://grafana.com/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](https://grafana.com/docs/loki/latest/configuration/#runtime-configuration-file) file. The config options to adjust are `per_stream_rate_limit` and `per_stream_rate_limit_burst`. 
+This value can be modified globally in the [`limits_config`](/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki/latest/configuration/#runtime-configuration-file) file. The config options to adjust are `per_stream_rate_limit` and `per_stream_rate_limit_burst`. -Another option you could consider to decrease the rate of samples dropped due to `per_stream_rate_limit` is to split the stream that is getting rate limited into several smaller streams. A third option is to use Promtail's [limit stage](https://grafana.com/docs/loki/latest/clients/promtail/stages/limit/#limit-stage) to limit the rate of samples sent to the stream hitting the `per_stream_rate_limit`. +Another option you could consider to decrease the rate of samples dropped due to `per_stream_rate_limit` is to split the stream that is getting rate limited into several smaller streams. A third option is to use Promtail's [limit stage](/docs/loki/latest/clients/promtail/stages/limit/#limit-stage) to limit the rate of samples sent to the stream hitting the `per_stream_rate_limit`. We typically recommend setting `per_stream_rate_limit` no higher than 5MB, and `per_stream_rate_limit_burst` no higher than 20MB. @@ -69,7 +70,7 @@ This limit is enforced when a tenant reaches their maximum number of active stre Active streams are held in memory buffers in the ingesters, and if this value becomes sufficiently large then it will cause the ingesters to run out of memory. -This value can be modified globally in the [`limits_config`](https://grafana.com/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](https://grafana.com/docs/loki/latest/configuration/#runtime-configuration-file) file. To increase the allowable active streams, adjust `max_global_streams_per_user`. Alternatively, the number of active streams can be reduced by removing extraneous labels or removing excessive unique label values. +This value can be modified globally in the [`limits_config`](/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki/latest/configuration/#runtime-configuration-file) file. To increase the allowable active streams, adjust `max_global_streams_per_user`. Alternatively, the number of active streams can be reduced by removing extraneous labels or removing excessive unique label values. | Property | Value | |-------------------------|-------------------------| @@ -88,7 +89,7 @@ Validation errors occur when a request violates a validation rule defined by Lok This error occurs when a log line exceeds the maximum allowable length in bytes. The HTTP response will include the stream to which the offending log line belongs as well as its size in bytes. -This value can be modified globally in the [`limits_config`](https://grafana.com/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](https://grafana.com/docs/loki/latest/configuration/#runtime-configuration-file) file. To increase the maximum line size, adjust `max_line_size`. We recommend that you do not increase this value above 256kb for performance reasons. Alternatively, Loki can be configured to ingest truncated versions of log lines over the length limit by using the `max_line_size_truncate` option. 
+This value can be modified globally in the [`limits_config`](/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki/latest/configuration/#runtime-configuration-file) file. To increase the maximum line size, adjust `max_line_size`. We recommend that you do not increase this value above 256kb for performance reasons. Alternatively, Loki can be configured to ingest truncated versions of log lines over the length limit by using the `max_line_size_truncate` option. | Property | Value | |-------------------------|------------------| @@ -127,9 +128,9 @@ This validation error is returned when a stream is submitted without any labels. The `too_far_behind` and `out_of_order` reasons are identical. Loki clusters with `unordered_writes=true` (the default value as of Loki v2.4) use `reason=too_far_behind`. Loki clusters with `unordered_writes=false` use `reason=out_of_order`. -This validation error is returned when a stream is submitted out of order. More details can be found [here](https://grafana.com/docs/loki/latest/configuration/#accept-out-of-order-writes) about Loki's ordering constraints. +This validation error is returned when a stream is submitted out of order. More details can be found [here](/docs/loki/latest/configuration/#accept-out-of-order-writes) about Loki's ordering constraints. -The `unordered_writes` config value can be modified globally in the [`limits_config`](https://grafana.com/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](https://grafana.com/docs/loki/latest/configuration/#runtime-configuration-file) file, whereas `max_chunk_age` is a global configuration. +The `unordered_writes` config value can be modified globally in the [`limits_config`](/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki/latest/configuration/#runtime-configuration-file) file, whereas `max_chunk_age` is a global configuration. This problem can be solved by ensuring that log delivery is configured correctly, or by increasing the `max_chunk_age` value. @@ -146,7 +147,7 @@ It is recommended to resist modifying the default value of `max_chunk_age` as th If the `reject_old_samples` config option is set to `true` (it is by default), then samples will be rejected with `reason=greater_than_max_sample_age` if they are older than the `reject_old_samples_max_age` value. You should not see samples rejected for `reason=greater_than_max_sample_age` if `reject_old_samples=false`. -This value can be modified globally in the [`limits_config`](https://grafana.com/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](https://grafana.com/docs/loki/latest/configuration/#runtime-configuration-file) file. This error can be solved by increasing the `reject_old_samples_max_age` value, or investigating why log delivery is delayed for this particular stream. The stream in question will be returned in the body of the HTTP response. +This value can be modified globally in the [`limits_config`](/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki/latest/configuration/#runtime-configuration-file) file. This error can be solved by increasing the `reject_old_samples_max_age` value, or investigating why log delivery is delayed for this particular stream. The stream in question will be returned in the body of the HTTP response. 
| Property | Value | |-------------------------|-------------------| @@ -161,7 +162,7 @@ This value can be modified globally in the [`limits_config`](https://grafana.com If a sample's timestamp is greater than the current timestamp, Loki allows for a certain grace period during which samples will be accepted. If the grace period is exceeded, the error will occur. -This value can be modified globally in the [`limits_config`](https://grafana.com/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](https://grafana.com/docs/loki/latest/configuration/#runtime-configuration-file) file. This error can be solved by increasing the `creation_grace_period` value, or investigating why this particular stream has a timestamp too far into the future. The stream in question will be returned in the body of the HTTP response. +This value can be modified globally in the [`limits_config`](/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki/latest/configuration/#runtime-configuration-file) file. This error can be solved by increasing the `creation_grace_period` value, or investigating why this particular stream has a timestamp too far into the future. The stream in question will be returned in the body of the HTTP response. | Property | Value | |-------------------------|-------------------| @@ -176,7 +177,7 @@ This value can be modified globally in the [`limits_config`](https://grafana.com If a sample is submitted with more labels than Loki has been configured to allow, it will be rejected with the `max_label_names_per_series` reason. Note that 'series' is the same thing as a 'stream' in Loki - the 'series' term is a legacy name. -This value can be modified globally in the [`limits_config`](https://grafana.com/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](https://grafana.com/docs/loki/latest/configuration/#runtime-configuration-file) file. This error can be solved by increasing the `max_label_names_per_series` value. The stream to which the offending sample (i.e. the one with too many label names) belongs will be returned in the body of the HTTP response. +This value can be modified globally in the [`limits_config`](/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki/latest/configuration/#runtime-configuration-file) file. This error can be solved by increasing the `max_label_names_per_series` value. The stream to which the offending sample (i.e. the one with too many label names) belongs will be returned in the body of the HTTP response. | Property | Value | |-------------------------|-------------------| @@ -191,7 +192,7 @@ This value can be modified globally in the [`limits_config`](https://grafana.com If a sample is sent with a label name that has a length in bytes greater than Loki has been configured to allow, it will be rejected with the `label_name_too_long` reason. -This value can be modified globally in the [`limits_config`](https://grafana.com/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](https://grafana.com/docs/loki/latest/configuration/#runtime-configuration-file) file. This error can be solved by increasing the `max_label_name_length` value, though we do not recommend raising it significantly above the default value of `1024` for performance reasons. 
The offending stream will be returned in the body of the HTTP response. +This value can be modified globally in the [`limits_config`](/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki/latest/configuration/#runtime-configuration-file) file. This error can be solved by increasing the `max_label_name_length` value, though we do not recommend raising it significantly above the default value of `1024` for performance reasons. The offending stream will be returned in the body of the HTTP response. | Property | Value | |-------------------------|-------------------| @@ -206,7 +207,7 @@ This value can be modified globally in the [`limits_config`](https://grafana.com If a sample has a label value with a length in bytes greater than Loki has been configured to allow, it will be rejected for the `label_value_too_long` reason. -This value can be modified globally in the [`limits_config`](https://grafana.com/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](https://grafana.com/docs/loki/latest/configuration/#runtime-configuration-file) file. This error can be solved by increasing the `max_label_value_length` value. The offending stream will be returned in the body of the HTTP response. +This value can be modified globally in the [`limits_config`](/docs/loki/latest/configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki/latest/configuration/#runtime-configuration-file) file. This error can be solved by increasing the `max_label_value_length` value. The offending stream will be returned in the body of the HTTP response. | Property | Value | |-------------------------|-------------------| @@ -230,4 +231,4 @@ The offending stream will be returned in the body of the HTTP response. | Retryable | **No** | | Sample discarded | **Yes** | | Configurable per tenant | No | -| HTTP status code | `400 Bad Request` | \ No newline at end of file +| HTTP status code | `400 Bad Request` | diff --git a/docs/sources/operations/scalability.md b/docs/sources/operations/scalability.md index 157b9a7b5f..974a1a60bd 100644 --- a/docs/sources/operations/scalability.md +++ b/docs/sources/operations/scalability.md @@ -1,10 +1,11 @@ --- title: Scalability +description: Scaling with Grafana Loki weight: 30 --- -# Scaling with Grafana Loki +# Scalability -See [Loki: Prometheus-inspired, open source logging for cloud natives](https://grafana.com/blog/2018/12/12/loki-prometheus-inspired-open-source-logging-for-cloud-natives/) +See [Loki: Prometheus-inspired, open source logging for cloud natives](/blog/2018/12/12/loki-prometheus-inspired-open-source-logging-for-cloud-natives/) for a discussion about Grafana Loki's scalability. When scaling Loki, operators should consider running several Loki processes @@ -16,7 +17,7 @@ and scaling for resource usage. ## Separate Query Scheduler The Query frontend has an in-memory queue that can be moved out into a separate process similar to the -[Grafana Mimir query-scheduler](https://grafana.com/docs/mimir/latest/operators-guide/architecture/components/query-scheduler/). This allows running multiple query frontends. +[Grafana Mimir query-scheduler](/docs/mimir/latest/operators-guide/architecture/components/query-scheduler/). This allows running multiple query frontends. 
To run with the Query Scheduler, the frontend needs to be passed the scheduler's address via `-frontend.scheduler-address` and the querier processes need to be started with `-querier.scheduler-address` set to the same address. Both options can also be defined via the [configuration file]({{< relref "../configuration" >}}). diff --git a/docs/sources/operations/shuffle-sharding.md b/docs/sources/operations/shuffle-sharding/_index.md similarity index 98% rename from docs/sources/operations/shuffle-sharding.md rename to docs/sources/operations/shuffle-sharding/_index.md index 87e11343af..773267fe93 100644 --- a/docs/sources/operations/shuffle-sharding.md +++ b/docs/sources/operations/shuffle-sharding/_index.md @@ -65,7 +65,7 @@ Statistically, randomly picking two distinct tenants, there is: - a 0.08% chance that they will share 3 instances - only a 0.0004% chance that their instances will fully overlap -![overlapping instances probability](../shuffle-sharding-probability.png) +![overlapping instances probability](./shuffle-sharding-probability.png) ## Configuration diff --git a/docs/sources/operations/shuffle-sharding-probability.png b/docs/sources/operations/shuffle-sharding/shuffle-sharding-probability.png similarity index 100% rename from docs/sources/operations/shuffle-sharding-probability.png rename to docs/sources/operations/shuffle-sharding/shuffle-sharding-probability.png diff --git a/docs/sources/operations/storage/_index.md b/docs/sources/operations/storage/_index.md index ed245e2ae7..d726e1f8b9 100644 --- a/docs/sources/operations/storage/_index.md +++ b/docs/sources/operations/storage/_index.md @@ -1,5 +1,6 @@ --- -title: Storage +title: Grafana Loki Storage +description: Grafana Loki Storage weight: 40 --- # Grafana Loki Storage @@ -16,20 +17,20 @@ format](#chunk-format) for how chunks are stored internally. The **index** stores each stream's label set and links them to the individual chunks. -Refer to Loki's [configuration](../../configuration/) for details on +Refer to Loki's [configuration]({{}}) for details on how to configure the storage and the index. For more information: -1. [Table Manager](table-manager/) -1. [Retention](retention/) -1. [Logs Deletion](logs-deletion/) +1. [Table Manager]({{}}) +1. [Retention]({{}}) +1. 
[Logs Deletion]({{}}) ## Supported Stores The following are supported for the index: -- [Single Store (boltdb-shipper) - Recommended for 2.0 and newer](boltdb-shipper/) index store which stores boltdb index files in the object store +- [Single Store (boltdb-shipper) - Recommended for 2.0 and newer]({{}}) index store which stores boltdb index files in the object store - [Amazon DynamoDB](https://aws.amazon.com/dynamodb) - [Google Bigtable](https://cloud.google.com/bigtable) - [Apache Cassandra](https://cassandra.apache.org) @@ -42,7 +43,7 @@ The following are supported for the chunks: - [Apache Cassandra](https://cassandra.apache.org) - [Amazon S3](https://aws.amazon.com/s3) - [Google Cloud Storage](https://cloud.google.com/storage/) -- [Filesystem](filesystem/) (please read more about the filesystem to understand the pros/cons before using with production data) +- [Filesystem]({{}}) (please read more about the filesystem to understand the pros/cons before using it with production data) - [Baidu Object Storage](https://cloud.baidu.com/product/bos.html) ## Cloud Storage Permissions @@ -58,7 +59,7 @@ When using S3 as object storage, the following permissions are needed: Resources: `arn:aws:s3:::`, `arn:aws:s3:::/*` -See the [AWS deployment section](../../storage/#aws-deployment-s3-single-store) on the storage page for a detailed setup guide. +See the [AWS deployment section]({{}}) on the storage page for a detailed setup guide. ### DynamoDB diff --git a/docs/sources/operations/storage/boltdb-shipper.md b/docs/sources/operations/storage/boltdb-shipper.md index 90a5888449..9fa224c90c 100644 --- a/docs/sources/operations/storage/boltdb-shipper.md +++ b/docs/sources/operations/storage/boltdb-shipper.md @@ -1,7 +1,8 @@ --- title: Single Store (boltdb-shipper) +description: Single Store (boltdb-shipper) --- -# Single Store Loki (boltdb-shipper index type) +# Single Store (boltdb-shipper) BoltDB Shipper lets you run Grafana Loki without any dependency on NoSQL stores for storing the index. It stores the index locally in BoltDB files instead and keeps shipping those files to a shared object store, i.e., the same object store that is used for storing chunks. @@ -104,14 +105,14 @@ Within Kubernetes, if you are not using an Index Gateway, we recommend running Q An Index Gateway downloads and synchronizes the BoltDB index from the Object Storage in order to serve index queries to the Queriers and Rulers over gRPC. This avoids running Queriers and Rulers with a disk for persistence. Disks can become costly in a big cluster. -To run an Index Gateway, configure [StorageConfig](../../../configuration/#storage_config) and set the `-target` CLI flag to `index-gateway`. -To connect Queriers and Rulers to the Index Gateway, set the address (with gRPC port) of the Index Gateway with the `-boltdb.shipper.index-gateway-client.server-address` CLI flag or its equivalent YAML value under [StorageConfig](../../../configuration/#storage_config). +To run an Index Gateway, configure [StorageConfig]({{}}) and set the `-target` CLI flag to `index-gateway`. +To connect Queriers and Rulers to the Index Gateway, set the address (with gRPC port) of the Index Gateway with the `-boltdb.shipper.index-gateway-client.server-address` CLI flag or its equivalent YAML value under [StorageConfig]({{}}). When using the Index Gateway within Kubernetes, we recommend using a StatefulSet with persistent storage for downloading and querying index files.
This provides better read performance, avoids [noisy neighbor problems](https://en.wikipedia.org/wiki/Cloud_computing_issues#Performance_interference_and_noisy_neighbors) by not using the node disk, and avoids the time-consuming index download step on startup after rescheduling to a new node. ### Write Deduplication disabled -Loki does write deduplication of chunks and index using Chunks and WriteDedupe cache respectively, configured with [ChunkStoreConfig](../../../configuration/#chunk_store_config). +Loki does write deduplication of chunks and index using Chunks and WriteDedupe cache respectively, configured with [ChunkStoreConfig]({{}}). The problem with write deduplication when using `boltdb-shipper` is that ingesters only upload boltdb files periodically to make them available to all the other services, which means there is a brief period during which some of the services have not yet received the updated index. If the ingester that first wrote the chunks and index goes down during that period, and all the other ingesters that were part of the replication scheme skipped writing those chunks and index due to deduplication, those logs would be missing from query responses even though only a single ingester went down. This problem can occur even during rollouts, which are quite common. diff --git a/docs/sources/operations/storage/filesystem.md b/docs/sources/operations/storage/filesystem.md index b98d6c4110..56b71ce1fe 100644 --- a/docs/sources/operations/storage/filesystem.md +++ b/docs/sources/operations/storage/filesystem.md @@ -1,7 +1,8 @@ --- title: Filesystem +description: Filesystem Object Store --- -# Filesystem Object Store +# Filesystem The filesystem object store is the easiest way to get started with Grafana Loki, but there are some pros/cons to this approach. @@ -17,7 +18,7 @@ A folder is created for every tenant all the chunks for one tenant are stored in If Loki is run in single-tenant mode, all the chunks are put in a folder named `fake` which is the synthesized tenant name used for single tenant mode. -See [multi-tenancy](../../multi-tenancy/) for more information. +See [multi-tenancy]({{}}) for more information. ## Pros diff --git a/docs/sources/operations/storage/logs-deletion.md b/docs/sources/operations/storage/logs-deletion.md index 63c5c6e1ad..2ebbecc651 100644 --- a/docs/sources/operations/storage/logs-deletion.md +++ b/docs/sources/operations/storage/logs-deletion.md @@ -11,11 +11,11 @@ Log entries that fall within a specified time window and match an optional line Log entry deletion is supported _only_ when the BoltDB Shipper is configured for the index store. -The compactor component exposes REST [endpoints](../../../api/#compactor) that process delete requests. +The compactor component exposes REST [endpoints]({{}}) that process delete requests. A request to the endpoint specifies the streams and the time window. The deletion of the log entries takes place after a configurable cancellation time period expires. -Log entry deletion relies on configuration of the custom logs retention workflow as defined for the [compactor](../retention#compactor). The compactor looks at unprocessed requests which are past their cancellation period to decide whether a chunk is to be deleted or not. +Log entry deletion relies on configuration of the custom logs retention workflow as defined for the [compactor]({{}}). The compactor looks at unprocessed requests which are past their cancellation period to decide whether a chunk is to be deleted or not.
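As a rough sketch, enabling this workflow involves turning on compactor-based retention and allowing deletion for the tenant (the option names are the ones referenced in these docs; exact placement can vary between Loki versions, so treat this as illustrative and see the Configuration section below):

```yaml
# Illustrative only; not a complete compactor configuration.
compactor:
  working_directory: /loki/compactor   # hypothetical local path
  retention_enabled: true              # enables the compactor retention/deletion workflow
limits_config:
  allow_deletes: true                  # allows delete requests for this tenant
```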
## Configuration diff --git a/docs/sources/operations/storage/retention.md b/docs/sources/operations/storage/retention.md index bb996bda8c..0ad4f6ddee 100644 --- a/docs/sources/operations/storage/retention.md +++ b/docs/sources/operations/storage/retention.md @@ -1,19 +1,20 @@ --- title: Retention +description: Grafana Loki Storage Retention --- -# Grafana Loki Storage Retention +# Retention Retention in Grafana Loki is achieved either through the [Table Manager](#table-manager) or the [Compactor](#compactor). By default, when `table_manager.retention_deletes_enabled` or `compactor.retention_enabled` flags are not set, then logs sent to Loki live forever. -Retention through the [Table Manager](../table-manager/) is achieved by relying on the object store TTL feature, and will work for both [boltdb-shipper](../boltdb-shipper) store and chunk/index store. However retention through the [Compactor](../boltdb-shipper#compactor) is supported only with the [boltdb-shipper](../boltdb-shipper) store. +Retention through the [Table Manager]({{}}) is achieved by relying on the object store TTL feature, and will work for both [boltdb-shipper]({{}}) store and chunk/index store. However retention through the [Compactor]({{}}) is supported only with the [boltdb-shipper]({{}}) store. The Compactor retention will become the default and have long term support. It supports more granular retention policies on per tenant and per stream use cases. ## Compactor -The [Compactor](../boltdb-shipper#compactor) can deduplicate index entries. It can also apply granular retention. When applying retention with the Compactor, the [Table Manager](../table-manager/) is unnecessary. +The [Compactor]({{}}) can deduplicate index entries. It can also apply granular retention. When applying retention with the Compactor, the [Table Manager]({{}}) is unnecessary. > Run the compactor as a singleton (a single instance). @@ -87,7 +88,7 @@ The index period must be 24h. #### Configuring the retention period -Retention period is configured within the [`limits_config`](./../../../configuration/#limits_config) configuration section. +Retention period is configured within the [`limits_config`]({{}}) configuration section. There are two ways of setting retention policies: @@ -161,7 +162,7 @@ The example configurations will set these rules: In order to enable the retention support, the Table Manager needs to be configured to enable deletions and a retention period. Please refer to the -[`table_manager`](../../../configuration#table_manager) +[`table_manager`]({{}}) section of the Loki configuration reference for all available options. Alternatively, the `table-manager.retention-period` and `table-manager.retention-deletes-enabled` command line flags can be used. The @@ -169,12 +170,12 @@ provided retention period needs to be a duration represented as a string that can be parsed using the Prometheus common model [ParseDuration](https://pkg.go.dev/github.com/prometheus/common/model#ParseDuration). Examples: `7d`, `1w`, `168h`. > **WARNING**: The retention period must be a multiple of the index and chunks table -`period`, configured in the [`period_config`](../../../configuration#period_config) -block. See the [Table Manager](../table-manager#retention) documentation for +`period`, configured in the [`period_config`]({{}}) +block. See the [Table Manager]({{}}) documentation for more information. 
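For example, a Table Manager retention sketch mirroring the flags above might look like the following (the period value is illustrative and must still satisfy the multiple-of-`period` warning):

```yaml
# Illustrative only: delete tables once they fall outside roughly 15 weeks of retention.
table_manager:
  retention_deletes_enabled: true
  retention_period: 2520h
```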
> **NOTE**: To avoid querying of data beyond the retention period, -`max_look_back_period` config in [`chunk_store_config`](../../../configuration#chunk_store_config) must be set to a value less than or equal to +`max_look_back_period` config in [`chunk_store_config`]({{}}) must be set to a value less than or equal to what is set in `table_manager.retention_period`. When using S3 or GCS, the bucket storing the chunks needs to have the expiry @@ -194,7 +195,7 @@ intact; you will still be able to see related labels but will be unable to retrieve the deleted log content. For further details on the Table Manager internals, refer to the -[Table Manager](../table-manager/) documentation. +[Table Manager]({{}}) documentation. ## Example Configuration diff --git a/docs/sources/operations/storage/schema.md b/docs/sources/operations/storage/schema/_index.md similarity index 96% rename from docs/sources/operations/storage/schema.md rename to docs/sources/operations/storage/schema/_index.md index 51907a2c3a..af0b644a23 100644 --- a/docs/sources/operations/storage/schema.md +++ b/docs/sources/operations/storage/schema/_index.md @@ -1,11 +1,12 @@ --- title: Storage schema +description: Storage schema --- -# Loki storage schema +# Storage schema To support iterations over the storage layer contents, Loki has a configurable storage schema. The schema is defined to apply over periods of time. A `from` value marks the starting point of that schema. The schema is active until another entry defines a new schema with a new `from` date. -![schema_example](../schema.png) +![schema_example](./schema.png) Loki uses the defined schemas to determine which format to use when storing and querying the data. diff --git a/docs/sources/operations/storage/schema.png b/docs/sources/operations/storage/schema/schema.png similarity index 100% rename from docs/sources/operations/storage/schema.png rename to docs/sources/operations/storage/schema/schema.png diff --git a/docs/sources/operations/storage/table-manager.md b/docs/sources/operations/storage/table-manager/_index.md similarity index 88% rename from docs/sources/operations/storage/table-manager.md rename to docs/sources/operations/storage/table-manager/_index.md index bc2f65a99e..9542eb5d22 100644 --- a/docs/sources/operations/storage/table-manager.md +++ b/docs/sources/operations/storage/table-manager/_index.md @@ -1,7 +1,8 @@ --- title: Table manager +description: Table manager --- -# Table Manager +# Table manager Grafana Loki supports storing indexes and chunks in table-based data storages. When such a storage type is used, multiple tables are created over the time: each @@ -22,7 +23,7 @@ time range exceeds the retention period. The Table Manager supports the following backends: - **Index store** - - [Single Store (boltdb-shipper)](../boltdb-shipper/) + - [Single Store (boltdb-shipper)]({{}}) - [Amazon DynamoDB](https://aws.amazon.com/dynamodb) - [Google Bigtable](https://cloud.google.com/bigtable) - [Apache Cassandra](https://cassandra.apache.org) @@ -38,7 +39,7 @@ to store chunks, are not managed by the Table Manager, and a custom bucket polic should be set to delete old data. For detailed information on configuring the Table Manager, refer to the -[`table_manager`](../../../configuration#table_manager) +[`table_manager`]({{}}) section in the Loki configuration document. @@ -47,10 +48,10 @@ section in the Loki configuration document. A periodic table stores the index or chunk data relative to a specific period of time. 
The duration of the time range of the data stored in a single table and its storage type is configured in the -[`schema_config`](../../../configuration#schema_config) configuration +[`schema_config`]({{}}) configuration block. -The [`schema_config`](../../../configuration#schema_config) can contain +The [`schema_config`]({{}}) can contain one or more `configs`. Each config defines the storage used between the day set in `from` (in the format `yyyy-mm-dd`) and the next config, or "now" in the case of the last schema config entry. @@ -59,7 +60,7 @@ This allows to have multiple non-overlapping schema configs over the time, in order to perform schema version upgrades or change storage settings (including changing the storage type). -![periodic_tables](../table-manager-periodic-tables.png) +![periodic_tables](./table-manager-periodic-tables.png) The write path hits the table that the log entry timestamp falls into (usually the last table, except short periods close to the end of a table and the @@ -104,7 +105,7 @@ order to make sure that the new table is ready once the current table end period is reached. The `creation_grace_period` property - in the -[`table_manager`](../../../configuration#table_manager) +[`table_manager`]({{}}) configuration block - defines how long before a table should be created. @@ -132,14 +133,14 @@ is deleted, the Table Manager keeps the last tables alive using this formula: number_of_tables_to_keep = floor(retention_period / table_period) + 1 ``` -![retention](../table-manager-retention.png) +![retention](./table-manager-retention.png) It's important to note that - due to the internal implementation - the table `period` and `retention_period` **must** be multiples of `24h` in order to get the expected behavior. For detailed information on configuring the retention, refer to the -[Loki Storage Retention](../retention/) +[Loki Storage Retention]({{}}) documentation. @@ -148,10 +149,10 @@ A table can be active or inactive. A table is considered **active** if the current time is within the range: -- Table start period - [`creation_grace_period`](../../../configuration#table_manager) +- Table start period - [`creation_grace_period`]({{}}) - Table end period + max chunk age (hardcoded to `12h`) -![active_vs_inactive_tables](../table-manager-active-vs-inactive-tables.png) +![active_vs_inactive_tables](./table-manager-active-vs-inactive-tables.png) Currently, the difference between an active and inactive table **only applies to the DynamoDB storage** settings: capacity mode (on-demand or provisioned), @@ -199,13 +200,13 @@ The Table Manager can be executed in two ways: ### Monolithic mode -When Loki runs in [monolithic mode](../../../fundamentals/architecture#modes-of-operation), +When Loki runs in [monolithic mode]({{}}), the Table Manager is also started as a component of the entire stack. ### Microservices mode -When Loki runs in [microservices mode](../../../fundamentals/architecture#modes-of-operation), +When Loki runs in [microservices mode]({{}}), the Table Manager should be started as a separate service named `table-manager`. 
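A minimal sketch of launching it that way (the `-target` flag selects which Loki component to run; the config file path is a placeholder):

```bash
# Illustrative: run only the Table Manager component from the Loki binary.
loki -config.file=/etc/loki/config.yaml -target=table-manager
```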
You can check out a production grade deployment example at diff --git a/docs/sources/operations/storage/table-manager-active-vs-inactive-tables.png b/docs/sources/operations/storage/table-manager/table-manager-active-vs-inactive-tables.png similarity index 100% rename from docs/sources/operations/storage/table-manager-active-vs-inactive-tables.png rename to docs/sources/operations/storage/table-manager/table-manager-active-vs-inactive-tables.png diff --git a/docs/sources/operations/storage/table-manager-periodic-tables.png b/docs/sources/operations/storage/table-manager/table-manager-periodic-tables.png similarity index 100% rename from docs/sources/operations/storage/table-manager-periodic-tables.png rename to docs/sources/operations/storage/table-manager/table-manager-periodic-tables.png diff --git a/docs/sources/operations/storage/table-manager-retention.png b/docs/sources/operations/storage/table-manager/table-manager-retention.png similarity index 100% rename from docs/sources/operations/storage/table-manager-retention.png rename to docs/sources/operations/storage/table-manager/table-manager-retention.png diff --git a/docs/sources/operations/storage/wal.md b/docs/sources/operations/storage/wal.md index 151b1d3080..0244da9160 100644 --- a/docs/sources/operations/storage/wal.md +++ b/docs/sources/operations/storage/wal.md @@ -1,10 +1,11 @@ --- title: Write Ahead Log +description: Write Ahead Log -# Write Ahead Log (WAL) +# Write Ahead Log -Ingesters temporarily store data in memory. In the event of a crash, there could be data loss. The WAL helps fill this gap in reliability. +Ingesters temporarily store data in memory. In the event of a crash, there could be data loss. The Write Ahead Log (WAL) helps fill this gap in reliability. The WAL in Grafana Loki records incoming data and stores it on the local file system in order to guarantee persistence of acknowledged data in the event of a process crash. Upon restart, Loki will "replay" all of the data in the log before registering itself as ready for subsequent writes. This allows Loki to maintain the performance & cost benefits of buffering data in memory _and_ durability benefits (it won't lose data once a write has been acknowledged). @@ -82,7 +83,7 @@ When scaling down, we must ensure existing data on the leaving ingesters are flu Consider you have 4 ingesters `ingester-0 ingester-1 ingester-2 ingester-3` and you want to scale down to 2 ingesters. The ingesters that will be shut down according to StatefulSet rules are `ingester-3` and then `ingester-2`. -Hence before actually scaling down in Kubernetes, port forward those ingesters and hit the [`/ingester/flush_shutdown`](../../api#post-ingesterflush_shutdown) endpoint. This will flush the chunks and remove itself from the ring, after which it will register as unready and may be deleted. +Hence before actually scaling down in Kubernetes, port forward those ingesters and hit the [`/ingester/flush_shutdown`]({{}}) endpoint. This will flush the chunks and remove the ingester from the ring, after which it will register as unready and may be deleted. After hitting the endpoint for `ingester-2 ingester-3`, scale down the ingesters to 2. 
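A hedged sketch of that sequence on Kubernetes (the pod and StatefulSet names are hypothetical; the endpoint and its POST method come from the API reference linked above):

```bash
# Illustrative only: flush and deregister the leaving ingesters, then scale down.
kubectl port-forward pod/ingester-3 3100:3100 &
curl -X POST http://localhost:3100/ingester/flush_shutdown
# repeat for ingester-2, then reduce the replica count
kubectl scale statefulset/ingester --replicas=2
```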
diff --git a/docs/sources/operations/troubleshooting.md b/docs/sources/operations/troubleshooting.md index 4ac0aa5039..b465006361 100644 --- a/docs/sources/operations/troubleshooting.md +++ b/docs/sources/operations/troubleshooting.md @@ -1,10 +1,11 @@ --- title: Troubleshooting +description: Troubleshooting Grafana Loki weight: 80 aliases: - /docs/loki/latest/getting-started/troubleshooting/ --- -# Troubleshooting Grafana Loki +# Troubleshooting ## "Loki: Bad Gateway. 502" @@ -60,9 +61,9 @@ can have many possible causes. If you have a reverse proxy in front of Loki, that is, between Loki and Grafana, then check any configured timeouts, such as an NGINX proxy read timeout. - Other causes. To determine if the issue is related to Loki itself or another system such as Grafana or a client-side error, -attempt to run a [LogCLI](../../tools/logcli/) query in as direct a manner as you can. For example, if running on virtual machines, run the query on the local machine. If running in a Kubernetes cluster, then port forward the Loki HTTP port, and attempt to run the query there. If you do not get a timeout, then consider these causes: +attempt to run a [LogCLI]({{}}) query in as direct a manner as you can. For example, if running on virtual machines, run the query on the local machine. If running in a Kubernetes cluster, then port forward the Loki HTTP port, and attempt to run the query there. If you do not get a timeout, then consider these causes: - - Adjust the [Grafana dataproxy timeout](https://grafana.com/docs/grafana/latest/administration/configuration/#dataproxy). Configure Grafana with a large enough dataproxy timeout. + - Adjust the [Grafana dataproxy timeout](/docs/grafana/latest/administration/configuration/#dataproxy). Configure Grafana with a large enough dataproxy timeout. - Check timeouts for reverse proxies or load balancers between your client and Grafana. Queries to Grafana are made from your local browser with Grafana serving as a proxy (a dataproxy). Therefore, connections from your client to Grafana must have their timeout configured as well. ## Cache Generation errors @@ -79,7 +80,7 @@ Loki cache generation number errors(Loki >= 2.6) - Check the metric `loki_delete_cache_gen_load_failures_total` on `/metrics`, which is an indicator for the occurrence of the problem. If the value is greater than 1, it means that there is a problem with that component. - Try an HTTP GET request to the route: /loki/api/v1/cache/generation_numbers - - If response is equal as `"deletion is not available for this tenant"`, this means the deletion API is not enabled for the tenant. To enable this api, set `allow_deletes: true` for this tenant via the configuration settings. Check more docs: https://grafana.com/docs/loki/latest/operations/storage/logs-deletion/ + - If the response is `"deletion is not available for this tenant"`, this means the deletion API is not enabled for the tenant. To enable this API, set `allow_deletes: true` for this tenant via the configuration settings. 
Check more docs: /docs/loki/latest/operations/storage/logs-deletion/ ## Troubleshooting targets diff --git a/docs/sources/release-notes/_index.md b/docs/sources/release-notes/_index.md index ce91be7610..24a9789f41 100644 --- a/docs/sources/release-notes/_index.md +++ b/docs/sources/release-notes/_index.md @@ -1,5 +1,6 @@ --- title: Release notes +description: Release notes weight: 100 --- # Release notes @@ -7,8 +8,8 @@ weight: 100 Release notes for Loki are in the CHANGELOG for the release and listed here by version number. -- [V2.7 release notes](../release-notes/v2-7/) -- [V2.6 release notes](../release-notes/v2-6/) -- [V2.5 release notes](../release-notes/v2-5/) -- [V2.4 release notes](../release-notes/v2-4/) -- [V2.3 release notes](../release-notes/v2-3/) +- [V2.7 release notes]({{}}) +- [V2.6 release notes]({{}}) +- [V2.5 release notes]({{}}) +- [V2.4 release notes]({{}}) +- [V2.3 release notes]({{}}) diff --git a/docs/sources/release-notes/v2-3.md b/docs/sources/release-notes/v2-3.md index 4122070e81..6ec6531cd8 100644 --- a/docs/sources/release-notes/v2-3.md +++ b/docs/sources/release-notes/v2-3.md @@ -1,29 +1,30 @@ --- title: V2.3 +description: Version 2.3 release notes weight: 99 --- -# Version 2.3 release notes +# V2.3 The Loki team is excited to announce the release of Loki 2.3! It's been nearly 6 months since 2.2 was released and we have made good use of that time to bring forward several significant improvements and requested features. -2.3 is also the first version of Loki released under the AGPLv3 license. You can [read more about our licensing here](https://grafana.com/licensing/). +2.3 is also the first version of Loki released under the AGPLv3 license. You can [read more about our licensing here](/licensing/). Some parts of the Loki repo will remain Apache-2.0 licensed (mainly clients and some tooling), for more details please read [LICENSING.md](https://github.com/grafana/loki/blob/main/LICENSING.md). ## Features and enhancements -* Loki now has the ability to apply [custom retention](../../operations/storage/retention/) based on stream selectors! This will allow much finer control over log retention all of which is now handled by Loki, no longer requiring the use of object store configs for retention. -* Coming along hand in hand with storing logs for longer durations is the ability to [delete log streams](../../operations/storage/logs-deletion/). The initial implementation lets you submit delete request jobs which will be processed after 24 hours. -* A very exciting new LogQL parser has been introduced: the [pattern parser](../../logql/#parser-expression). Much simpler and faster than regexp for log lines that have a little bit of structure to them such as the [Common Log Format](https://en.wikipedia.org/wiki/Common_Log_Format). This is now Loki's fastest parser so try it out on any of your log lines! -* Extending on the work of Alerting Rules, Loki now accepts [recording rules](../../rules/#recording-rules). This lets you turn your logs into metrics and push them to Prometheus or any Prometheus compatible remote_write endpoint. -* LogQL can understand [IP addresses](../../logql/ip/)! This enables filtering on IP addresses and subnet ranges. +* Loki now has the ability to apply [custom retention]({{}}) based on stream selectors! This will allow much finer control over log retention all of which is now handled by Loki, no longer requiring the use of object store configs for retention. 
+* Coming along hand in hand with storing logs for longer durations is the ability to [delete log streams]({{}}). The initial implementation lets you submit delete request jobs which will be processed after 24 hours. +* A very exciting new LogQL parser has been introduced: the [pattern parser]({{}}). Much simpler and faster than regexp for log lines that have a little bit of structure to them such as the [Common Log Format](https://en.wikipedia.org/wiki/Common_Log_Format). This is now Loki's fastest parser so try it out on any of your log lines! +* Extending on the work of Alerting Rules, Loki now accepts [recording rules]({{}}). This lets you turn your logs into metrics and push them to Prometheus or any Prometheus compatible remote_write endpoint. +* LogQL can understand [IP addresses]({{}})! This enables filtering on IP addresses and subnet ranges. For those of you running Loki as microservices, the following features will improve performance significantly for many operations. -* We created an [index gateway](../../operations/storage/boltdb-shipper/#index-gateway) which takes on the task of downloading the boltdb-shipper index files allowing you to run your queriers without any local disk requirements, this is really helpful in Kubernetes environments where you can return your queriers from Statefulsets back to Deployments and save a lot of PVC costs and operational headaches. +* We created an [index gateway]({{}}) which takes on the task of downloading the boltdb-shipper index files allowing you to run your queriers without any local disk requirements, this is really helpful in Kubernetes environments where you can return your queriers from Statefulsets back to Deployments and save a lot of PVC costs and operational headaches. * Ingester queriers [are now shardable](https://github.com/grafana/loki/pull/3852), this is a significant performance boost for high volume log streams when querying recent data. * Instant queries can now be [split and sharded](https://github.com/grafana/loki/pull/3984) making them just as fast as range queries. @@ -35,7 +36,7 @@ Without revisiting the decisions and discussions around the somewhat controversi Lastly several useful additions to the LogQL query language have been included: -* More text/template functions are included for `label_format` and `line_format` with PR [3515](https://github.com/grafana/loki/pull/3515), please check out the [documentation for template functions](https://grafana.com/docs/loki/latest/logql/template_functions/). +* More text/template functions are included for `label_format` and `line_format` with PR [3515](https://github.com/grafana/loki/pull/3515), please check out the [documentation for template functions](/docs/loki/latest/logql/template_functions/). * Also support for math functions within `label_format` and `line_format` was included with [3434](https://github.com/grafana/loki/pull/3434). * Two additional metric functions with some interesting use cases `first_over_time` and `last_over_time` were added in PR [3050](https://github.com/grafana/loki/pull/3050). These can be useful for some downsampling approaches where instead of taking an average, max, or min of samples over a range in a metrics query, you can select the first or last log line to use from that range. @@ -87,4 +88,4 @@ Lists of bug fixes for 2.3.x. 
### 2.3.0 bug fixes * An important fix for leaking resources was patched with [3733](https://github.com/grafana/loki/pull/3733): when queries were canceled, a goroutine was left running which would hold memory resources, creating a memory leak. -* [3686](https://github.com/grafana/loki/pull/3686) fixes a panic with the frontend when use with downstream URL. **Note** we recommend using the [GRPC Pull Model](https://grafana.com/docs/loki/latest/configuration/query-frontend/#grpc-mode-pull-model), better performance and fair scheduling between tenants can be obtained with the GPRC Pull Model. +* [3686](https://github.com/grafana/loki/pull/3686) fixes a panic with the frontend when used with a downstream URL. **Note** we recommend using the [GRPC Pull Model](/docs/loki/latest/configuration/query-frontend/#grpc-mode-pull-model); better performance and fair scheduling between tenants can be obtained with the GRPC Pull Model. diff --git a/docs/sources/release-notes/v2-4.md b/docs/sources/release-notes/v2-4.md index f4b031527d..9efd60db09 100644 --- a/docs/sources/release-notes/v2-4.md +++ b/docs/sources/release-notes/v2-4.md @@ -1,9 +1,10 @@ --- title: V2.4 +description: Version 2.4 release notes weight: 88 --- -# Version 2.4 release notes +# V2.4 Loki 2.4 focuses on two items: @@ -12,13 +13,13 @@ Loki 2.4 focuses on two items: ## Features and enhancements -* [**Loki no longer requires logs to be sent in perfect chronological order.**](../../configuration/#accept-out-of-order-writes) Support for out of order logs is one of the most highly requested features for Loki. The strict ordering constraint has been removed. +* [**Loki no longer requires logs to be sent in perfect chronological order.**]({{}}) Support for out of order logs is one of the most highly requested features for Loki. The strict ordering constraint has been removed. 
+* Scaling Loki is now easier with a hybrid deployment mode that falls between our single binary and our microservices. The [Simple scalable deployment]({{}}) scales Loki with new `read` and `write` targets. Where previously you would have needed Kubernetes and the microservices approach to start tapping into Loki’s potential, it’s now possible to do this in a simpler way. +* The new [`common` section]({{}}) results in a 70% smaller Loki configuration. Pair that with updated defaults and Loki comes out of the box with more appropriate defaults and limits. Check out the [example local configuration](https://github.com/grafana/loki/blob/main/cmd/loki/loki-local-config.yaml) as the new reference for running Loki. +* [**Recording rules**]({{}}) are no longer an experimental feature. We've given them a more resilient implementation which leverages the existing write ahead log code in Prometheus. +* The new [**Promtail Kafka Consumer**]({{}}) can easily get your logs out of Kafka and into Loki. +* There are **nice LogQL enhancements**, thanks to the amazing Loki community. LogQL now has [group_left and group_right]({{}}). And, the `label_format` and `line_format` functions now support [working with dates and times]({{}}). +* Another great community contribution allows Promtail to [**accept ndjson and plaintext log files over HTTP**]({{}}). All in all, about 260 PR’s went into Loki 2.4, and we thank everyone for helping us make the best Loki yet. @@ -26,7 +27,7 @@ For a full list of all changes please look at the [CHANGELOG](https://github.com ## Upgrade Considerations -Please read the [upgrade guide](../../upgrading/#240) before updating Loki. +Please read the [upgrade guide]({{}}) before updating Loki. We made a lot of changes to Loki’s configuration as part of this release. We have tried our best to make sure changes are compatible with existing configurations, however some changes to default limits may impact users who didn't have values explicitly set for these limits in their configuration files. diff --git a/docs/sources/release-notes/v2-5.md b/docs/sources/release-notes/v2-5.md index f5abce0102..42334af97f 100644 --- a/docs/sources/release-notes/v2-5.md +++ b/docs/sources/release-notes/v2-5.md @@ -1,9 +1,10 @@ --- title: V2.5 +description: Version 2.5 release notes weight: 77 --- -# Version 2.5 release notes +# V2.5 It has been nearly 6 months since Loki 2.4 was released, and we’ve been busy making Loki better than ever. Here's a summary of new enhancements and important fixes. @@ -13,7 +14,7 @@ It has been nearly 6 months since Loki 2.4 was released, and we’ve been busy m - **[Binary operations are now significantly faster](https://github.com/grafana/loki/pull/5317)**, taking full advantage of Loki's parallelism. - **[A new schema is available](https://github.com/grafana/loki/pull/5054)**, which uses more path prefixes to avoid rate limits on S3. - That same schema change **[was also added to the filesystem store](https://github.com/grafana/loki/pull/5291)**, which avoids using one directory to store every chunk. -- A new capability for **[hedging requests to storage](https://github.com/grafana/loki/pull/4826)** improves performance on highly parallelized queries. Refer to the [hedging configuration](../../configuration/#hedging) block for more information. +- A new capability for **[hedging requests to storage](https://github.com/grafana/loki/pull/4826)** improves performance on highly parallelized queries. 
Refer to the [hedging configuration]({{}}) under the `storage_config` block for more information. - Promtail has several new ways to ingest logs: - The **[ability to do service discovery and tailing directly from the Docker daemon](https://github.com/grafana/loki/pull/4911)**. - **[Fetching logs directly from Cloudflare](https://github.com/grafana/loki/pull/4813)**. @@ -24,7 +25,7 @@ For a full list of all changes please look at the [CHANGELOG](https://github.com ## Upgrade Considerations -As always, please read the [upgrade guide](../../upgrading/#250) before upgrading Loki. +As always, please read the [upgrade guide]({{}}) before upgrading Loki. ### Changes to the config `split_queries_by_interval` The most likely impact many people will see is Loki failing to start because of a change in the YAML configuration for `split_queries_by_interval`. It was previously possible to define this value in two places. @@ -63,7 +64,7 @@ Usage reporting helps provide anonymous information on how people use Loki and w If possible, we ask you to leave the usage reporting feature enabled and help us understand more about Loki! We are also working to figure out how we can share this info with the community so everyone can watch Loki grow. -If you would rather not participate in usage stats reporting, [the feature can be disabled in config](https://grafana.com/docs/loki/latest/configuration/#analytics) +If you would rather not participate in usage stats reporting, [the feature can be disabled in config](/docs/loki/latest/configuration/#analytics) ``` analytics: diff --git a/docs/sources/release-notes/v2-6.md b/docs/sources/release-notes/v2-6.md index b216087533..058b1ce167 100644 --- a/docs/sources/release-notes/v2-6.md +++ b/docs/sources/release-notes/v2-6.md @@ -1,9 +1,10 @@ --- title: V2.6 +description: Version 2.6 release notes weight: 66 --- -# Version 2.6 release notes +# V2.6 Grafana Labs is excited to announce the release of Loki 2.6. Here's a summary of new enhancements and important fixes. @@ -18,7 +19,7 @@ For a full list of all changes please look at the [CHANGELOG](https://github.com ## Upgrade Considerations -As always, please read the [upgrade guide](../../upgrading/#260) before upgrading Loki. +As always, please read the [upgrade guide]({{}}) before upgrading Loki. ## Bug fixes @@ -39,4 +40,4 @@ A summary of some of the more important fixes: - [PR 6152](https://github.com/grafana/loki/pull/6152) Fixed a scenario where live tailing of logs could cause unbounded ingester memory growth. - [PR 5685](https://github.com/grafana/loki/pull/5685) Fixed a bug in Loki's push request parser that allowed users to send arbitrary non-string data as a log line. We now test that the pushed values are valid strings and return an error if values are not valid strings. - [PR 5799](https://github.com/grafana/loki/pull/5799) Fixed incorrect deduplication logic for cases where multiple log entries with the same timestamp exist. -- [PR 5888](https://github.com/grafana/loki/pull/5888) Fixed a bug in the [common configuration]({{< relref "../configuration/_index.md#common" >}}) where the `instance_interface_names` setting was getting overwritten by the default ring configuration. \ No newline at end of file +- [PR 5888](https://github.com/grafana/loki/pull/5888) Fixed a bug in the [common configuration]({{< relref "../configuration/_index.md#common" >}}) where the `instance_interface_names` setting was getting overwritten by the default ring configuration. 
diff --git a/docs/sources/release-notes/v2-7.md b/docs/sources/release-notes/v2-7.md index 683df0d5c7..784ea0b500 100644 --- a/docs/sources/release-notes/v2-7.md +++ b/docs/sources/release-notes/v2-7.md @@ -1,9 +1,10 @@ --- title: V2.7 +description: Version 2.7 release notes weight: 60 --- -# Version 2.7 release notes +# V2.7 Grafana Labs is excited to announce the release of Loki 2.7. Here's a summary of new enhancements and important fixes: @@ -13,7 +14,7 @@ Grafana Labs is excited to announce the release of Loki 2.7. Here's a summary of - **Better Support for Azure Blob Storage** thanks to the ability to use Azure's Service Principal Credentials. - **Logs can now be pushed from the Loki canary** so you don't have to rely on a scraping service to use the canary. - **Additional `label_format` fields** `__timestamp__` and `__line__`. -- **`fifocache` has been renamed** The in-memory `fifocache` has been renamed to `embedded-cache`. Check [upgrade guide](../../upgrading/#270) for more details +- **`fifocache` has been renamed** The in-memory `fifocache` has been renamed to `embedded-cache`. Check [upgrade guide]({{}}) for more details. - **New HTTP endpoint for Ingester shutdown** that will also delete the ring token. - **Faster label queries** thanks to new parallelization. - **Introducing Stream Sharding**, an experimental new feature to help deal with very large streams. @@ -29,7 +30,7 @@ For a full list of all changes please look at the [CHANGELOG](https://github.com ## Upgrade Considerations -As always, please read the [upgrade guide](../../upgrading/#270) before upgrading Loki. +As always, please read the [upgrade guide]({{}}) before upgrading Loki. ## Bug fixes diff --git a/docs/sources/rules/_index.md b/docs/sources/rules/_index.md index 99339d2239..947b47a816 100644 --- a/docs/sources/rules/_index.md +++ b/docs/sources/rules/_index.md @@ -2,10 +2,11 @@ aliases: - /alerting/ title: Alerting and Recording Rules +description: Alerting and Recording Rules weight: 700 --- -# Rules and the Ruler +# Alerting and Recording Rules Grafana Loki includes a component called the ruler. The ruler is responsible for continually evaluating a set of configurable queries and performing an action based on the result. @@ -76,7 +77,7 @@ We support [Prometheus-compatible](https://prometheus.io/docs/prometheus/latest/ > Querying the precomputed result will then often be much faster than executing the original expression every time it is needed. This is especially useful for dashboards, which need to query the same expression repeatedly every time they refresh. -Loki allows you to run [metric queries](../logql/metric_queries) over your logs, which means +Loki allows you to run [metric queries]({{}}) over your logs, which means that you can derive a numeric aggregation from your logs, like calculating the number of requests over time from your NGINX access log. ### Example @@ -107,7 +108,7 @@ At the time of writing, these are the compatible backends that support this: - [Prometheus](https://prometheus.io/docs/prometheus/latest/disabled_features/#remote-write-receiver) (`>=v2.25.0`): Prometheus is generally a pull-based system, but since `v2.25.0` it has allowed for metrics to be written directly to it as well. 
-[Grafana Mimir](https://grafana.com/docs/mimir/latest/operators-guide/reference-http-api/#remote-write) +[Grafana Mimir](/docs/mimir/latest/operators-guide/reference-http-api/#remote-write) - [Thanos (`Receiver`)](https://thanos.io/tip/components/receive.md/) Here is an example remote-write configuration for sending to a local Prometheus instance: @@ -122,11 +123,11 @@ ruler: url: http://localhost:9090/api/v1/write ``` -Further configuration options can be found under [ruler](../configuration#ruler). +Further configuration options can be found under [ruler]({{}}). ### Operations -Please refer to the [Recording Rules](../operations/recording-rules/) page. +Please refer to the [Recording Rules]({{}}) page. ## Use cases @@ -230,7 +231,7 @@ jobs: One option to scale the Ruler is by scaling it horizontally. However, with multiple Ruler instances running they will need to coordinate to determine which instance will evaluate which rule. Similar to the ingesters, the Rulers establish a hash ring to divide up the responsibilities of evaluating rules. -The possible configurations are listed fully in the [configuration documentation](../configuration/), but in order to shard rules across multiple Rulers, the rules API must be enabled via flag (`-ruler.enable-api`) or config file parameter. Secondly, the Ruler requires it's own ring be configured. From there the Rulers will shard and handle the division of rules automatically. Unlike ingesters, Rulers do not hand over responsibility: all rules are re-sharded randomly every time a Ruler is added to or removed from the ring. +The possible configurations are listed fully in the [configuration documentation]({{}}), but in order to shard rules across multiple Rulers, the rules API must be enabled via flag (`-ruler.enable-api`) or config file parameter. Secondly, the Ruler requires its own ring be configured. From there the Rulers will shard and handle the division of rules automatically. Unlike ingesters, Rulers do not hand over responsibility: all rules are re-sharded randomly every time a Ruler is added to or removed from the ring. A full sharding-enabled Ruler example is: @@ -255,7 +256,7 @@ ruler: The Ruler supports five kinds of storage: azure, gcs, s3, swift, and local. Most kinds of storage work with the sharded Ruler configuration in an obvious way, i.e. configure all Rulers to use the same backend. -The local implementation reads the rule files off of the local filesystem. This is a read-only backend that does not support the creation and deletion of rules through the [Ruler API](../api/#ruler). Despite the fact that it reads the local filesystem this method can still be used in a sharded Ruler configuration if the operator takes care to load the same rules to every Ruler. For instance, this could be accomplished by mounting a [Kubernetes ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) onto every Ruler pod. +The local implementation reads the rule files off of the local filesystem. This is a read-only backend that does not support the creation and deletion of rules through the [Ruler API]({{}}). Despite the fact that it reads the local filesystem, this method can still be used in a sharded Ruler configuration if the operator takes care to load the same rules to every Ruler. For instance, this could be accomplished by mounting a [Kubernetes ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) onto every Ruler pod. 
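For illustration, a rule file loaded this way could contain a Prometheus-style group whose expression is LogQL (the group, rule, and label names below are hypothetical):

```yaml
# Hypothetical rule file placed under the per-tenant rules directory.
groups:
  - name: example-recording-rules
    rules:
      - record: nginx:requests:rate5m
        expr: sum(rate({job="nginx"}[5m]))
```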
A typical local configuration might look something like: ``` @@ -268,7 +269,7 @@ With the above configuration, the Ruler would expect the following layout: /tmp/loki/rules//rules1.yaml /rules2.yaml ``` -Yaml files are expected to be [Prometheus compatible](#Prometheus_Compatible) but include LogQL expressions as specified in the beginning of this doc. +Yaml files are expected to be [Prometheus-compatible](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) but include LogQL expressions as specified in the beginning of this doc. ## Future improvements diff --git a/docs/sources/storage/_index.md b/docs/sources/storage/_index.md index 57385c224f..7538f772ab 100644 --- a/docs/sources/storage/_index.md +++ b/docs/sources/storage/_index.md @@ -1,5 +1,6 @@ --- title: Storage +description: Storage weight: 1010 --- # Storage @@ -55,7 +56,7 @@ As of 2.0, this is the recommended index storage type, performance is comparable ### Cassandra -Cassandra can also be utilized for the index store and aside from the [boltdb-shipper](../operations/storage/boltdb-shipper/), it's the only non-cloud offering that can be used for the index that's horizontally scalable and has configurable replication. It's a good candidate when you already run Cassandra, are running on-prem, or do not wish to use a managed cloud offering. +Cassandra can also be utilized for the index store and aside from the [boltdb-shipper]({{}}), it's the only non-cloud offering that can be used for the index that's horizontally scalable and has configurable replication. It's a good candidate when you already run Cassandra, are running on-prem, or do not wish to use a managed cloud offering. ### BigTable @@ -71,7 +72,7 @@ DynamoDB is susceptible to rate limiting, particularly due to overconsuming what ### BoltDB -BoltDB is an embedded database on disk. It is not replicated and thus cannot be used for high availability or clustered Loki deployments, but is commonly paired with a `filesystem` chunk store for proof of concept deployments, trying out Loki, and development. The [boltdb-shipper](../operations/storage/boltdb-shipper/) aims to support clustered deployments using `boltdb` as an index. +BoltDB is an embedded database on disk. It is not replicated and thus cannot be used for high availability or clustered Loki deployments, but is commonly paired with a `filesystem` chunk store for proof of concept deployments, trying out Loki, and development. The [boltdb-shipper]({{}}) aims to support clustered deployments using `boltdb` as an index. ### Azure Storage Account @@ -115,7 +116,7 @@ table_manager: retention_period: 2520h ``` -For more information, see the [table manager](../operations/storage/table-manager/) documentation. +For more information, see the [table manager]({{}}) documentation. ### Provisioning @@ -134,13 +135,13 @@ table_manager: inactive_read_throughput: | Default = 300 ``` -Note, there are a few other DynamoDB provisioning options including DynamoDB autoscaling and on-demand capacity. See the [provisioning configuration](../configuration/#provision_config) documentation for more information. +Note, there are a few other DynamoDB provisioning options including DynamoDB autoscaling and on-demand capacity. See the [provisioning configuration]({{}}) in the `table_manager` block documentation for more information. ## Upgrading Schemas When a new schema is released and you want to gain the advantages it provides, you can! 
Loki can transparently query & merge data from across schema boundaries so there is no disruption of service and upgrading is easy. -First, you'll want to create a new [period_config](../configuration#period_config) entry in your [schema_config](../configuration#schema_config). The important thing to remember here is to set this at some point in the _future_ and then roll out the config file changes to Loki. This allows the table manager to create the required table in advance of writes and ensures that existing data isn't queried as if it adheres to the new schema. +First, you'll want to create a new [period_config]({{}}) entry in your [schema_config]({{}}). The important thing to remember here is to set this at some point in the _future_ and then roll out the config file changes to Loki. This allows the table manager to create the required table in advance of writes and ensures that existing data isn't queried as if it adheres to the new schema. As an example, let's say it's 2020-07-14 and we want to start using the `v11` schema on the 20th: ```yaml @@ -170,7 +171,7 @@ With the exception of the `filesystem` chunk store, Loki will not delete old chu We're interested in adding targeted deletion in future Loki releases (think tenant or stream level granularity) and may include other strategies as well. -For more information, see the [retention configuration](../operations/storage/retention/) documentation. +For more information, see the [retention configuration]({{}}) documentation. ## Examples diff --git a/docs/sources/tools/_index.md b/docs/sources/tools/_index.md index 1af0e56990..07aae77854 100644 --- a/docs/sources/tools/_index.md +++ b/docs/sources/tools/_index.md @@ -1,7 +1,8 @@ --- title: Tools +description: Tools weight: 1050 --- # Tools -- [LogCLI](logcli/) +- [LogCLI]({{}}) diff --git a/docs/sources/tools/logcli.md b/docs/sources/tools/logcli.md index 2fa447bc0c..06e0245a4a 100644 --- a/docs/sources/tools/logcli.md +++ b/docs/sources/tools/logcli.md @@ -1,10 +1,11 @@ --- title: LogCLI +description: LogCLI, Grafana Loki's command-line interface weight: 20 aliases: - /docs/loki/latest/getting-started/logcli/ --- -# LogCLI, Grafana Loki's command-line interface +# LogCLI LogCLI is the command-line interface to Grafana Loki. It facilitates running [LogQL]({{< relref "../logql/_index.md" >}}) diff --git a/docs/sources/upgrading/_index.md b/docs/sources/upgrading/_index.md index 1f625ebf07..ea45b06a9e 100644 --- a/docs/sources/upgrading/_index.md +++ b/docs/sources/upgrading/_index.md @@ -1,9 +1,10 @@ --- title: Upgrading +description: Upgrading Grafana Loki weight: 250 --- -# Upgrading Grafana Loki +# Upgrading Every attempt is made to keep Grafana Loki backwards compatible, such that upgrades should be low risk and low friction. @@ -61,7 +62,7 @@ ruler: #### query-frontend k8s headless service changed to load balanced service -*Note:* This is relevant only if you are using [jsonnet for deploying Loki in Kubernetes](https://grafana.com/docs/loki/latest/installation/tanka/) +*Note:* This is relevant only if you are using [jsonnet for deploying Loki in Kubernetes](/docs/loki/latest/installation/tanka/) The `query-frontend` k8s service was previously headless and was used for two purposes: * Distributing the Loki query requests amongst all the available Query Frontend pods. @@ -100,14 +101,14 @@ These statistics are also displayed when using `--stats` with LogCLI. 
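For instance, here is a quick sketch of pulling those statistics from the command line (this assumes LogCLI is installed; the address and the query are placeholders, not from this patch):

```bash
# Hypothetical usage: point LogCLI at a Loki instance and request the statistics summary.
export LOKI_ADDR=http://localhost:3100
logcli query --stats '{job="mysql"} |= "error"'
```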
### Loki Canary Permission -The new `push` mode to [Loki canary](https://grafana.com/docs/loki/latest/operations/loki-canary/) can push logs that are generated by a Loki canary directly to a given Loki URL. Previously, it only wrote to a local file and you needed some agent, such as promtail, to scrape and push it to Loki. +The new `push` mode to [Loki canary](/docs/loki/latest/operations/loki-canary/) can push logs that are generated by a Loki canary directly to a given Loki URL. Previously, it only wrote to a local file and you needed some agent, such as promtail, to scrape and push it to Loki. So if you run Loki behind a proxy with different authorization policies to read and write to Loki, then the auth credentials passed to the Loki canary now need to have both `READ` and `WRITE` permissions. ### `engine.timeout` and `querier.query_timeout` are deprecated Previously, we had two configurations to define a query timeout: `engine.timeout` and `querier.query-timeout`. As they were conflicting and `engine.timeout` isn't as expressive as `querier.query-timeout`, -we're deprecating it and moving it to [Limits Config](https://grafana.com/docs/loki/latest/configuration/#limits_config) `limits_config.query_timeout` with same default values. +we're deprecating it and moving it to [Limits Config](/docs/loki/latest/configuration/#limits_config) `limits_config.query_timeout` with the same default values. #### `fifocache` has been renamed @@ -289,7 +290,7 @@ This histogram reports the distribution of log line sizes by file. It has 8 buck This creates a lot of series, and we don't think this metric has enough value to offset the number of series generated, so we are removing it. -While this isn't a direct replacement, two metrics we find more useful are size and line counters configured via pipeline stages, an example of how to configure these metrics can be found in the [metrics pipeline stage docs](https://grafana.com/docs/loki/latest/clients/promtail/stages/metrics/#counter) +While this isn't a direct replacement, two metrics we find more useful are size and line counters configured via pipeline stages; an example of how to configure these metrics can be found in the [metrics pipeline stage docs](/docs/loki/latest/clients/promtail/stages/metrics/#counter). #### `added Docker target` log message has been demoted from level=error to level=info @@ -343,7 +344,7 @@ limits_config: retention_period: [30d] ``` -See the [retention docs](../operations/storage/retention) for more info. +See the [retention docs]({{}}) for more info. #### Log messages on startup: proto: duplicate proto type registered: @@ -540,10 +541,10 @@ cortex_chunks_store* -> loki_chunks_store* Previously, samples generated by recording rules would only be buffered in memory before being remote-written to Prometheus; from this version, the `ruler` now writes these samples to a per-tenant Write-Ahead Log for durability. More details about the -per-tenant WAL can be found [here](https://grafana.com/docs/loki/latest/operations/recording-rules/). +per-tenant WAL can be found [here](/docs/loki/latest/operations/recording-rules/). The `ruler` now requires persistent storage - please see the -[Operations](https://grafana.com/docs/loki/latest/operations/recording-rules/#deployment) page for more details about deployment. +[Operations](/docs/loki/latest/operations/recording-rules/#deployment) page for more details about deployment. ### Promtail