Rename Tempo to Loki. (#36)

* Rename Tempo to Loki.

Signed-off-by: Tom Wilkie <tom.wilkie@gmail.com>

* Use new build image, don't delete generated files on clean.

Signed-off-by: Tom Wilkie <tom.wilkie@gmail.com>
Tom Wilkie authored 7 years ago, committed by GitHub
parent 2bed54ff2b
commit 5f73598389
42 changed files (lines changed per file):

 20  .circleci/config.yml
  4  .gitignore
 26  Makefile
 30  README.md
  2  cmd/logcli/client.go
  2  cmd/logcli/labels.go
  2  cmd/logcli/main.go
  6  cmd/logcli/query.go
  4  cmd/logcli/tail.go
  4  cmd/loki/Dockerfile
 14  cmd/loki/main.go
  8  cmd/promtail/main.go
  4  cmd/tempo/Dockerfile
  0  docs/loki-local-config.yaml
  0  loki-build-image/Dockerfile
  2  loki-build-image/build.sh
 14  mixin/alerts.libsonnet
 72  mixin/dashboards.libsonnet
 10  mixin/recording_rules.libsonnet
  4  pkg/chunkenc/dumb_chunk.go
  4  pkg/chunkenc/gzip.go
  2  pkg/chunkenc/gzip_test.go
  4  pkg/chunkenc/interface.go
 10  pkg/distributor/distributor.go
  2  pkg/distributor/http.go
  6  pkg/ingester/chunk_test.go
  2  pkg/ingester/client/client.go
  2  pkg/ingester/ingester.go
 14  pkg/ingester/instance.go
 12  pkg/ingester/stream.go
  4  pkg/ingester/stream_test.go
  4  pkg/iter/iterator.go
  2  pkg/iter/iterator_test.go
  2  pkg/loki/fake_auth.go
 44  pkg/loki/loki.go
 40  pkg/loki/modules.go
  1  pkg/parser/labels.go
  2  pkg/promtail/client.go
  2  pkg/promtail/target.go
  2  pkg/promtail/targetmanager.go
  2  pkg/querier/http.go
  8  pkg/querier/querier.go

@ -22,8 +22,8 @@ workflows:
# https://circleci.com/blog/circleci-hacks-reuse-yaml-in-your-circleci-config-with-yaml/
defaults: &defaults
docker:
- image: grafana/tempo-build-image:checkin-generated-files-86c363c
working_directory: /go/src/github.com/grafana/tempo
- image: grafana/loki-build-image:rename-49e21d5-WIP
working_directory: /go/src/github.com/grafana/loki
jobs:
test:
@ -34,7 +34,7 @@ jobs:
- run:
name: Run Unit Tests
command: |
touch tempo-build-image/.uptodate &&
touch loki-build-image/.uptodate &&
make BUILD_IN_CONTAINER=false test
lint:
@ -45,13 +45,13 @@ jobs:
- run:
name: Lint
command: |
touch tempo-build-image/.uptodate &&
touch loki-build-image/.uptodate &&
make BUILD_IN_CONTAINER=false lint
- run:
name: Check Generated Files
command: |
touch tempo-build-image/.uptodate &&
touch loki-build-image/.uptodate &&
make BUILD_IN_CONTAINER=false check-generated-files
build:
@ -63,17 +63,17 @@ jobs:
- run:
name: Build Images
command: |
touch tempo-build-image/.uptodate &&
touch loki-build-image/.uptodate &&
make BUILD_IN_CONTAINER=false
- run:
name: Save Images
command: |
touch tempo-build-image/.uptodate &&
touch loki-build-image/.uptodate &&
make BUILD_IN_CONTAINER=false save-images
- save_cache:
key: v1-tempo-{{ .Branch }}-{{ .Revision }}
key: v1-loki-{{ .Branch }}-{{ .Revision }}
paths:
- images/
@ -84,12 +84,12 @@ jobs:
- setup_remote_docker
- restore_cache:
key: v1-tempo-{{ .Branch }}-{{ .Revision }}
key: v1-loki-{{ .Branch }}-{{ .Revision }}
- run:
name: Load Images
command: |
touch tempo-build-image/.uptodate &&
touch loki-build-image/.uptodate &&
make BUILD_IN_CONTAINER=false load-images
- run:


@ -3,7 +3,7 @@
.cache
*.output
mixin/vendor/
cmd/tempo/tempo
cmd/loki/loki
cmd/promtail/promtail
/tempo
/loki
/promtail

@ -8,7 +8,7 @@ IMAGE_TAG := $(shell ./tools/image-tag)
UPTODATE := .uptodate
# Building Docker images is now automated. The convention is every directory
# with a Dockerfile in it builds an image calls quay.io/grafana/tempo-<dirname>.
# with a Dockerfile in it builds an image calls quay.io/grafana/loki-<dirname>.
# Dependencies (i.e. things that go in the image) still need to be explicitly
# declared.
%/$(UPTODATE): %/Dockerfile
@ -17,7 +17,7 @@ UPTODATE := .uptodate
touch $@
# We don't want find to scan inside a bunch of directories, to accelerate the
# 'make: Entering directory '/go/src/github.com/grafana/tempo' phase.
# 'make: Entering directory '/go/src/github.com/grafana/loki' phase.
DONT_FIND := -name tools -prune -o -name vendor -prune -o -name .git -prune -o -name .cache -prune -o -name .pkg -prune -o
# Get a list of directories containing Dockerfiles
@ -62,7 +62,7 @@ protos: $(PROTO_GOS)
yacc: $(YACC_GOS)
# And now what goes into each image
tempo-build-image/$(UPTODATE): tempo-build-image/*
loki-build-image/$(UPTODATE): loki-build-image/*
# All the boiler plate for building golang follows:
SUDO := $(shell docker info >/dev/null 2>&1 || echo "sudo -E")
@ -87,22 +87,22 @@ NETGO_CHECK = @strings $@ | grep cgo_stub\\\.go >/dev/null || { \
ifeq ($(BUILD_IN_CONTAINER),true)
$(EXES) $(PROTO_GOS) $(YACC_GOS) lint test shell check-generated-files: tempo-build-image/$(UPTODATE)
$(EXES) $(PROTO_GOS) $(YACC_GOS) lint test shell check-generated-files: loki-build-image/$(UPTODATE)
@mkdir -p $(shell pwd)/.pkg
@mkdir -p $(shell pwd)/.cache
$(SUDO) docker run $(RM) $(TTY) -i \
-v $(shell pwd)/.cache:/go/cache \
-v $(shell pwd)/.pkg:/go/pkg \
-v $(shell pwd):/go/src/github.com/grafana/tempo \
$(IMAGE_PREFIX)tempo-build-image $@;
-v $(shell pwd):/go/src/github.com/grafana/loki \
$(IMAGE_PREFIX)loki-build-image $@;
else
$(EXES): tempo-build-image/$(UPTODATE)
$(EXES): loki-build-image/$(UPTODATE)
go build $(GO_FLAGS) -o $@ ./$(@D)
$(NETGO_CHECK)
%.pb.go: tempo-build-image/$(UPTODATE)
%.pb.go: loki-build-image/$(UPTODATE)
case "$@" in \
vendor*) \
protoc -I ./vendor:./$(@D) --gogoslick_out=plugins=grpc:./vendor ./$(patsubst %.pb.go,%.proto,$@); \
@ -115,16 +115,16 @@ $(EXES): tempo-build-image/$(UPTODATE)
%.go: %.y
goyacc -p $(basename $(notdir $<)) -o $@ $<
lint: tempo-build-image/$(UPTODATE)
lint: loki-build-image/$(UPTODATE)
gometalinter ./...
check-generated-files: tempo-build-image/$(UPTODATE) yacc protos
check-generated-files: loki-build-image/$(UPTODATE) yacc protos
@git diff-files || (echo "changed files; failing check" && exit 1)
test: tempo-build-image/$(UPTODATE)
test: loki-build-image/$(UPTODATE)
go test ./...
shell: tempo-build-image/$(UPTODATE)
shell: loki-build-image/$(UPTODATE)
bash
endif
@ -157,5 +157,5 @@ push-images:
clean:
$(SUDO) docker rmi $(IMAGE_NAMES) >/dev/null 2>&1 || true
rm -rf $(UPTODATE_FILES) $(EXES) $(PROTO_GOS) $(YACC_GOS) .cache
rm -rf $(UPTODATE_FILES) $(EXES) .cache
go clean ./...

@ -1,24 +1,24 @@
# Tempo: Like Prometheus, but for logs.
# Loki: Like Prometheus, but for logs.
[![CircleCI](https://circleci.com/gh/grafana/tempo/tree/master.svg?style=svg&circle-token=618193e5787b2951c1ea3352ad5f254f4f52313d)](https://circleci.com/gh/grafana/tempo/tree/master) [Design doc](https://docs.google.com/document/d/11tjK_lvp1-SVsFZjgOTr1vV3-q6vBAsZYIQ5ZeYBkyM/edit)
[![CircleCI](https://circleci.com/gh/grafana/loki/tree/master.svg?style=svg&circle-token=618193e5787b2951c1ea3352ad5f254f4f52313d)](https://circleci.com/gh/grafana/loki/tree/master) [Design doc](https://docs.google.com/document/d/11tjK_lvp1-SVsFZjgOTr1vV3-q6vBAsZYIQ5ZeYBkyM/edit)
Tempo is a horizontally-scalable, highly-available, multi-tenant, log aggregation
Loki is a horizontally-scalable, highly-available, multi-tenant, log aggregation
system inspired by Prometheus. It is designed to be very cost effective, as it does
not index the contents of the logs, but rather a set of labels for each log stream.
## Run it locally
Tempo can be run in a single host, no-dependencies mode using the following commands.
Loki can be run in a single host, no-dependencies mode using the following commands.
Tempo consists of 3 components; `tempo` is the main server, responsible for storing
Loki consists of 3 components; `loki` is the main server, responsible for storing
logs and processing queries. `promtail` is the agent, responsible for gather logs
and sending them to tempo and `grafana` as the UI.
and sending them to loki and `grafana` as the UI.
To run tempo, use the following commands:
To run loki, use the following commands:
```
$ go build ./cmd/tempo
$ ./tempo -config.file=./docs/tempo-local-config.yaml
$ go build ./cmd/loki
$ ./loki -config.file=./docs/loki-local-config.yaml
...
```
@ -30,7 +30,7 @@ $ ./promtail -config.file=./docs/promtail-local-config.yaml
...
```
Grafana is Tempo's UI, so you'll also want to run one of those:
Grafana is Loki's UI, so you'll also want to run one of those:
```
$ docker run -ti -p 3000:3000 -e "GF_EXPLORE_ENABLED=true" grafana/grafana-dev:master-377eaa891c1eefdec9c83a2ee4dcf5c81665ab1f
@ -40,12 +40,12 @@ In the Grafana UI (http://localhost:3000), loging with "admin"/"admin", add a ne
## Usage Instructions
Tempo is running in the ops-tools1 cluster. You can query logs from that cluster
Loki is running in the ops-tools1 cluster. You can query logs from that cluster
using the following commands:
```
$ go get github.com/grafana/tempo/cmd/logcli
$ . $GOPATH/src/github.com/grafana/tempo/env # env vars inc. URL, username etc
$ go get github.com/grafana/loki/cmd/logcli
$ . $GOPATH/src/github.com/grafana/loki/env # env vars inc. URL, username etc
$ logcli labels job
https://logs-dev-ops-tools1.grafana.net/api/prom/label/job/values
cortex-ops/consul
@ -58,14 +58,14 @@ Common labels: {job="cortex-ops/consul", namespace="cortex-ops"}
2018-06-25T12:52:09Z {instance="consul-8576459955-pl75w"} 2018/06/25 12:52:09 [INFO] raft: Compacting logs from 456973 to 465169
```
The `logcli` command is temporary until we have Grafana integration. The URLs of
The `logcli` command is lokirary until we have Grafana integration. The URLs of
the requests are printed to help with integration work.
```
$ logcli help
usage: logcli [<flags>] <command> [<args> ...]
A command-line for tempo.
A command-line for loki.
Flags:
--help Show context-sensitive help (also try --help-long and --help-man).

@ -8,7 +8,7 @@ import (
"net/url"
"time"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/logproto"
)
const (

@ -4,7 +4,7 @@ import (
"fmt"
"log"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/logproto"
)
func doLabels() {

@ -7,7 +7,7 @@ import (
)
var (
app = kingpin.New("logcli", "A command-line for tempo.")
app = kingpin.New("logcli", "A command-line for loki.")
addr = app.Flag("addr", "Server address.").Default("https://log-us.grafana.net").Envar("GRAFANA_ADDR").String()
username = app.Flag("username", "Username for HTTP basic auth.").Default("").Envar("GRAFANA_USERNAME").String()
password = app.Flag("password", "Password for HTTP basic auth.").Default("").Envar("GRAFANA_PASSWORD").String()
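
Since logcli is just a thin wrapper around HTTP calls to the Loki API, the flow above is easy to reproduce directly. Below is a minimal sketch, not the actual cmd/logcli code, that fetches label values from the /api/prom/label/job/values endpoint shown in the README output, reading the same GRAFANA_* environment variables the flags above default to.

```go
// Hedged sketch only: query a Loki label-values endpoint with basic auth,
// mirroring what logcli does for `logcli labels job`.
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Endpoint path as printed by logcli in the README example.
	url := os.Getenv("GRAFANA_ADDR") + "/api/prom/label/job/values"

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth(os.Getenv("GRAFANA_USERNAME"), os.Getenv("GRAFANA_PASSWORD"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON list of label values
}
```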

@ -9,9 +9,9 @@ import (
"github.com/fatih/color"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/grafana/tempo/pkg/iter"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/tempo/pkg/parser"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/parser"
)
func doQuery() {

@ -3,8 +3,8 @@ package main
import (
"time"
"github.com/grafana/tempo/pkg/iter"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
)
const tailIteratorIncrement = 10 * time.Second

@ -0,0 +1,4 @@
FROM alpine:3.4
COPY loki /bin/loki
EXPOSE 80
ENTRYPOINT [ "/bin/loki" ]

@ -5,8 +5,8 @@ import (
"os"
"github.com/go-kit/kit/log/level"
"github.com/grafana/tempo/pkg/helpers"
"github.com/grafana/tempo/pkg/tempo"
"github.com/grafana/loki/pkg/helpers"
"github.com/grafana/loki/pkg/loki"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
@ -14,7 +14,7 @@ import (
func main() {
var (
cfg tempo.Config
cfg loki.Config
configFile = ""
)
flag.StringVar(&configFile, "config.file", "", "Configuration file to load.")
@ -30,18 +30,18 @@ func main() {
}
}
t, err := tempo.New(cfg)
t, err := loki.New(cfg)
if err != nil {
level.Error(util.Logger).Log("msg", "error initialising tempo", "err", err)
level.Error(util.Logger).Log("msg", "error initialising loki", "err", err)
os.Exit(1)
}
if err := t.Run(); err != nil {
level.Error(util.Logger).Log("msg", "error running tempo", "err", err)
level.Error(util.Logger).Log("msg", "error running loki", "err", err)
}
if err := t.Stop(); err != nil {
level.Error(util.Logger).Log("msg", "error stopping tempo", "err", err)
level.Error(util.Logger).Log("msg", "error stopping loki", "err", err)
os.Exit(1)
}
}

@ -9,8 +9,8 @@ import (
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
"github.com/grafana/tempo/pkg/helpers"
"github.com/grafana/tempo/pkg/promtail"
"github.com/grafana/loki/pkg/helpers"
"github.com/grafana/loki/pkg/promtail"
)
func main() {
@ -33,12 +33,12 @@ func main() {
p, err := promtail.New(config)
if err != nil {
level.Error(util.Logger).Log("msg", "error creating tempo", "error", err)
level.Error(util.Logger).Log("msg", "error creating loki", "error", err)
os.Exit(1)
}
if err := p.Run(); err != nil {
level.Error(util.Logger).Log("msg", "error starting tempo", "error", err)
level.Error(util.Logger).Log("msg", "error starting loki", "error", err)
os.Exit(1)
}

@ -1,4 +0,0 @@
FROM alpine:3.4
COPY tempo /bin/tempo
EXPOSE 80
ENTRYPOINT [ "/bin/tempo" ]

@ -2,7 +2,7 @@
set -eu
SRC_PATH=$GOPATH/src/github.com/grafana/tempo
SRC_PATH=$GOPATH/src/github.com/grafana/loki
# If we run make directly, any files created on the bind mount
# will have awkward ownership. So we switch to a user with the

@ -2,14 +2,14 @@
prometheusAlerts+:: {
groups+: [
{
name: 'tempo_alerts',
name: 'loki_alerts',
rules: [
{
alert: 'TempoRequestErrors',
alert: 'LokiRequestErrors',
expr: |||
100 * sum(rate(tempo_request_duration_seconds_count{status_code=~"5.."}[1m])) by (namespace, job, route)
100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[1m])) by (namespace, job, route)
/
sum(rate(tempo_request_duration_seconds_count[1m])) by (namespace, job, route)
sum(rate(loki_request_duration_seconds_count[1m])) by (namespace, job, route)
> 10
|||,
'for': '15m',
@ -23,9 +23,9 @@
},
},
{
alert: 'TempoRequestLatency',
alert: 'LokiRequestLatency',
expr: |||
namespace_job_route:tempo_request_duration_seconds:99quantile > 1
namespace_job_route:loki_request_duration_seconds:99quantile > 1
|||,
'for': '15m',
labels: {
@ -40,7 +40,7 @@
],
},
{
name: 'tempo_frontend_alerts',
name: 'loki_frontend_alerts',
rules: [
{
alert: 'FrontendRequestErrors',

@ -2,10 +2,10 @@ local g = import 'grafana-builder/grafana.libsonnet';
{
dashboards+: {
'tempo-writes.json':
g.dashboard('Tempo / Writes')
.addTemplate('cluster', 'kube_pod_container_info{image=~".*tempo.*"}', 'cluster')
.addTemplate('namespace', 'kube_pod_container_info{image=~".*tempo.*"}', 'namespace')
'loki-writes.json':
g.dashboard('Loki / Writes')
.addTemplate('cluster', 'kube_pod_container_info{image=~".*loki.*"}', 'cluster')
.addTemplate('namespace', 'kube_pod_container_info{image=~".*loki.*"}', 'namespace')
.addRow(
g.row('Frontend (cortex_gw)')
.addPanel(
@ -21,29 +21,29 @@ local g = import 'grafana-builder/grafana.libsonnet';
g.row('Distributor')
.addPanel(
g.panel('QPS') +
g.qpsPanel('tempo_request_duration_seconds_count{cluster="$cluster", job="$namespace/distributor", route="api_prom_push"}')
g.qpsPanel('loki_request_duration_seconds_count{cluster="$cluster", job="$namespace/distributor", route="api_prom_push"}')
)
.addPanel(
g.panel('Latency') +
g.latencyRecordingRulePanel('tempo_request_duration_seconds', [g.selector.eq('job', '$namespace/distributor'), g.selector.eq('route', 'api_prom_push')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
g.latencyRecordingRulePanel('loki_request_duration_seconds', [g.selector.eq('job', '$namespace/distributor'), g.selector.eq('route', 'api_prom_push')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
)
)
.addRow(
g.row('Ingester')
.addPanel(
g.panel('QPS') +
g.qpsPanel('tempo_request_duration_seconds_count{cluster="$cluster", job="$namespace/ingester",route="/logproto.Pusher/Push"}')
g.qpsPanel('loki_request_duration_seconds_count{cluster="$cluster", job="$namespace/ingester",route="/logproto.Pusher/Push"}')
)
.addPanel(
g.panel('Latency') +
g.latencyRecordingRulePanel('tempo_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.eq('route', '/logproto.Pusher/Push')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
g.latencyRecordingRulePanel('loki_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.eq('route', '/logproto.Pusher/Push')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
)
),
'tempo-reads.json':
g.dashboard('tempo / Reads')
.addTemplate('cluster', 'kube_pod_container_info{image=~".*tempo.*"}', 'cluster')
.addTemplate('namespace', 'kube_pod_container_info{image=~".*tempo.*"}', 'namespace')
'loki-reads.json':
g.dashboard('loki / Reads')
.addTemplate('cluster', 'kube_pod_container_info{image=~".*loki.*"}', 'cluster')
.addTemplate('namespace', 'kube_pod_container_info{image=~".*loki.*"}', 'namespace')
.addRow(
g.row('Frontend (cortex_gw)')
.addPanel(
@ -59,83 +59,83 @@ local g = import 'grafana-builder/grafana.libsonnet';
g.row('Querier')
.addPanel(
g.panel('QPS') +
g.qpsPanel('tempo_request_duration_seconds_count{cluster="$cluster", job="$namespace/querier"}')
g.qpsPanel('loki_request_duration_seconds_count{cluster="$cluster", job="$namespace/querier"}')
)
.addPanel(
g.panel('Latency') +
g.latencyRecordingRulePanel('tempo_request_duration_seconds', [g.selector.eq('job', '$namespace/querier')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
g.latencyRecordingRulePanel('loki_request_duration_seconds', [g.selector.eq('job', '$namespace/querier')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
)
)
.addRow(
g.row('Ingester')
.addPanel(
g.panel('QPS') +
g.qpsPanel('tempo_request_duration_seconds_count{cluster="$cluster", job="$namespace/ingester",route!~"/logproto.Pusher/Push|metrics|ready|traces"}')
g.qpsPanel('loki_request_duration_seconds_count{cluster="$cluster", job="$namespace/ingester",route!~"/logproto.Pusher/Push|metrics|ready|traces"}')
)
.addPanel(
g.panel('Latency') +
g.latencyRecordingRulePanel('tempo_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.nre('route', '/logproto.Pusher/Push|metrics|ready')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
g.latencyRecordingRulePanel('loki_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.nre('route', '/logproto.Pusher/Push|metrics|ready')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
)
),
'tempo-chunks.json':
g.dashboard('Tempo / Chunks')
.addTemplate('cluster', 'kube_pod_container_info{image=~".*tempo.*"}', 'cluster')
.addTemplate('namespace', 'kube_pod_container_info{image=~".*tempo.*"}', 'namespace')
'loki-chunks.json':
g.dashboard('Loki / Chunks')
.addTemplate('cluster', 'kube_pod_container_info{image=~".*loki.*"}', 'cluster')
.addTemplate('namespace', 'kube_pod_container_info{image=~".*loki.*"}', 'namespace')
.addRow(
g.row('Active Series / Chunks')
.addPanel(
g.panel('Series') +
g.queryPanel('sum(tempo_ingester_memory_chunks{cluster="$cluster", job="$namespace/ingester"})', 'series'),
g.queryPanel('sum(loki_ingester_memory_chunks{cluster="$cluster", job="$namespace/ingester"})', 'series'),
)
.addPanel(
g.panel('Chunks per series') +
g.queryPanel('sum(tempo_ingester_memory_chunks{cluster="$cluster", job="$namespace/ingester"}) / sum(tempo_ingester_memory_series{job="$namespace/ingester"})', 'chunks'),
g.queryPanel('sum(loki_ingester_memory_chunks{cluster="$cluster", job="$namespace/ingester"}) / sum(loki_ingester_memory_series{job="$namespace/ingester"})', 'chunks'),
)
)
.addRow(
g.row('Flush Stats')
.addPanel(
g.panel('Utilization') +
g.latencyPanel('tempo_ingester_chunk_utilization', '{cluster="$cluster", job="$namespace/ingester"}', multiplier='1') +
g.latencyPanel('loki_ingester_chunk_utilization', '{cluster="$cluster", job="$namespace/ingester"}', multiplier='1') +
{ yaxes: g.yaxes('percentunit') },
)
.addPanel(
g.panel('Age') +
g.latencyPanel('tempo_ingester_chunk_age_seconds', '{cluster="$cluster", job="$namespace/ingester"}'),
g.latencyPanel('loki_ingester_chunk_age_seconds', '{cluster="$cluster", job="$namespace/ingester"}'),
),
)
.addRow(
g.row('Flush Stats')
.addPanel(
g.panel('Size') +
g.latencyPanel('tempo_ingester_chunk_length', '{cluster="$cluster", job="$namespace/ingester"}', multiplier='1') +
g.latencyPanel('loki_ingester_chunk_length', '{cluster="$cluster", job="$namespace/ingester"}', multiplier='1') +
{ yaxes: g.yaxes('short') },
)
.addPanel(
g.panel('Entries') +
g.queryPanel('sum(rate(tempo_chunk_store_index_entries_per_chunk_sum{cluster="$cluster", job="$namespace/ingester"}[5m])) / sum(rate(tempo_chunk_store_index_entries_per_chunk_count{cluster="$cluster", job="$namespace/ingester"}[5m]))', 'entries'),
g.queryPanel('sum(rate(loki_chunk_store_index_entries_per_chunk_sum{cluster="$cluster", job="$namespace/ingester"}[5m])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{cluster="$cluster", job="$namespace/ingester"}[5m]))', 'entries'),
),
)
.addRow(
g.row('Flush Stats')
.addPanel(
g.panel('Queue Length') +
g.queryPanel('tempo_ingester_flush_queue_length{cluster="$cluster", job="$namespace/ingester"}', '{{instance}}'),
g.queryPanel('loki_ingester_flush_queue_length{cluster="$cluster", job="$namespace/ingester"}', '{{instance}}'),
)
.addPanel(
g.panel('Flush Rate') +
g.qpsPanel('tempo_ingester_chunk_age_seconds_count{cluster="$cluster", job="$namespace/ingester"}'),
g.qpsPanel('loki_ingester_chunk_age_seconds_count{cluster="$cluster", job="$namespace/ingester"}'),
),
),
'tempo-frontend.json':
g.dashboard('Tempo / Frontend')
.addTemplate('cluster', 'kube_pod_container_info{image=~".*tempo.*"}', 'cluster')
.addTemplate('namespace', 'kube_pod_container_info{image=~".*tempo.*"}', 'namespace')
'loki-frontend.json':
g.dashboard('Loki / Frontend')
.addTemplate('cluster', 'kube_pod_container_info{image=~".*loki.*"}', 'cluster')
.addTemplate('namespace', 'kube_pod_container_info{image=~".*loki.*"}', 'namespace')
.addRow(
g.row('tempo Reqs (cortex_gw)')
g.row('loki Reqs (cortex_gw)')
.addPanel(
g.panel('QPS') +
g.qpsPanel('cortex_gw_request_duration_seconds_count{cluster="$cluster", job="$namespace/cortex-gw"}')
@ -146,9 +146,9 @@ local g = import 'grafana-builder/grafana.libsonnet';
)
),
'promtail.json':
g.dashboard('Tempo / Promtail')
.addTemplate('cluster', 'kube_pod_container_info{image=~".*tempo.*"}', 'cluster')
.addTemplate('namespace', 'kube_pod_container_info{image=~".*tempo.*"}', 'namespace')
g.dashboard('Loki / Promtail')
.addTemplate('cluster', 'kube_pod_container_info{image=~".*loki.*"}', 'cluster')
.addTemplate('namespace', 'kube_pod_container_info{image=~".*loki.*"}', 'namespace')
.addRow(
g.row('promtail Reqs')
.addPanel(

@ -22,13 +22,13 @@ local histogramRules(metric, labels) =
{
prometheus_rules+:: {
groups+: [{
name: 'tempo_rules',
name: 'loki_rules',
rules:
histogramRules('tempo_request_duration_seconds', ['job']) +
histogramRules('tempo_request_duration_seconds', ['job', 'route']) +
histogramRules('tempo_request_duration_seconds', ['namespace', 'job', 'route']),
histogramRules('loki_request_duration_seconds', ['job']) +
histogramRules('loki_request_duration_seconds', ['job', 'route']) +
histogramRules('loki_request_duration_seconds', ['namespace', 'job', 'route']),
}, {
name: 'tempo_frontend_rules',
name: 'loki_frontend_rules',
rules:
histogramRules('cortex_gw_request_duration_seconds', ['job']) +
histogramRules('cortex_gw_request_duration_seconds', ['job', 'route']) +

@ -5,8 +5,8 @@ import (
"sort"
"time"
"github.com/grafana/tempo/pkg/iter"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
)
const (

@ -11,9 +11,9 @@ import (
"math"
"time"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/tempo/pkg/iter"
"github.com/grafana/loki/pkg/iter"
"github.com/pkg/errors"
)

@ -8,7 +8,7 @@ import (
"testing"
"time"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/logproto"
"github.com/stretchr/testify/require"
)

@ -5,8 +5,8 @@ import (
"io"
"time"
"github.com/grafana/tempo/pkg/iter"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
)
// Errors returned by the chunk interface.

@ -14,24 +14,24 @@ import (
"github.com/weaveworks/common/user"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/tempo/pkg/ingester/client"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/ingester/client"
"github.com/grafana/loki/pkg/logproto"
)
var (
sendDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "tempo",
Namespace: "loki",
Name: "distributor_send_duration_seconds",
Help: "Time spent sending a sample batch to multiple replicated ingesters.",
Buckets: []float64{.001, .0025, .005, .01, .025, .05, .1, .25, .5, 1},
}, []string{"method", "status_code"})
ingesterAppends = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "tempo",
Namespace: "loki",
Name: "distributor_ingester_appends_total",
Help: "The total number of batch appends sent to ingesters.",
}, []string{"ingester"})
ingesterAppendFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "tempo",
Namespace: "loki",
Name: "distributor_ingester_append_failures_total",
Help: "The total number of failed batch appends sent to ingesters.",
}, []string{"ingester"})
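
For context on the rename above, a small illustrative sketch (not part of this commit) of how the Namespace field determines the exported metric name: client_golang joins namespace, subsystem and name with underscores, so the metrics above now surface as loki_distributor_* rather than tempo_distributor_*.

```go
// Illustrative only: show the fully qualified metric name produced by
// Namespace "loki" and the counter name used in the distributor.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fqName := prometheus.BuildFQName("loki", "", "distributor_ingester_appends_total")
	fmt.Println(fqName) // loki_distributor_ingester_appends_total
}
```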

@ -5,7 +5,7 @@ import (
"github.com/cortexproject/cortex/pkg/util"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/logproto"
)
// PushHandler reads a snappy-compressed proto from the HTTP body.

@ -6,9 +6,9 @@ import (
"testing"
"time"
"github.com/grafana/tempo/pkg/chunkenc"
"github.com/grafana/tempo/pkg/iter"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/chunkenc"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

@ -6,7 +6,7 @@ import (
"time"
cortex_client "github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/logproto"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/mwitkow/go-grpc-middleware"
opentracing "github.com/opentracing/opentracing-go"

@ -10,7 +10,7 @@ import (
"github.com/weaveworks/common/user"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/logproto"
)
// Config for an ingester.

@ -8,11 +8,11 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/grafana/tempo/pkg/helpers"
"github.com/grafana/tempo/pkg/iter"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/tempo/pkg/parser"
"github.com/grafana/tempo/pkg/querier"
"github.com/grafana/loki/pkg/helpers"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/parser"
"github.com/grafana/loki/pkg/querier"
)
const queryBatchSize = 128
@ -24,12 +24,12 @@ var (
var (
streamsCreatedTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "tempo",
Namespace: "loki",
Name: "ingester_streams_created_total",
Help: "The total number of streams created in the ingester.",
}, []string{"org"})
streamsRemovedTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "tempo",
Namespace: "loki",
Name: "ingester_streams_removed_total",
Help: "The total number of streams removed by the ingester.",
}, []string{"org"})

@ -7,25 +7,25 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/grafana/tempo/pkg/chunkenc"
"github.com/grafana/tempo/pkg/iter"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/chunkenc"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
)
var (
chunksCreatedTotal = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "tempo",
Namespace: "loki",
Name: "ingester_chunks_created_total",
Help: "The total number of chunks created in the ingester.",
})
chunksFlushedTotal = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "tempo",
Namespace: "loki",
Name: "ingester_chunks_flushed_total",
Help: "The total number of chunks flushed by the ingester.",
})
samplesPerChunk = prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: "tempo",
Namespace: "loki",
Subsystem: "ingester",
Name: "samples_per_chunk",
Help: "The number of samples in a chunk.",

@ -6,8 +6,8 @@ import (
"testing"
"time"
"github.com/grafana/tempo/pkg/chunkenc"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/chunkenc"
"github.com/grafana/loki/pkg/logproto"
"github.com/stretchr/testify/require"
)

@ -7,8 +7,8 @@ import (
"regexp"
"time"
"github.com/grafana/tempo/pkg/helpers"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/helpers"
"github.com/grafana/loki/pkg/logproto"
)
// EntryIterator iterates over entries in time-order.

@ -5,7 +5,7 @@ import (
"testing"
"time"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/logproto"
"github.com/stretchr/testify/assert"
)

@ -1,4 +1,4 @@
package tempo
package loki
import (
"context"

@ -1,4 +1,4 @@
package tempo
package loki
import (
"flag"
@ -13,13 +13,13 @@ import (
"github.com/weaveworks/common/middleware"
"github.com/weaveworks/common/server"
"github.com/grafana/tempo/pkg/distributor"
"github.com/grafana/tempo/pkg/ingester"
"github.com/grafana/tempo/pkg/ingester/client"
"github.com/grafana/tempo/pkg/querier"
"github.com/grafana/loki/pkg/distributor"
"github.com/grafana/loki/pkg/ingester"
"github.com/grafana/loki/pkg/ingester/client"
"github.com/grafana/loki/pkg/querier"
)
// Config is the root config for Tempo.
// Config is the root config for Loki.
type Config struct {
Target moduleName `yaml:"target,omitempty"`
AuthEnabled bool `yaml:"auth_enabled,omitempty"`
@ -33,7 +33,7 @@ type Config struct {
// RegisterFlags registers flag.
func (c *Config) RegisterFlags(f *flag.FlagSet) {
c.Server.MetricsNamespace = "tempo"
c.Server.MetricsNamespace = "loki"
c.Target = All
f.Var(&c.Target, "target", "target module (default All)")
f.BoolVar(&c.AuthEnabled, "auth.enabled", true, "Set to false to disable auth.")
@ -45,8 +45,8 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) {
c.Ingester.RegisterFlags(f)
}
// Tempo is the root datastructure for Tempo.
type Tempo struct {
// Loki is the root datastructure for Loki.
type Loki struct {
cfg Config
server *server.Server
@ -60,23 +60,23 @@ type Tempo struct {
inited map[moduleName]struct{}
}
// New makes a new Tempo.
func New(cfg Config) (*Tempo, error) {
tempo := &Tempo{
// New makes a new Loki.
func New(cfg Config) (*Loki, error) {
loki := &Loki{
cfg: cfg,
inited: map[moduleName]struct{}{},
}
tempo.setupAuthMiddleware()
loki.setupAuthMiddleware()
if err := tempo.init(cfg.Target); err != nil {
if err := loki.init(cfg.Target); err != nil {
return nil, err
}
return tempo, nil
return loki, nil
}
func (t *Tempo) setupAuthMiddleware() {
func (t *Loki) setupAuthMiddleware() {
if t.cfg.AuthEnabled {
t.cfg.Server.GRPCMiddleware = []grpc.UnaryServerInterceptor{
middleware.ServerUserHeaderInterceptor,
@ -96,7 +96,7 @@ func (t *Tempo) setupAuthMiddleware() {
}
}
func (t *Tempo) init(m moduleName) error {
func (t *Loki) init(m moduleName) error {
if _, ok := t.inited[m]; ok {
return nil
}
@ -118,19 +118,19 @@ func (t *Tempo) init(m moduleName) error {
return nil
}
// Run starts Tempo running, and blocks until a signal is received.
func (t *Tempo) Run() error {
// Run starts Loki running, and blocks until a signal is received.
func (t *Loki) Run() error {
return t.server.Run()
}
// Stop gracefully stops a Tempo.
func (t *Tempo) Stop() error {
// Stop gracefully stops a Loki.
func (t *Loki) Stop() error {
t.server.Shutdown()
t.stop(t.cfg.Target)
return nil
}
func (t *Tempo) stop(m moduleName) {
func (t *Loki) stop(m moduleName) {
if _, ok := t.inited[m]; !ok {
return
}

@ -1,4 +1,4 @@
package tempo
package loki
import (
"fmt"
@ -13,15 +13,15 @@ import (
"github.com/weaveworks/common/middleware"
"github.com/weaveworks/common/server"
"github.com/grafana/tempo/pkg/distributor"
"github.com/grafana/tempo/pkg/ingester"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/tempo/pkg/querier"
"github.com/grafana/loki/pkg/distributor"
"github.com/grafana/loki/pkg/ingester"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/querier"
)
type moduleName int
// The various modules that make up Tempo.
// The various modules that make up Loki.
const (
Ring moduleName = iota
Server
@ -75,12 +75,12 @@ func (m *moduleName) Set(s string) error {
}
}
func (t *Tempo) initServer() (err error) {
func (t *Loki) initServer() (err error) {
t.server, err = server.New(t.cfg.Server)
return
}
func (t *Tempo) initRing() (err error) {
func (t *Loki) initRing() (err error) {
t.ring, err = ring.New(t.cfg.Ingester.LifecyclerConfig.RingConfig)
if err != nil {
return
@ -89,7 +89,7 @@ func (t *Tempo) initRing() (err error) {
return
}
func (t *Tempo) initDistributor() (err error) {
func (t *Loki) initDistributor() (err error) {
t.distributor, err = distributor.New(t.cfg.Distributor, t.cfg.IngesterClient, t.ring)
if err != nil {
return
@ -108,7 +108,7 @@ func (t *Tempo) initDistributor() (err error) {
return
}
func (t *Tempo) initQuerier() (err error) {
func (t *Loki) initQuerier() (err error) {
t.querier, err = querier.New(t.cfg.Querier, t.cfg.IngesterClient, t.ring)
if err != nil {
return
@ -129,7 +129,7 @@ func (t *Tempo) initQuerier() (err error) {
return
}
func (t *Tempo) initIngester() (err error) {
func (t *Loki) initIngester() (err error) {
t.cfg.Ingester.LifecyclerConfig.ListenPort = &t.cfg.Server.GRPCListenPort
t.ingester, err = ingester.New(t.cfg.Ingester)
if err != nil {
@ -143,41 +143,41 @@ func (t *Tempo) initIngester() (err error) {
return
}
func (t *Tempo) stopIngester() error {
func (t *Loki) stopIngester() error {
t.ingester.Shutdown()
return nil
}
type module struct {
deps []moduleName
init func(t *Tempo) error
stop func(t *Tempo) error
init func(t *Loki) error
stop func(t *Loki) error
}
var modules = map[moduleName]module{
Server: {
init: (*Tempo).initServer,
init: (*Loki).initServer,
},
Ring: {
deps: []moduleName{Server},
init: (*Tempo).initRing,
init: (*Loki).initRing,
},
Distributor: {
deps: []moduleName{Ring, Server},
init: (*Tempo).initDistributor,
init: (*Loki).initDistributor,
},
Ingester: {
deps: []moduleName{Server},
init: (*Tempo).initIngester,
stop: (*Tempo).stopIngester,
init: (*Loki).initIngester,
stop: (*Loki).stopIngester,
},
Querier: {
deps: []moduleName{Ring, Server},
init: (*Tempo).initQuerier,
init: (*Loki).initQuerier,
},
All: {
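
The module map above pairs each moduleName with its dependencies and init/stop hooks. A hedged sketch of how (*Loki).init, whose body is truncated in the earlier loki.go hunk, could walk that map: dependencies first, then the module's own init, memoised via the inited map. This is an assumed reconstruction, not the verbatim implementation.

```go
// Assumed completion of the truncated (*Loki).init shown above.
func (t *Loki) init(m moduleName) error {
	if _, ok := t.inited[m]; ok {
		return nil // already initialised
	}

	mod := modules[m]

	// Initialise dependencies before the module itself.
	for _, dep := range mod.deps {
		if err := t.init(dep); err != nil {
			return err
		}
	}

	if mod.init != nil {
		if err := mod.init(t); err != nil {
			return err
		}
	}

	t.inited[m] = struct{}{}
	return nil
}
```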

@ -6,6 +6,7 @@ package parser
import __yyfmt__ "fmt"
//line pkg/parser/labels.y:2
import (
"github.com/prometheus/prometheus/pkg/labels"
)

@ -18,7 +18,7 @@ import (
"github.com/cortexproject/cortex/pkg/util/flagext"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/logproto"
)
const contentType = "application/x-protobuf"

@ -13,7 +13,7 @@ import (
"github.com/prometheus/common/model"
fsnotify "gopkg.in/fsnotify.v1"
"github.com/grafana/tempo/pkg/helpers"
"github.com/grafana/loki/pkg/helpers"
)
var (

@ -7,7 +7,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/grafana/tempo/pkg/helpers"
"github.com/grafana/loki/pkg/helpers"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery"

@ -11,7 +11,7 @@ import (
"time"
"github.com/gorilla/mux"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/logproto"
)
const (

@ -9,10 +9,10 @@ import (
"github.com/cortexproject/cortex/pkg/util"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/tempo/pkg/helpers"
"github.com/grafana/tempo/pkg/ingester/client"
"github.com/grafana/tempo/pkg/iter"
"github.com/grafana/tempo/pkg/logproto"
"github.com/grafana/loki/pkg/helpers"
"github.com/grafana/loki/pkg/ingester/client"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
)
// Config for a querier.
