Add a Name() method to the Stage interface so that debug logging can show the name of the pipeline stage that just processed the log line.

Remove unnecessary logging around fsnotify events we don't care about and around saving positions.
Make the Processing Log Lines doc a first-class citizen; I reference it a lot, and it's currently hidden behind three clicks.
pull/788/head
Edward Welch 6 years ago committed by Ed
parent 3d5319e72a
commit 7deb02b532
1. README.md (1 change)
2. docs/logentry/processing-log-lines.md (2 changes)
3. pkg/logentry/stages/json.go (5 changes)
4. pkg/logentry/stages/labels.go (5 changes)
5. pkg/logentry/stages/match.go (5 changes)
6. pkg/logentry/stages/metrics.go (5 changes)
7. pkg/logentry/stages/output.go (5 changes)
8. pkg/logentry/stages/pipeline.go (7 changes)
9. pkg/logentry/stages/regex.go (5 changes)
10. pkg/logentry/stages/stage.go (2 changes)
11. pkg/logentry/stages/template.go (5 changes)
12. pkg/logentry/stages/timestamp.go (5 changes)
13. pkg/promtail/targets/filetarget.go (2 changes)
14. pkg/promtail/targets/tailer.go (1 change)

README.md
@@ -37,6 +37,7 @@ Once you have promtail, Loki, and Grafana running, continue with [our usage docs
 - [API documentation](./docs/api.md) for alternative ways of getting logs into Loki.
 - [Operations](./docs/operations.md) for important aspects of running Loki.
 - [Promtail](./docs/promtail.md) is an agent which can tail your log files and push them to Loki.
+- [Processing Log Lines](./docs/logentry/processing-log-lines.md) for detailed log processing pipeline documentation
 - [Docker Logging Driver](./cmd/docker-driver/README.md) is a docker plugin to send logs directly to Loki from Docker containers.
 - [Logcli](./docs/logcli.md) on how to query your logs without Grafana.
 - [Loki Canary](./docs/canary/README.md) for monitoring your Loki installation for missing logs.

docs/logentry/processing-log-lines.md
@@ -1,5 +1,7 @@
 # Processing Log Lines
 
 A detailed look at how to setup promtail to process your log lines, including extracting metrics and labels.
+* [Pipeline](#pipeline)
+* [Stages](#stages)

pkg/logentry/stages/json.go
@@ -153,3 +153,8 @@ func (j *jsonStage) Process(labels model.LabelSet, extracted map[string]interfac
 	}
 }
+
+// Name implements Stage
+func (j *jsonStage) Name() string {
+	return StageTypeJSON
+}

pkg/logentry/stages/labels.go
@@ -80,3 +80,8 @@ func (l *labelStage) Process(labels model.LabelSet, extracted map[string]interfa
 		}
 	}
 }
+
+// Name implements Stage
+func (l *labelStage) Name() string {
+	return StageTypeLabel
+}

pkg/logentry/stages/match.go
@@ -93,3 +93,8 @@ func (m *matcherStage) Process(labels model.LabelSet, extracted map[string]inter
 	}
 	m.pipeline.Process(labels, extracted, t, entry)
 }
+
+// Name implements Stage
+func (m *matcherStage) Name() string {
+	return StageTypeMatch
+}

pkg/logentry/stages/metrics.go
@@ -130,6 +130,11 @@ func (m *metricStage) Process(labels model.LabelSet, extracted map[string]interf
 	}
 }
+
+// Name implements Stage
+func (m *metricStage) Name() string {
+	return StageTypeMetric
+}
 
 // recordCounter will update a counter metric
 func (m *metricStage) recordCounter(name string, counter *metric.Counters, labels model.LabelSet, v interface{}) {
 	// If value matching is defined, make sure value matches.

pkg/logentry/stages/output.go
@@ -72,3 +72,8 @@ func (o *outputStage) Process(labels model.LabelSet, extracted map[string]interf
 		level.Debug(o.logger).Log("msg", "extracted data did not contain output source")
 	}
 }
+
+// Name implements Stage
+func (o *outputStage) Name() string {
+	return StageTypeOutput
+}

pkg/logentry/stages/pipeline.go
@@ -78,7 +78,7 @@ func NewPipeline(logger log.Logger, stgs PipelineStages, jobName *string, regist
 func (p *Pipeline) Process(labels model.LabelSet, extracted map[string]interface{}, ts *time.Time, entry *string) {
 	start := time.Now()
 	for i, stage := range p.stages {
-		level.Debug(p.logger).Log("msg", "processing pipeline", "stage", i, "labels", labels, "time", ts, "entry", entry)
+		level.Debug(p.logger).Log("msg", "processing pipeline", "stage", i, "name", stage.Name(), "labels", labels, "time", ts, "entry", entry)
 		stage.Process(labels, extracted, ts, entry)
 	}
 	dur := time.Since(start).Seconds()
@@ -88,6 +88,11 @@ func (p *Pipeline) Process(labels model.LabelSet, extracted map[string]interface
 	}
 }
+
+// Name implements Stage
+func (p *Pipeline) Name() string {
+	return StageTypePipeline
+}
 
 // Wrap implements EntryMiddleware
 func (p *Pipeline) Wrap(next api.EntryHandler) api.EntryHandler {
 	return api.EntryHandlerFunc(func(labels model.LabelSet, timestamp time.Time, line string) error {
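
With the stage name included, and assuming promtail's default logfmt output, a debug line from this loop would now read roughly as follows (the stage index, name, labels, and entry here are illustrative, not captured output):

level=debug msg="processing pipeline" stage=0 name=regex labels="{filename=\"/var/log/app.log\"}" time=... entry="some log line"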

pkg/logentry/stages/regex.go
@@ -121,3 +121,8 @@ func (r *regexStage) Process(labels model.LabelSet, extracted map[string]interfa
 	}
 }
+
+// Name implements Stage
+func (r *regexStage) Name() string {
+	return StageTypeRegex
+}

pkg/logentry/stages/stage.go
@@ -20,12 +20,14 @@ const (
 	StageTypeCRI      = "cri"
 	StageTypeMatch    = "match"
 	StageTypeTemplate = "template"
+	StageTypePipeline = "pipeline"
 )
 
 // Stage takes an existing set of labels, timestamp and log entry and returns either a possibly mutated
 // timestamp and log entry
 type Stage interface {
 	Process(labels model.LabelSet, extracted map[string]interface{}, time *time.Time, entry *string)
+	Name() string
 }
 
 // StageFunc is modelled on http.HandlerFunc.
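
Every Stage implementation now has to provide both methods. As a minimal sketch, a hypothetical no-op stage (noopStage is an illustration, not part of this commit) would satisfy the extended interface like so:

package stages

import (
	"time"

	"github.com/prometheus/common/model"
)

// noopStage is a hypothetical example stage: Process leaves the labels,
// extracted data, timestamp, and entry untouched, and Name is what the
// pipeline's debug log prints when this stage runs.
type noopStage struct{}

// compile-time check that noopStage satisfies the Stage interface
var _ Stage = (*noopStage)(nil)

func (n *noopStage) Process(labels model.LabelSet, extracted map[string]interface{}, t *time.Time, entry *string) {
	// intentionally a no-op
}

func (n *noopStage) Name() string {
	return "noop"
}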

pkg/logentry/stages/template.go
@@ -123,3 +123,8 @@ func (o *templateStage) Process(labels model.LabelSet, extracted map[string]inte
 		}
 	}
 }
+
+// Name implements Stage
+func (o *templateStage) Name() string {
+	return StageTypeTemplate
+}

pkg/logentry/stages/timestamp.go
@@ -102,3 +102,8 @@ func (ts *timestampStage) Process(labels model.LabelSet, extracted map[string]in
 		level.Debug(ts.logger).Log("msg", "extracted data did not contain a timestamp")
 	}
 }
+
+// Name implements Stage
+func (ts *timestampStage) Name() string {
+	return StageTypeTimestamp
+}

pkg/promtail/targets/filetarget.go
@@ -168,7 +168,7 @@ func (t *FileTarget) run() {
 			}
 			t.startTailing([]string{event.Name})
 		default:
-			level.Debug(t.logger).Log("msg", "got unknown event", "event", event)
+			// No-op we only care about Create events
 		}
 	case err := <-t.watcher.Errors:
 		level.Error(t.logger).Log("msg", "error from fswatch", "error", err)

pkg/promtail/targets/tailer.go
@@ -115,7 +115,6 @@ func (t *tailer) markPosition() error {
 	}
 
 	readBytes.WithLabelValues(t.path).Set(float64(pos))
-	level.Debug(t.logger).Log("path", t.path, "current_position", pos)
 	t.positions.Put(t.path, pos)
 	return nil
 }
