consistent chunk metrics (#2534)

Branch: pull/2542/head
Author: Owen Diehl (committed by GitHub, 5 years ago)
Parent: b6d9fd5a00
Commit: 6fe414eb25
Changed files:
1. pkg/storage/batch.go (53 changes)
2. pkg/storage/store.go (9 changes)

pkg/storage/batch.go

@@ -25,15 +25,15 @@ import (
 )

 type ChunkMetrics struct {
-    refs         prometheus.Counter
-    filteredRefs prometheus.Counter
-    chunks       *prometheus.CounterVec
-    batches      prometheus.Histogram
+    refs    *prometheus.CounterVec
+    series  *prometheus.CounterVec
+    chunks  *prometheus.CounterVec
+    batches *prometheus.HistogramVec
 }

 const (
-    statusFiltered = "filtered"
-    statusMatched  = "matched"
+    statusDiscarded = "discarded"
+    statusMatched   = "matched"
 )

 func NewChunkMetrics(r prometheus.Registerer, maxBatchSize int) *ChunkMetrics {
@@ -43,33 +43,33 @@ func NewChunkMetrics(r prometheus.Registerer, maxBatchSize int) *ChunkMetrics {
     }

     return &ChunkMetrics{
-        refs: promauto.With(r).NewCounter(prometheus.CounterOpts{
+        refs: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
             Namespace: "loki",
             Subsystem: "index",
-            Name:      "chunk_refs_pre_filtering_total",
-            Help:      "Number of chunks refs downloaded.",
-        }),
-        filteredRefs: promauto.With(r).NewCounter(prometheus.CounterOpts{
-            Namespace: "loki",
-            Subsystem: "index",
-            Name:      "chunk_refs_post_filtering_total",
-            Help:      "Number of chunks refs downloaded whose bounds intersect the query bounds.",
-        }),
+            Name:      "chunk_refs_total",
+            Help:      "Number of chunks refs downloaded, partitioned by whether they intersect the query bounds.",
+        }, []string{"status"}),
+        series: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+            Namespace: "loki",
+            Subsystem: "store",
+            Name:      "series_total",
+            Help:      "Number of series referenced by a query, partitioned by whether they satisfy matchers.",
+        }, []string{"status"}),
         chunks: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
             Namespace: "loki",
             Subsystem: "store",
             Name:      "chunks_downloaded_total",
             Help:      "Number of chunks downloaded, partitioned by if they satisfy matchers.",
         }, []string{"status"}),
-        batches: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+        batches: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
             Namespace: "loki",
             Subsystem: "store",
-            Name:      "chunks_per_batch_post_filtering",
-            Help:      "The post-matching chunk batch size.",
+            Name:      "chunks_per_batch",
+            Help:      "The chunk batch size, partitioned by if they satisfy matchers.",

             // split buckets evenly across 0->maxBatchSize
             Buckets: prometheus.LinearBuckets(0, float64(maxBatchSize/buckets), buckets),
-        }),
+        }, []string{"status"}),
     }
 }
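For context, here is a minimal, self-contained sketch of the client_golang API this hunk switches to: a single CounterVec keyed by a `status` label in place of two separate counters, plus the LinearBuckets helper used for the histogram. The registry, values, and bucket parameters below are illustrative, not Loki's.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One vector with a "status" label replaces the old pre/post-filtering
	// counter pair; both statuses share the same metric name.
	refs := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "loki",
		Subsystem: "index",
		Name:      "chunk_refs_total",
		Help:      "Number of chunk refs downloaded, partitioned by whether they intersect the query bounds.",
	}, []string{"status"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(refs)

	refs.WithLabelValues("matched").Add(42)  // illustrative count: refs inside the query bounds
	refs.WithLabelValues("discarded").Add(7) // illustrative count: refs filtered out

	// LinearBuckets(start, width, count) is the helper the chunks_per_batch
	// histogram uses to spread `count` buckets evenly from 0 to maxBatchSize.
	fmt.Println(prometheus.LinearBuckets(0, 10, 5)) // [0 10 20 30 40]
}
```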
@@ -581,7 +581,9 @@ func fetchChunkBySeries(ctx context.Context, metrics *ChunkMetrics, chunks []*La
         return nil, err
     }

     metrics.chunks.WithLabelValues(statusMatched).Add(float64(len(allChunks)))
-    metrics.batches.Observe(float64(len(allChunks)))
+    metrics.series.WithLabelValues(statusMatched).Add(float64(len(chksBySeries)))
+    metrics.batches.WithLabelValues(statusMatched).Observe(float64(len(allChunks)))
+    metrics.batches.WithLabelValues(statusDiscarded).Observe(float64(len(chunks) - len(allChunks)))

     return chksBySeries, nil
 }
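Since `batches` is now a HistogramVec, one observation per status records both the chunks that ended up in the batch and the ones that did not. A rough stand-in for the two Observe calls above, with made-up counts in place of `len(chunks)` and `len(allChunks)`:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	batches := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "loki",
		Subsystem: "store",
		Name:      "chunks_per_batch",
		Help:      "The chunk batch size, partitioned by if they satisfy matchers.",
		Buckets:   prometheus.LinearBuckets(0, 10, 5), // illustrative buckets
	}, []string{"status"})

	// Hypothetical batch: 100 chunk refs went in, 80 chunks came out.
	requested, fetched := 100, 80

	// Mirrors the two Observe calls added in fetchChunkBySeries.
	batches.WithLabelValues("matched").Observe(float64(fetched))
	batches.WithLabelValues("discarded").Observe(float64(requested - fetched))
}
```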
@@ -591,20 +593,25 @@ func filterSeriesByMatchers(
     matchers []*labels.Matcher,
     metrics *ChunkMetrics,
 ) map[model.Fingerprint][][]*LazyChunk {
-    var filtered int // Number of chunks downlaoded to check labels, but filtered out after.
+    var filteredSeries, filteredChks int

 outer:
     for fp, chunks := range chks {
         for _, matcher := range matchers {
             if !matcher.Matches(chunks[0][0].Chunk.Metric.Get(matcher.Name)) {
                 delete(chks, fp)
-                filtered++
+                filteredSeries++
+                for _, grp := range chunks {
+                    filteredChks += len(grp)
+                }
                 continue outer
             }
         }
     }
-    metrics.chunks.WithLabelValues(statusFiltered).Add(float64(filtered))
+    metrics.chunks.WithLabelValues(statusDiscarded).Add(float64(filteredChks))
+    metrics.series.WithLabelValues(statusDiscarded).Add(float64(filteredSeries))

     return chks
 }
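filterSeriesByMatchers now tracks series and chunks separately: dropping one fingerprint discards one series but potentially many chunks, because each fingerprint maps to groups of chunks. A toy illustration of that counting, using a simplified stand-in for `map[model.Fingerprint][][]*LazyChunk` (the types and values here are hypothetical):

```go
package main

import "fmt"

// chunk is a simplified stand-in for *LazyChunk.
type chunk struct{ id int }

func main() {
	chksBySeries := map[string][][]chunk{
		"series-a": {{{1}, {2}}, {{3}}}, // two groups holding three chunks total
		"series-b": {{{4}}},             // one group, one chunk
	}

	var filteredSeries, filteredChks int
	for fp, groups := range chksBySeries {
		if fp == "series-a" { // pretend this series fails a label matcher
			delete(chksBySeries, fp)
			filteredSeries++
			// Count every chunk in every group belonging to the dropped series.
			for _, grp := range groups {
				filteredChks += len(grp)
			}
		}
	}
	fmt.Println(filteredSeries, filteredChks) // 1 3
}
```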

pkg/storage/store.go

@@ -164,16 +164,17 @@ func (s *store) lazyChunks(ctx context.Context, matchers []*labels.Matcher, from
         return nil, err
     }

-    var prefilter int
+    var prefiltered int
     var filtered int
     for i := range chks {
-        prefilter += len(chks[i])
+        prefiltered += len(chks[i])
         storeStats.TotalChunksRef += int64(len(chks[i]))
         chks[i] = filterChunksByTime(from, through, chks[i])
         filtered += len(chks[i])
     }

-    s.chunkMetrics.refs.Add(float64(prefilter))
-    s.chunkMetrics.filteredRefs.Add(float64(filtered))
+    s.chunkMetrics.refs.WithLabelValues(statusDiscarded).Add(float64(prefiltered - filtered))
+    s.chunkMetrics.refs.WithLabelValues(statusMatched).Add(float64(filtered))

     // creates lazychunks with chunks ref.
     lazyChunks := make([]*LazyChunk, 0, filtered)
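With the two old ref counters collapsed into one vector, the `matched` and `discarded` label values of `loki_index_chunk_refs_total` sum back to the old pre-filtering total. A small sketch of that accounting, using client_golang's testutil package and made-up values for `prefiltered` and `filtered`:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// Made-up counts: refs returned by the index vs. refs remaining after
	// filterChunksByTime. Only the difference is recorded as "discarded".
	prefiltered, filtered := 120, 90

	refs := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "loki",
		Subsystem: "index",
		Name:      "chunk_refs_total",
		Help:      "Number of chunk refs downloaded, partitioned by whether they intersect the query bounds.",
	}, []string{"status"})

	refs.WithLabelValues("discarded").Add(float64(prefiltered - filtered))
	refs.WithLabelValues("matched").Add(float64(filtered))

	// The two label values sum back to the old pre-filtering total.
	total := testutil.ToFloat64(refs.WithLabelValues("discarded")) +
		testutil.ToFloat64(refs.WithLabelValues("matched"))
	fmt.Println(total == float64(prefiltered)) // true
}
```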
