feat(metadata): introduce a separate split interval for recent query window (#11897)

Ashwanth committed 1 year ago (via GitHub) · parent 1b4d23f9b7 · commit 9e7725b31b
19 files changed (number of changed lines in parentheses):

 1. CHANGELOG.md (1)
 2. docs/sources/configure/_index.md (24)
 3. pkg/bloomgateway/cache.go (1)
 4. pkg/querier/queryrange/index_stats_cache.go (1)
 5. pkg/querier/queryrange/labels_cache.go (33)
 6. pkg/querier/queryrange/labels_cache_test.go (334)
 7. pkg/querier/queryrange/limits/definitions.go (2)
 8. pkg/querier/queryrange/queryrangebase/results_cache.go (2)
 9. pkg/querier/queryrange/queryrangebase/results_cache_test.go (3)
10. pkg/querier/queryrange/roundtrip.go (3)
11. pkg/querier/queryrange/roundtrip_test.go (52)
12. pkg/querier/queryrange/series_cache.go (9)
13. pkg/querier/queryrange/series_cache_test.go (366)
14. pkg/querier/queryrange/split_by_interval_test.go (370)
15. pkg/querier/queryrange/splitters.go (69)
16. pkg/querier/queryrange/volume_cache.go (1)
17. pkg/storage/chunk/cache/resultscache/cache.go (24)
18. pkg/storage/chunk/cache/resultscache/cache_test.go (120)
19. pkg/validation/limits.go (37)

@ -55,6 +55,7 @@
* [11143](https://github.com/grafana/loki/pull/11143) **sandeepsukhani** otel: Add support for per tenant configuration for mapping otlp data to loki format
* [11499](https://github.com/grafana/loki/pull/11284) **jmichalek132** Config: Adds `frontend.log-query-request-headers` to enable logging of request headers in query logs.
* [11817](https://github.com/grafana/loki/pull/11817) **ashwanthgoli** Ruler: Add support for filtering results of `/prometheus/api/v1/rules` endpoint by rule_name, rule_group, file and type.
* [11897](https://github.com/grafana/loki/pull/11897) **ashwanthgoli** Metadata: Introduces a separate split interval of `split_recent_metadata_queries_by_interval` for `recent_metadata_query_window` to help with caching recent metadata query results.

##### Fixes

* [11074](https://github.com/grafana/loki/pull/11074) **hainenber** Fix panic in lambda-promtail due to mishandling of empty DROP_LABELS env var.

@ -2911,6 +2911,30 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# CLI flag: -querier.split-metadata-queries-by-interval
[split_metadata_queries_by_interval: <duration> | default = 1d]

# Experimental. Split interval to use for the portion of a metadata request
# that falls within `recent_metadata_query_window`. The rest of the request,
# which falls outside the window, still uses
# `split_metadata_queries_by_interval`. If set to 0, the entire request is
# split using `split_metadata_queries_by_interval`.
# CLI flag: -experimental.querier.split-recent-metadata-queries-by-interval
[split_recent_metadata_queries_by_interval: <duration> | default = 1h]

# Experimental. Metadata query window within which
# `split_recent_metadata_queries_by_interval` is applied: the portion of a
# metadata request that falls within this window is split using
# `split_recent_metadata_queries_by_interval`. A value of 0 disables using a
# different split interval for recent metadata queries.
#
# This option improves the cacheability of recent metadata queries. The query
# split interval also determines the interval used in the cache key. The
# default split interval of 24h works well for caching long queries, with each
# cache key holding one day's results, but metadata queries are often shorter
# than 24h, so caching them effectively requires a smaller split interval.
# `recent_metadata_query_window` together with
# `split_recent_metadata_queries_by_interval` configures a shorter split
# interval for recent metadata queries.
# CLI flag: -experimental.querier.recent-metadata-query-window
[recent_metadata_query_window: <duration> | default = 0s]
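
For illustration, a minimal Go sketch (not part of this change; the helper and the 6h window value are hypothetical) of how the two settings above choose the split interval for a metadata query:

```go
package main

import (
	"fmt"
	"time"
)

// pickSplit mirrors the documented behaviour: if the query start falls within
// recentWindow of "now", recentSplit is used; otherwise the default metadata
// split interval applies. A zero window or zero recent split disables the
// special-casing entirely.
func pickSplit(now, queryStart time.Time, defaultSplit, recentSplit, recentWindow time.Duration) time.Duration {
	if recentWindow == 0 || recentSplit == 0 {
		return defaultSplit
	}
	if !queryStart.Before(now.Add(-recentWindow)) {
		return recentSplit
	}
	return defaultSplit
}

func main() {
	now := time.Now()
	defaultSplit := 24 * time.Hour // split_metadata_queries_by_interval
	recentSplit := time.Hour       // split_recent_metadata_queries_by_interval
	recentWindow := 6 * time.Hour  // recent_metadata_query_window (hypothetical value)

	fmt.Println(pickSplit(now, now.Add(-2*time.Hour), defaultSplit, recentSplit, recentWindow))  // 1h0m0s
	fmt.Println(pickSplit(now, now.Add(-48*time.Hour), defaultSplit, recentSplit, recentWindow)) // 24h0m0s
}
```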
# Interval to use for time-based splitting when a request is within the
# `query_ingesters_within` window; defaults to `split-queries-by-interval` by
# setting to 0.

@ -182,6 +182,7 @@ func NewBloomGatewayClientCacheMiddleware(
},
cacheGen,
retentionEnabled,
false,
)
return &ClientCache{

@ -123,6 +123,7 @@ func NewIndexStatsCacheMiddleware(
},
parallelismForReq,
retentionEnabled,
false,
metrics,
)
}

@ -11,21 +11,42 @@ import (
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache" "github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/validation"
) )
type cacheKeyLabels struct { type cacheKeyLabels struct {
Limits Limits
transformer UserIDTransformer transformer UserIDTransformer
iqo util.IngesterQueryOptions }
// metadataSplitIntervalForTimeRange returns split interval for series and label requests.
// If `recent_metadata_query_window` is configured and the query start interval is within this window,
// it returns `split_recent_metadata_queries_by_interval`.
// For other cases, the default split interval of `split_metadata_queries_by_interval` will be used.
func metadataSplitIntervalForTimeRange(limits Limits, tenantIDs []string, ref, start time.Time) time.Duration {
split := validation.MaxDurationOrZeroPerTenant(tenantIDs, limits.MetadataQuerySplitDuration)
recentMetadataQueryWindow := validation.MaxDurationOrZeroPerTenant(tenantIDs, limits.RecentMetadataQueryWindow)
recentMetadataQuerySplitInterval := validation.MaxDurationOrZeroPerTenant(tenantIDs, limits.RecentMetadataQuerySplitDuration)
// if either of the options are not configured, use the default metadata split interval
if recentMetadataQueryWindow == 0 || recentMetadataQuerySplitInterval == 0 {
return split
}
// if the query start is not before window start, it would be split using recentMetadataQuerySplitInterval
if windowStart := ref.Add(-recentMetadataQueryWindow); !start.Before(windowStart) {
split = recentMetadataQuerySplitInterval
}
return split
} }
// GenerateCacheKey generates a cache key based on the userID, split duration and the interval of the request.
// It also includes the label name and the provided query for label values request.
func (i cacheKeyLabels) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
lr := r.(*LabelRequest)
- split := SplitIntervalForTimeRange(i.iqo, i.Limits, i.MetadataQuerySplitDuration, []string{userID}, time.Now().UTC(), r.GetEnd().UTC())
split := metadataSplitIntervalForTimeRange(i.Limits, []string{userID}, time.Now().UTC(), r.GetStart().UTC())
var currentInterval int64
if denominator := int64(split / time.Millisecond); denominator > 0 {
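
For reference, a rough sketch of the key shape this produces, inferred from the cache-key tests in this PR (`labels:<user>:<interval>:<split>`); the exact assembly continues past this hunk, so treat the details as an approximation:

```go
package main

import (
	"fmt"
	"time"
)

// labelsCacheKey approximates how a label-names cache key is built: the
// request start is bucketed by the chosen split interval, so every request in
// the same bucket shares a cache entry. The split itself (printed as
// nanoseconds) is part of the key, keeping keys for different split
// configurations distinct.
func labelsCacheKey(userID string, start time.Time, split time.Duration) string {
	currentInterval := start.UnixMilli() / int64(split/time.Millisecond)
	return fmt.Sprintf("labels:%s:%d:%d", userID, currentInterval, split)
}

func main() {
	// With a 30m recent split, two label queries issued inside the same
	// 30-minute bucket map to one key, and thus one cached extent.
	fmt.Println(labelsCacheKey("fake", time.Now(), 30*time.Minute))
}
```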
@ -80,7 +101,6 @@ func NewLabelsCacheMiddleware(
merger queryrangebase.Merger,
c cache.Cache,
cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
- iqo util.IngesterQueryOptions,
shouldCache queryrangebase.ShouldCacheFn,
parallelismForReq queryrangebase.ParallelismForReqFn,
retentionEnabled bool,
@ -90,7 +110,7 @@ func NewLabelsCacheMiddleware(
return queryrangebase.NewResultsCacheMiddleware(
logger,
c,
- cacheKeyLabels{limits, transformer, iqo},
cacheKeyLabels{limits, transformer},
limits,
merger,
labelsExtractor{},
@ -100,6 +120,7 @@ func NewLabelsCacheMiddleware(
},
parallelismForReq,
retentionEnabled,
true,
metrics,
)
}

@ -70,7 +70,6 @@ func TestLabelsCache(t *testing.T) {
cache.NewMockCache(),
nil,
nil,
- nil,
func(_ context.Context, _ []string, _ queryrangebase.Request) int {
return 1
},
@ -82,173 +81,124 @@ func TestLabelsCache(t *testing.T) {
return cacheMiddleware return cacheMiddleware
} }
cacheMiddleware := setupCacheMW() composeLabelsResp := func(lbls []string, splits int64) *LokiLabelNamesResponse {
for _, values := range []bool{false, true} { return &LokiLabelNamesResponse{
prefix := "labels" Status: "success",
if values { Version: uint32(loghttp.VersionV1),
prefix = "label values" Data: lbls,
} Statistics: stats.Result{
t.Run(prefix+": cache the response for the same request", func(t *testing.T) { Summary: stats.Summary{
start := testTime.Truncate(time.Millisecond) Splits: splits,
end := start.Add(time.Hour)
labelsReq := LabelRequest{
LabelRequest: logproto.LabelRequest{
Start: &start,
End: &end,
},
}
if values {
labelsReq.Values = true
labelsReq.Name = "foo"
labelsReq.Query = `{cluster="eu-west1"}`
}
labelsResp := &LokiLabelNamesResponse{
Status: "success",
Version: uint32(loghttp.VersionV1),
Data: []string{"bar", "buzz"},
Statistics: stats.Result{
Summary: stats.Summary{
Splits: 1,
},
}, },
} },
}
called := 0
handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
called++
// should request the entire length with no partitioning as nothing is cached yet.
require.Equal(t, labelsReq.GetStart(), r.GetStart())
require.Equal(t, labelsReq.GetEnd(), r.GetEnd())
got := r.(*LabelRequest)
require.Equal(t, labelsReq.GetName(), got.GetName())
require.Equal(t, labelsReq.GetValues(), got.GetValues())
require.Equal(t, labelsReq.GetQuery(), got.GetQuery())
return labelsResp, nil
}))
ctx := user.InjectOrgID(context.Background(), "fake") }
got, err := handler.Do(ctx, &labelsReq)
require.NoError(t, err)
require.Equal(t, 1, called) // called actual handler, as not cached.
require.Equal(t, labelsResp, got)
// Doing same request again shouldn't change anything. start := testTime.Truncate(time.Millisecond)
called = 0 end := start.Add(time.Hour)
got, err = handler.Do(ctx, &labelsReq) labelsReq := &LabelRequest{
require.NoError(t, err) LabelRequest: logproto.LabelRequest{
require.Equal(t, 0, called) Start: &start,
require.Equal(t, labelsResp, got) End: &end,
}) },
} }
labelsResp := composeLabelsResp([]string{"bar", "buzz"}, 1)
var downstreamHandlerFunc func(context.Context, queryrangebase.Request) (queryrangebase.Response, error)
downstreamHandler := &mockDownstreamHandler{fn: func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
return downstreamHandlerFunc(ctx, req)
}}
// reset cacheMiddleware
cacheMiddleware = setupCacheMW()
for _, values := range []bool{false, true} { for _, values := range []bool{false, true} {
labelsReq := labelsReq
prefix := "labels" prefix := "labels"
if values { if values {
prefix = "label values" prefix = "label values: "
labelsReq.Values = true
labelsReq.Name = "foo"
labelsReq.Query = `{cluster="eu-west1"}`
} }
t.Run(prefix+": a new request with overlapping time range should reuse part of the previous request for the overlap", func(t *testing.T) {
cacheMiddleware := setupCacheMW()
start := testTime.Truncate(time.Millisecond)
end := start.Add(time.Hour)
labelsReq1 := LabelRequest{ for _, tc := range []struct {
LabelRequest: logproto.LabelRequest{ name string
Start: &start, req queryrangebase.Request
End: &end, expectedQueryStart, expectedQueryEnd time.Time
}, downstreamResponse *LokiLabelNamesResponse
} downstreamCalls int
expectedReponse *LokiLabelNamesResponse
if values { }{
labelsReq1.Values = true {
labelsReq1.Name = "foo" name: "return cached response for the same request",
labelsReq1.Query = `{cluster="eu-west1"}` downstreamCalls: 0,
} expectedReponse: labelsResp,
req: labelsReq,
labelsResp1 := &LokiLabelNamesResponse{ },
Status: "success", {
Version: uint32(loghttp.VersionV1), name: "a new request with overlapping time range should reuse results of the previous request",
Data: []string{"bar", "buzz"}, req: labelsReq.WithStartEnd(labelsReq.GetStart(), labelsReq.GetEnd().Add(15*time.Minute)),
Statistics: stats.Result{ expectedQueryStart: labelsReq.GetEnd(),
Summary: stats.Summary{ expectedQueryEnd: labelsReq.GetEnd().Add(15 * time.Minute),
Splits: 1, downstreamCalls: 1,
}, downstreamResponse: composeLabelsResp([]string{"fizz"}, 1),
}, expectedReponse: composeLabelsResp([]string{"bar", "buzz", "fizz"}, 2),
} },
{
// To avoid returning incorrect results, we only use extents that are entirely within the requested query range.
name: "cached response not entirely within the requested range",
req: labelsReq.WithStartEnd(labelsReq.GetStart().Add(15*time.Minute), labelsReq.GetEnd().Add(-15*time.Minute)),
expectedQueryStart: labelsReq.GetStart().Add(15 * time.Minute),
expectedQueryEnd: labelsReq.GetEnd().Add(-15 * time.Minute),
downstreamCalls: 1,
downstreamResponse: composeLabelsResp([]string{"buzz", "fizz"}, 1),
expectedReponse: composeLabelsResp([]string{"buzz", "fizz"}, 1),
},
} {
t.Run(prefix+tc.name, func(t *testing.T) {
cacheMiddleware := setupCacheMW()
downstreamHandler.ResetCount()
downstreamHandlerFunc = func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
// should request the entire length with no partitioning as nothing is cached yet.
require.Equal(t, labelsReq.GetStart(), r.GetStart())
require.Equal(t, labelsReq.GetEnd(), r.GetEnd())
got := r.(*LabelRequest)
require.Equal(t, labelsReq.GetName(), got.GetName())
require.Equal(t, labelsReq.GetValues(), got.GetValues())
require.Equal(t, labelsReq.GetQuery(), got.GetQuery())
return labelsResp, nil
}
called := 0 handler := cacheMiddleware.Wrap(downstreamHandler)
handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
called++
// should request the entire length with no partitioning as nothing is cached yet. ctx := user.InjectOrgID(context.Background(), "fake")
require.Equal(t, labelsReq1.GetStart(), r.GetStart()) got, err := handler.Do(ctx, labelsReq)
require.Equal(t, labelsReq1.GetEnd(), r.GetEnd()) require.NoError(t, err)
require.Equal(t, 1, downstreamHandler.Called()) // call downstream handler, as not cached.
require.Equal(t, labelsResp, got)
got := r.(*LabelRequest) downstreamHandler.ResetCount()
require.Equal(t, labelsReq1.GetName(), got.GetName()) downstreamHandlerFunc = func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
require.Equal(t, labelsReq1.GetValues(), got.GetValues()) require.Equal(t, tc.expectedQueryStart, r.GetStart())
require.Equal(t, labelsReq1.GetQuery(), got.GetQuery()) require.Equal(t, tc.expectedQueryEnd, r.GetEnd())
return labelsResp1, nil got := r.(*LabelRequest)
})) require.Equal(t, labelsReq.GetName(), got.GetName())
require.Equal(t, labelsReq.GetValues(), got.GetValues())
require.Equal(t, labelsReq.GetQuery(), got.GetQuery())
ctx := user.InjectOrgID(context.Background(), "fake") return tc.downstreamResponse, nil
got, err := handler.Do(ctx, &labelsReq1) }
require.NoError(t, err)
require.Equal(t, 1, called)
require.Equal(t, labelsResp1, got)
labelsReq2 := labelsReq1.WithStartEnd(labelsReq1.GetStart().Add(15*time.Minute), labelsReq1.GetEnd().Add(15*time.Minute)) got, err = handler.Do(ctx, tc.req)
require.NoError(t, err)
require.Equal(t, tc.downstreamCalls, downstreamHandler.Called())
require.Equal(t, tc.expectedReponse, got)
called = 0 })
handler = cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { }
called++
// make downstream request only for the non-overlapping portion of the query.
require.Equal(t, labelsReq1.GetEnd(), r.GetStart())
require.Equal(t, labelsReq2.GetEnd(), r.GetEnd())
got := r.(*LabelRequest)
require.Equal(t, labelsReq1.GetName(), got.GetName())
require.Equal(t, labelsReq1.GetValues(), got.GetValues())
require.Equal(t, labelsReq1.GetQuery(), got.GetQuery())
return &LokiLabelNamesResponse{
Status: "success",
Version: uint32(loghttp.VersionV1),
Data: []string{"fizz"},
Statistics: stats.Result{
Summary: stats.Summary{
Splits: 1,
},
},
}, nil
}))
got, err = handler.Do(ctx, labelsReq2)
require.NoError(t, err)
require.Equal(t, 1, called)
// two splits as we merge the results from the extent and downstream request
labelsResp1.Statistics.Summary.Splits = 2
require.Equal(t, &LokiLabelNamesResponse{
Status: "success",
Version: uint32(loghttp.VersionV1),
Data: []string{"bar", "buzz", "fizz"},
Statistics: stats.Result{
Summary: stats.Summary{
Splits: 2,
},
},
}, got)
})
} }
} }
@ -310,7 +260,6 @@ func TestLabelCache_freshness(t *testing.T) {
cache.NewMockCache(),
nil,
nil,
- nil,
func(_ context.Context, _ []string, _ queryrangebase.Request) int {
return 1
},
@ -363,67 +312,46 @@ func TestLabelCache_freshness(t *testing.T) {
func TestLabelQueryCacheKey(t *testing.T) { func TestLabelQueryCacheKey(t *testing.T) {
const ( const (
defaultTenant = "a" defaultSplit = time.Hour
alternateTenant = "b" recentMetadataSplitDuration = 30 * time.Minute
defaultSplit = time.Hour recentMetadataQueryWindow = time.Hour
ingesterSplit = 90 * time.Minute
ingesterQueryWindow = defaultSplit * 3
) )
l := fakeLimits{ l := fakeLimits{
metadataSplitDuration: map[string]time.Duration{defaultTenant: defaultSplit, alternateTenant: defaultSplit}, metadataSplitDuration: map[string]time.Duration{tenantID: defaultSplit},
ingesterSplitDuration: map[string]time.Duration{defaultTenant: ingesterSplit}, recentMetadataSplitDuration: map[string]time.Duration{tenantID: recentMetadataSplitDuration},
recentMetadataQueryWindow: map[string]time.Duration{tenantID: recentMetadataQueryWindow},
} }
cases := []struct { cases := []struct {
name, tenantID string name string
start, end time.Time start, end time.Time
expectedSplit time.Duration expectedSplit time.Duration
iqo util.IngesterQueryOptions values bool
values bool limits Limits
}{ }{
{ {
name: "outside ingester query window", name: "outside recent metadata query window",
tenantID: defaultTenant, start: time.Now().Add(-3 * time.Hour),
start: time.Now().Add(-6 * time.Hour), end: time.Now().Add(-2 * time.Hour),
end: time.Now().Add(-5 * time.Hour),
expectedSplit: defaultSplit, expectedSplit: defaultSplit,
iqo: ingesterQueryOpts{ limits: l,
queryIngestersWithin: ingesterQueryWindow,
queryStoreOnly: false,
},
},
{
name: "within ingester query window",
tenantID: defaultTenant,
start: time.Now().Add(-6 * time.Hour),
end: time.Now().Add(-ingesterQueryWindow / 2),
expectedSplit: ingesterSplit,
iqo: ingesterQueryOpts{
queryIngestersWithin: ingesterQueryWindow,
queryStoreOnly: false,
},
}, },
{ {
name: "within ingester query window, but query store only", name: "within recent metadata query window",
tenantID: defaultTenant, start: time.Now().Add(-30 * time.Minute),
start: time.Now().Add(-6 * time.Hour), end: time.Now(),
end: time.Now().Add(-ingesterQueryWindow / 2), expectedSplit: recentMetadataSplitDuration,
expectedSplit: defaultSplit, limits: l,
iqo: ingesterQueryOpts{
queryIngestersWithin: ingesterQueryWindow,
queryStoreOnly: true,
},
}, },
{ {
name: "within ingester query window, but no ingester split duration configured", name: "within recent metadata query window, but recent split duration is not configured",
tenantID: alternateTenant, start: time.Now().Add(-30 * time.Minute),
start: time.Now().Add(-6 * time.Hour), end: time.Now(),
end: time.Now().Add(-ingesterQueryWindow / 2),
expectedSplit: defaultSplit, expectedSplit: defaultSplit,
iqo: ingesterQueryOpts{ limits: fakeLimits{
queryIngestersWithin: ingesterQueryWindow, metadataSplitDuration: map[string]time.Duration{tenantID: defaultSplit},
queryStoreOnly: false, recentMetadataQueryWindow: map[string]time.Duration{tenantID: recentMetadataQueryWindow},
}, },
}, },
} }
@ -431,7 +359,7 @@ func TestLabelQueryCacheKey(t *testing.T) {
for _, values := range []bool{true, false} { for _, values := range []bool{true, false} {
for _, tc := range cases { for _, tc := range cases {
t.Run(fmt.Sprintf("%s (values: %v)", tc.name, values), func(t *testing.T) { t.Run(fmt.Sprintf("%s (values: %v)", tc.name, values), func(t *testing.T) {
keyGen := cacheKeyLabels{l, nil, tc.iqo} keyGen := cacheKeyLabels{tc.limits, nil}
r := &LabelRequest{ r := &LabelRequest{
LabelRequest: logproto.LabelRequest{ LabelRequest: logproto.LabelRequest{
@ -453,12 +381,12 @@ func TestLabelQueryCacheKey(t *testing.T) {
// and therefore we can't know the current interval apriori without duplicating the logic // and therefore we can't know the current interval apriori without duplicating the logic
var pattern *regexp.Regexp var pattern *regexp.Regexp
if values { if values {
pattern = regexp.MustCompile(fmt.Sprintf(`labelvalues:%s:%s:%s:(\d+):%d`, tc.tenantID, labelName, regexp.QuoteMeta(query), tc.expectedSplit)) pattern = regexp.MustCompile(fmt.Sprintf(`labelvalues:%s:%s:%s:(\d+):%d`, tenantID, labelName, regexp.QuoteMeta(query), tc.expectedSplit))
} else { } else {
pattern = regexp.MustCompile(fmt.Sprintf(`labels:%s:(\d+):%d`, tc.tenantID, tc.expectedSplit)) pattern = regexp.MustCompile(fmt.Sprintf(`labels:%s:(\d+):%d`, tenantID, tc.expectedSplit))
} }
require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tc.tenantID, r)) require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tenantID, r))
}) })
} }
} }

@ -15,6 +15,8 @@ type Limits interface {
logql.Limits
QuerySplitDuration(string) time.Duration
MetadataQuerySplitDuration(string) time.Duration
RecentMetadataQuerySplitDuration(string) time.Duration
RecentMetadataQueryWindow(string) time.Duration
IngesterQuerySplitDuration(string) time.Duration
MaxQuerySeries(context.Context, string) int
MaxEntriesLimitPerQuery(context.Context, string) int

@ -127,6 +127,7 @@ func NewResultsCacheMiddleware(
shouldCache ShouldCacheFn,
parallelismForReq ParallelismForReqFn,
retentionEnabled bool,
onlyUseEntireExtent bool,
metrics *ResultsCacheMetrics,
) (Middleware, error) {
if cacheGenNumberLoader != nil {
@ -172,6 +173,7 @@ func NewResultsCacheMiddleware(
parallelismForReqWrapper,
cacheGenNumberLoader,
retentionEnabled,
onlyUseEntireExtent,
)
return out
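
The new parameter changes how cached extents may be reused. As the series and labels cache tests in this PR put it, "to avoid returning incorrect results, we only use extents that are entirely within the requested query range". A minimal sketch of that rule under assumed types (the real resultscache internals are not shown here):

```go
package main

import (
	"fmt"
	"time"
)

// extent is a stand-in for a cached result covering [start, end].
type extent struct{ start, end time.Time }

// usableExtents keeps the extents a request may reuse. With onlyUseEntireExtent
// set (as the labels and series caches now do), an extent is reused only when
// it lies entirely within [reqStart, reqEnd]: metadata responses are not
// time-indexed, so a partially overlapping extent cannot be trimmed to the
// requested range. Without the flag, partial overlaps are left to the normal
// extraction path (not modelled in this sketch).
func usableExtents(exts []extent, reqStart, reqEnd time.Time, onlyUseEntireExtent bool) []extent {
	var out []extent
	for _, e := range exts {
		if onlyUseEntireExtent && (e.start.Before(reqStart) || e.end.After(reqEnd)) {
			continue
		}
		out = append(out, e)
	}
	return out
}

func main() {
	now := time.Now()
	cached := []extent{{start: now.Add(-2 * time.Hour), end: now.Add(-1 * time.Hour)}}
	// A narrower request cannot reuse the wider cached extent under the new flag,
	// so the downstream handler is queried for the full range instead.
	fmt.Println(len(usableExtents(cached, now.Add(-90*time.Minute), now.Add(-75*time.Minute), true))) // 0
}
```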

@ -422,6 +422,7 @@ func TestResultsCache(t *testing.T) {
return mockLimits{}.MaxQueryParallelism(context.Background(), "fake")
},
false,
false,
nil,
)
require.NoError(t, err)
@ -468,6 +469,7 @@ func TestResultsCacheRecent(t *testing.T) {
return mockLimits{}.MaxQueryParallelism(context.Background(), "fake")
},
false,
false,
nil,
)
require.NoError(t, err)
@ -578,6 +580,7 @@ func TestResultsCacheShouldCacheFunc(t *testing.T) {
return mockLimits{}.MaxQueryParallelism(context.Background(), "fake")
},
false,
false,
nil,
)
require.NoError(t, err)

@ -514,7 +514,6 @@ func NewSeriesTripperware(
merger,
c,
cacheGenNumLoader,
- iqo,
func(_ context.Context, r base.Request) bool {
return !r.GetCachingOptions().Disabled
},
@ -600,7 +599,6 @@ func NewLabelsTripperware(
merger,
c,
cacheGenNumLoader,
- iqo,
func(_ context.Context, r base.Request) bool {
return !r.GetCachingOptions().Disabled
},
@ -679,6 +677,7 @@ func NewMetricTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logge
)
},
retentionEnabled,
false,
metrics.ResultsCacheMetrics,
)
if err != nil {

@ -1237,24 +1237,26 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) {
}

type fakeLimits struct {
maxQueryLength time.Duration
maxQueryParallelism int
tsdbMaxQueryParallelism int
maxQueryLookback time.Duration
maxEntriesLimitPerQuery int
maxSeries int
splitDuration map[string]time.Duration
metadataSplitDuration map[string]time.Duration
recentMetadataSplitDuration map[string]time.Duration
recentMetadataQueryWindow map[string]time.Duration
ingesterSplitDuration map[string]time.Duration
minShardingLookback time.Duration
queryTimeout time.Duration
requiredLabels []string
requiredNumberLabels int
maxQueryBytesRead int
maxQuerierBytesRead int
maxStatsCacheFreshness time.Duration
maxMetadataCacheFreshness time.Duration
volumeEnabled bool
}

func (f fakeLimits) QuerySplitDuration(key string) time.Duration {
@ -1271,6 +1273,20 @@ func (f fakeLimits) MetadataQuerySplitDuration(key string) time.Duration {
return f.metadataSplitDuration[key]
}
func (f fakeLimits) RecentMetadataQuerySplitDuration(key string) time.Duration {
if f.recentMetadataSplitDuration == nil {
return 0
}
return f.recentMetadataSplitDuration[key]
}
func (f fakeLimits) RecentMetadataQueryWindow(key string) time.Duration {
if f.recentMetadataQueryWindow == nil {
return 0
}
return f.recentMetadataQueryWindow[key]
}
func (f fakeLimits) IngesterQuerySplitDuration(key string) time.Duration {
if f.ingesterSplitDuration == nil {
return 0

@ -17,21 +17,18 @@ import (
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache" "github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/validation" "github.com/grafana/loki/pkg/util/validation"
) )
type cacheKeySeries struct { type cacheKeySeries struct {
Limits Limits
transformer UserIDTransformer transformer UserIDTransformer
iqo util.IngesterQueryOptions
} }
// GenerateCacheKey generates a cache key based on the userID, matchers, split duration and the interval of the request. // GenerateCacheKey generates a cache key based on the userID, matchers, split duration and the interval of the request.
func (i cacheKeySeries) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string { func (i cacheKeySeries) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
sr := r.(*LokiSeriesRequest) sr := r.(*LokiSeriesRequest)
split := metadataSplitIntervalForTimeRange(i.Limits, []string{userID}, time.Now().UTC(), r.GetStart().UTC())
split := SplitIntervalForTimeRange(i.iqo, i.Limits, i.MetadataQuerySplitDuration, []string{userID}, time.Now().UTC(), r.GetEnd().UTC())
var currentInterval int64 var currentInterval int64
if denominator := int64(split / time.Millisecond); denominator > 0 { if denominator := int64(split / time.Millisecond); denominator > 0 {
@ -87,7 +84,6 @@ func NewSeriesCacheMiddleware(
merger queryrangebase.Merger, merger queryrangebase.Merger,
c cache.Cache, c cache.Cache,
cacheGenNumberLoader queryrangebase.CacheGenNumberLoader, cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
iqo util.IngesterQueryOptions,
shouldCache queryrangebase.ShouldCacheFn, shouldCache queryrangebase.ShouldCacheFn,
parallelismForReq queryrangebase.ParallelismForReqFn, parallelismForReq queryrangebase.ParallelismForReqFn,
retentionEnabled bool, retentionEnabled bool,
@ -97,7 +93,7 @@ func NewSeriesCacheMiddleware(
return queryrangebase.NewResultsCacheMiddleware( return queryrangebase.NewResultsCacheMiddleware(
logger, logger,
c, c,
cacheKeySeries{limits, transformer, iqo}, cacheKeySeries{limits, transformer},
limits, limits,
merger, merger,
seriesExtractor{}, seriesExtractor{},
@ -107,6 +103,7 @@ func NewSeriesCacheMiddleware(
}, },
parallelismForReq, parallelismForReq,
retentionEnabled, retentionEnabled,
true,
metrics, metrics,
) )
} }

@ -78,7 +78,6 @@ func TestSeriesCache(t *testing.T) {
cache.NewMockCache(),
nil,
nil,
- nil,
func(_ context.Context, _ []string, _ queryrangebase.Request) int {
return 1
},
@ -91,195 +90,135 @@ func TestSeriesCache(t *testing.T) {
return cacheMiddleware return cacheMiddleware
} }
t.Run("caches the response for the same request", func(t *testing.T) { composeSeriesResp := func(series [][]logproto.SeriesIdentifier_LabelsEntry, splits int64) *LokiSeriesResponse {
cacheMiddleware := setupCacheMW() var data []logproto.SeriesIdentifier
from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour)) for _, v := range series {
data = append(data, logproto.SeriesIdentifier{Labels: v})
seriesReq := &LokiSeriesRequest{
StartTs: from.Time(),
EndTs: through.Time(),
Match: []string{`{namespace=~".*"}`},
Path: seriesAPIPath,
} }
seriesResp := &LokiSeriesResponse{ return &LokiSeriesResponse{
Status: "success", Status: "success",
Version: uint32(loghttp.VersionV1), Version: uint32(loghttp.VersionV1),
Data: []logproto.SeriesIdentifier{ Data: data,
{
Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
},
},
Statistics: stats.Result{ Statistics: stats.Result{
Summary: stats.Summary{ Summary: stats.Summary{
Splits: 1, Splits: splits,
}, },
}, },
} }
}
called := 0 var downstreamHandlerFunc func(context.Context, queryrangebase.Request) (queryrangebase.Response, error)
handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { downstreamHandler := &mockDownstreamHandler{fn: func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
called++ return downstreamHandlerFunc(ctx, req)
}}
// should request the entire length with no partitioning as nothing is cached yet.
require.Equal(t, seriesReq.GetStart(), r.GetStart())
require.Equal(t, seriesReq.GetEnd(), r.GetEnd())
return seriesResp, nil
}))
ctx := user.InjectOrgID(context.Background(), "fake")
got, err := handler.Do(ctx, seriesReq)
require.NoError(t, err)
require.Equal(t, 1, called) // called actual handler, as not cached.
require.Equal(t, seriesResp, got)
// Doing same request again shouldn't change anything.
called = 0
got, err = handler.Do(ctx, seriesReq)
require.NoError(t, err)
require.Equal(t, 0, called)
require.Equal(t, seriesResp, got)
})
t.Run("a new request with overlapping time range should reuse part of the previous request for the overlap", func(t *testing.T) {
cacheMiddleware := setupCacheMW()
from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour))
req1 := &LokiSeriesRequest{
StartTs: from.Time(),
EndTs: through.Time(),
Match: []string{`{namespace=~".*"}`},
Path: seriesAPIPath,
}
resp1 := &LokiSeriesResponse{
Status: "success",
Version: uint32(loghttp.VersionV1),
Data: []logproto.SeriesIdentifier{
{
Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "dev"}},
},
{
Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
},
},
Statistics: stats.Result{
Summary: stats.Summary{
Splits: 1,
},
},
}
called := 0
handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
called++
// should request the entire length with no partitioning as nothing is cached yet.
require.Equal(t, req1.GetStart(), r.GetStart())
require.Equal(t, req1.GetEnd(), r.GetEnd())
return resp1, nil from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour))
})) seriesReq := &LokiSeriesRequest{
StartTs: from.Time(),
EndTs: through.Time(),
Match: []string{`{namespace=~".*"}`},
Path: seriesAPIPath,
}
seriesResp := composeSeriesResp([][]logproto.SeriesIdentifier_LabelsEntry{
{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "dev"}},
{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
}, 1)
for _, tc := range []struct {
name string
req queryrangebase.Request
expectedQueryStart, expectedQueryEnd time.Time
downstreamResponse *LokiSeriesResponse
downstreamCalls int
expectedReponse *LokiSeriesResponse
}{
{
name: "return cached response for the same request",
downstreamCalls: 0,
expectedReponse: seriesResp,
req: seriesReq,
},
{
name: "a new request with overlapping time range should reuse results of the previous request",
req: seriesReq.WithStartEnd(seriesReq.GetStart(), seriesReq.GetEnd().Add(15*time.Minute)),
expectedQueryStart: seriesReq.GetEnd(),
expectedQueryEnd: seriesReq.GetEnd().Add(15 * time.Minute),
downstreamCalls: 1,
downstreamResponse: composeSeriesResp([][]logproto.SeriesIdentifier_LabelsEntry{
{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
}, 1),
expectedReponse: composeSeriesResp([][]logproto.SeriesIdentifier_LabelsEntry{
{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "dev"}},
{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
}, 2),
},
{
// To avoid returning incorrect results, we only use extents that are entirely within the requested query range.
name: "cached response not entirely within the requested range",
req: seriesReq.WithStartEnd(seriesReq.GetStart().Add(15*time.Minute), seriesReq.GetEnd().Add(-15*time.Minute)),
expectedQueryStart: seriesReq.GetStart().Add(15 * time.Minute),
expectedQueryEnd: seriesReq.GetEnd().Add(-15 * time.Minute),
downstreamCalls: 1,
downstreamResponse: composeSeriesResp([][]logproto.SeriesIdentifier_LabelsEntry{
{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
}, 1),
expectedReponse: composeSeriesResp([][]logproto.SeriesIdentifier_LabelsEntry{
{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
}, 1),
},
} {
t.Run(tc.name, func(t *testing.T) {
cacheMiddleware := setupCacheMW()
downstreamHandler.ResetCount()
downstreamHandlerFunc = func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
require.Equal(t, seriesReq.GetStart(), r.GetStart())
require.Equal(t, seriesReq.GetEnd(), r.GetEnd())
ctx := user.InjectOrgID(context.Background(), "fake") return seriesResp, nil
got, err := handler.Do(ctx, req1) }
require.NoError(t, err)
require.Equal(t, 1, called)
require.Equal(t, resp1, got)
req2 := req1.WithStartEnd(req1.GetStart().Add(15*time.Minute), req1.GetEnd().Add(15*time.Minute)) handler := cacheMiddleware.Wrap(downstreamHandler)
called = 0 ctx := user.InjectOrgID(context.Background(), "fake")
handler = cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { got, err := handler.Do(ctx, seriesReq)
called++ require.NoError(t, err)
require.Equal(t, 1, downstreamHandler.Called()) // calls downstream handler, as not cached.
require.Equal(t, seriesResp, got)
// make downstream request only for the non-overlapping portion of the query. downstreamHandler.ResetCount()
require.Equal(t, req1.GetEnd(), r.GetStart()) downstreamHandlerFunc = func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
require.Equal(t, req1.GetEnd().Add(15*time.Minute), r.GetEnd()) require.Equal(t, tc.expectedQueryStart, r.GetStart())
require.Equal(t, tc.expectedQueryEnd, r.GetEnd())
return &LokiSeriesResponse{ return tc.downstreamResponse, nil
Status: "success", }
Version: uint32(loghttp.VersionV1),
Data: []logproto.SeriesIdentifier{
{
Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
},
},
Statistics: stats.Result{
Summary: stats.Summary{
Splits: 1,
},
},
}, nil
}))
got, err = handler.Do(ctx, req2) got, err = handler.Do(ctx, tc.req)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 1, called) require.Equal(t, tc.downstreamCalls, downstreamHandler.Called())
// two splits as we merge the results from the extent and downstream request require.Equal(t, tc.expectedReponse, got)
resp1.Statistics.Summary.Splits = 2 })
require.Equal(t, &LokiSeriesResponse{ }
Status: "success",
Version: uint32(loghttp.VersionV1),
Data: []logproto.SeriesIdentifier{
{
Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "dev"}},
},
{
Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
},
{
Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
},
},
Statistics: stats.Result{
Summary: stats.Summary{
Splits: 2,
},
},
}, got)
})
t.Run("caches are only valid for the same request parameters", func(t *testing.T) { t.Run("caches are only valid for the same request parameters", func(t *testing.T) {
cacheMiddleware := setupCacheMW() cacheMiddleware := setupCacheMW()
downstreamHandler.ResetCount()
from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour)) downstreamHandlerFunc = func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
seriesReq := &LokiSeriesRequest{
StartTs: from.Time(),
EndTs: through.Time(),
Match: []string{`{namespace=~".*"}`},
Path: seriesAPIPath,
}
seriesResp := &LokiSeriesResponse{
Status: "success",
Version: uint32(loghttp.VersionV1),
Data: []logproto.SeriesIdentifier{
{
Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
},
},
Statistics: stats.Result{
Summary: stats.Summary{
Splits: 1,
},
},
}
called := 0
handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
called++
// should request the entire length as none of the subsequent queries hit the cache.
require.Equal(t, seriesReq.GetStart(), r.GetStart()) require.Equal(t, seriesReq.GetStart(), r.GetStart())
require.Equal(t, seriesReq.GetEnd(), r.GetEnd()) require.Equal(t, seriesReq.GetEnd(), r.GetEnd())
return seriesResp, nil return seriesResp, nil
})) }
handler := cacheMiddleware.Wrap(downstreamHandler)
// initial call to fill cache // initial call to fill cache
ctx := user.InjectOrgID(context.Background(), "fake") ctx := user.InjectOrgID(context.Background(), "fake")
_, err := handler.Do(ctx, seriesReq) _, err := handler.Do(ctx, seriesReq)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 1, called) require.Equal(t, 1, downstreamHandler.Called())
type testCase struct { type testCase struct {
fn func(*LokiSeriesRequest) fn func(*LokiSeriesRequest)
@ -297,7 +236,7 @@ func TestSeriesCache(t *testing.T) {
}

for name, tc := range testCases {
- called = 0
downstreamHandler.ResetCount()
seriesReq := seriesReq
if tc.fn != nil {
@ -310,7 +249,7 @@ func TestSeriesCache(t *testing.T) {
_, err = handler.Do(ctx, seriesReq)
require.NoError(t, err)
- require.Equal(t, 1, called, name)
require.Equal(t, 1, downstreamHandler.Called(), name)
}
})
}
@ -371,7 +310,6 @@ func TestSeriesCache_freshness(t *testing.T) {
cache.NewMockCache(),
nil,
nil,
- nil,
func(_ context.Context, _ []string, _ queryrangebase.Request) int {
return 1
},
@ -428,76 +366,54 @@ func TestSeriesCache_freshness(t *testing.T) {
func TestSeriesQueryCacheKey(t *testing.T) { func TestSeriesQueryCacheKey(t *testing.T) {
const ( const (
defaultTenant = "a" defaultSplit = time.Hour
alternateTenant = "b" recentMetadataSplitDuration = 30 * time.Minute
defaultSplit = time.Hour recentMetadataQueryWindow = time.Hour
ingesterSplit = 90 * time.Minute
ingesterQueryWindow = defaultSplit * 3
) )
l := fakeLimits{ l := fakeLimits{
metadataSplitDuration: map[string]time.Duration{defaultTenant: defaultSplit, alternateTenant: defaultSplit}, metadataSplitDuration: map[string]time.Duration{tenantID: defaultSplit},
ingesterSplitDuration: map[string]time.Duration{defaultTenant: ingesterSplit}, recentMetadataSplitDuration: map[string]time.Duration{tenantID: recentMetadataSplitDuration},
recentMetadataQueryWindow: map[string]time.Duration{tenantID: recentMetadataQueryWindow},
} }
cases := []struct { cases := []struct {
name, tenantID string name string
start, end time.Time start, end time.Time
expectedSplit time.Duration expectedSplit time.Duration
iqo util.IngesterQueryOptions values bool
values bool limits Limits
}{ }{
{ {
name: "outside ingester query window", name: "outside recent metadata query window",
tenantID: defaultTenant, start: time.Now().Add(-3 * time.Hour),
start: time.Now().Add(-6 * time.Hour), end: time.Now().Add(-2 * time.Hour),
end: time.Now().Add(-5 * time.Hour),
expectedSplit: defaultSplit, expectedSplit: defaultSplit,
iqo: ingesterQueryOpts{ limits: l,
queryIngestersWithin: ingesterQueryWindow,
queryStoreOnly: false,
},
}, },
{ {
name: "within ingester query window", name: "within recent metadata query window",
tenantID: defaultTenant, start: time.Now().Add(-30 * time.Minute),
start: time.Now().Add(-6 * time.Hour), end: time.Now(),
end: time.Now().Add(-ingesterQueryWindow / 2), expectedSplit: recentMetadataSplitDuration,
expectedSplit: ingesterSplit, limits: l,
iqo: ingesterQueryOpts{
queryIngestersWithin: ingesterQueryWindow,
queryStoreOnly: false,
},
}, },
{ {
name: "within ingester query window, but query store only", name: "within recent metadata query window, but recent split duration is not configured",
tenantID: defaultTenant, start: time.Now().Add(-30 * time.Minute),
start: time.Now().Add(-6 * time.Hour), end: time.Now(),
end: time.Now().Add(-ingesterQueryWindow / 2),
expectedSplit: defaultSplit, expectedSplit: defaultSplit,
iqo: ingesterQueryOpts{ limits: fakeLimits{
queryIngestersWithin: ingesterQueryWindow, metadataSplitDuration: map[string]time.Duration{tenantID: defaultSplit},
queryStoreOnly: true, recentMetadataQueryWindow: map[string]time.Duration{tenantID: recentMetadataQueryWindow},
},
},
{
name: "within ingester query window, but no ingester split duration configured",
tenantID: alternateTenant,
start: time.Now().Add(-6 * time.Hour),
end: time.Now().Add(-ingesterQueryWindow / 2),
expectedSplit: defaultSplit,
iqo: ingesterQueryOpts{
queryIngestersWithin: ingesterQueryWindow,
queryStoreOnly: false,
}, },
}, },
} }
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
matchers := []string{`{namespace="prod"}`, `{service="foo"}`} matchers := []string{`{namespace="prod"}`, `{service="foo"}`}
keyGen := cacheKeySeries{l, nil, tc.iqo} keyGen := cacheKeySeries{tc.limits, nil}
r := &LokiSeriesRequest{ r := &LokiSeriesRequest{
StartTs: tc.start, StartTs: tc.start,
@ -508,9 +424,27 @@ func TestSeriesQueryCacheKey(t *testing.T) {
// we use regex here because cache key always refers to the current time to get the ingester query window, // we use regex here because cache key always refers to the current time to get the ingester query window,
// and therefore we can't know the current interval apriori without duplicating the logic // and therefore we can't know the current interval apriori without duplicating the logic
pattern := regexp.MustCompile(fmt.Sprintf(`series:%s:%s:(\d+):%d`, tc.tenantID, regexp.QuoteMeta(keyGen.joinMatchers(matchers)), tc.expectedSplit)) pattern := regexp.MustCompile(fmt.Sprintf(`series:%s:%s:(\d+):%d`, tenantID, regexp.QuoteMeta(keyGen.joinMatchers(matchers)), tc.expectedSplit))
require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tc.tenantID, r)) require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tenantID, r))
}) })
} }
} }
type mockDownstreamHandler struct {
called int
fn func(context.Context, queryrangebase.Request) (queryrangebase.Response, error)
}
func (m *mockDownstreamHandler) Called() int {
return m.called
}
func (m *mockDownstreamHandler) ResetCount() {
m.called = 0
}
func (m *mockDownstreamHandler) Do(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
m.called++
return m.fn(ctx, req)
}

@ -159,10 +159,12 @@ func Test_splitQuery(t *testing.T) {
t.Run(requestType, func(t *testing.T) {
for name, intervals := range map[string]struct {
input interval
expected []interval
expectedWithoutIngesterSplits []interval
splitInterval time.Duration
splitter splitter
recentMetadataQueryWindowEnabled bool
}{
"no change": {
input: interval{
@ -255,6 +257,16 @@ func Test_splitQuery(t *testing.T) {
end: refTime,
},
},
expectedWithoutIngesterSplits: []interval{
{
start: refTime.Add(-time.Hour).Truncate(time.Second),
end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
end: refTime,
},
},
splitInterval: time.Hour,
splitter: newDefaultSplitter(
fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}},
@ -295,6 +307,32 @@ func Test_splitQuery(t *testing.T) {
end: refTime,
},
},
expectedWithoutIngesterSplits: []interval{
{
start: refTime.Add(-4 * time.Hour).Add(-30 * time.Minute).Truncate(time.Second),
end: time.Date(2023, 1, 15, 4, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 4, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
end: refTime,
},
},
splitInterval: time.Hour,
splitter: newDefaultSplitter(
fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}},
@ -394,11 +432,63 @@ func Test_splitQuery(t *testing.T) {
ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour, queryStoreOnly: true},
),
},
"metadata recent query window should not affect other query types": {
input: interval{
start: refTime.Add(-4 * time.Hour).Truncate(time.Second),
end: refTime,
},
expected: []interval{
{
start: refTime.Add(-4 * time.Hour).Truncate(time.Second),
end: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
end: refTime,
},
},
splitInterval: time.Hour,
splitter: newDefaultSplitter(
fakeLimits{
recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
recentMetadataQueryWindow: map[string]time.Duration{tenantID: 3 * time.Hour},
}, nil,
),
recentMetadataQueryWindowEnabled: true,
},
} {
t.Run(name, func(t *testing.T) {
req := tc.requestBuilderFunc(intervals.input.start, intervals.input.end)
var want []queryrangebase.Request
- for _, exp := range intervals.expected {
// ingester splits do not apply for metadata queries
var expected []interval
switch req.(type) {
case *LabelRequest, *LokiSeriesRequest:
expected = intervals.expectedWithoutIngesterSplits
if intervals.recentMetadataQueryWindowEnabled {
t.Skip("this flow is tested in Test_splitRecentMetadataQuery")
}
}
if expected == nil {
expected = intervals.expected
}
for _, exp := range expected {
want = append(want, tc.requestBuilderFunc(exp.start, exp.end))
}
@ -412,22 +502,245 @@ func Test_splitQuery(t *testing.T) {
splits, err := intervals.splitter.split(refTime, []string{tenantID}, req, intervals.splitInterval)
require.NoError(t, err)
- if !assert.Equal(t, want, splits) {
- t.Logf("expected and actual do not match\n")
- defer t.Fail()
- if len(want) != len(splits) {
- t.Logf("expected %d splits, got %d\n", len(want), len(splits))
- return
- }
- for j := 0; j < len(want); j++ {
- exp := want[j]
- act := splits[j]
- equal := assert.Equal(t, exp, act)
- t.Logf("\t#%d [matches: %v]: expected %q/%q got %q/%q\n", j, equal, exp.GetStart(), exp.GetEnd(), act.GetStart(), act.GetEnd())
- }
- }
assertSplits(t, want, splits)
})
}
})
}
}

func Test_splitRecentMetadataQuery(t *testing.T) {
type interval struct {
start, end time.Time
}
expectedSplitGap := util.SplitGap

for requestType, tc := range map[string]struct {
requestBuilderFunc func(start, end time.Time) queryrangebase.Request
}{
"series request": {
requestBuilderFunc: func(start, end time.Time) queryrangebase.Request {
return &LokiSeriesRequest{
Match: []string{"match1"},
StartTs: start,
EndTs: end,
Path: "/series",
Shards: []string{"shard1"},
}
},
},
"label names request": {
requestBuilderFunc: func(start, end time.Time) queryrangebase.Request {
return NewLabelRequest(start, end, `{foo="bar"}`, "", "/labels")
},
},
"label values request": {
requestBuilderFunc: func(start, end time.Time) queryrangebase.Request {
return NewLabelRequest(start, end, `{foo="bar"}`, "test", "/label/test/values")
},
},
} {
t.Run(requestType, func(t *testing.T) {
for name, intervals := range map[string]struct {
input interval
expected []interval
splitInterval time.Duration
splitter splitter
}{
"wholly within recent metadata query window": {
input: interval{
start: refTime.Add(-time.Hour),
end: refTime,
},
expected: []interval{
{
start: refTime.Add(-time.Hour),
end: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
end: refTime,
},
},
splitInterval: time.Hour,
splitter: newDefaultSplitter(
fakeLimits{
recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
recentMetadataQueryWindow: map[string]time.Duration{tenantID: 2 * time.Hour},
}, nil,
),
},
"start aligns with recent metadata query window": {
input: interval{
start: refTime.Add(-1 * time.Hour),
end: refTime,
},
expected: []interval{
{
start: refTime.Add(-time.Hour),
end: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
end: refTime,
},
},
splitInterval: time.Hour,
splitter: newDefaultSplitter(
fakeLimits{
recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
recentMetadataQueryWindow: map[string]time.Duration{tenantID: 1 * time.Hour},
}, nil,
),
},
"partially within recent metadata query window": {
input: interval{
start: refTime.Add(-3 * time.Hour),
end: refTime,
},
expected: []interval{
{
start: refTime.Add(-3 * time.Hour),
end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC),
end: refTime.Add(-time.Hour).Add(-expectedSplitGap),
},
// apply split_recent_metadata_queries_by_interval for recent metadata queries
{
start: refTime.Add(-time.Hour),
end: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
end: refTime,
},
},
splitInterval: time.Hour,
splitter: newDefaultSplitter(
fakeLimits{
recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
recentMetadataQueryWindow: map[string]time.Duration{tenantID: 1 * time.Hour},
}, nil,
),
},
"outside recent metadata query window": {
input: interval{
start: refTime.Add(-4 * time.Hour),
end: refTime.Add(-2 * time.Hour),
},
expected: []interval{
{
start: refTime.Add(-4 * time.Hour),
end: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
end: refTime.Add(-2 * time.Hour),
},
},
splitInterval: time.Hour,
splitter: newDefaultSplitter(
fakeLimits{
recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
recentMetadataQueryWindow: map[string]time.Duration{tenantID: 1 * time.Hour},
}, nil,
),
},
"end aligns with recent metadata query window": {
input: interval{
start: refTime.Add(-3 * time.Hour),
end: refTime.Add(-1 * time.Hour),
},
expected: []interval{
{
start: refTime.Add(-3 * time.Hour),
end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC),
end: refTime.Add(-1 * time.Hour),
},
},
splitInterval: time.Hour,
splitter: newDefaultSplitter(
fakeLimits{
recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
recentMetadataQueryWindow: map[string]time.Duration{tenantID: 1 * time.Hour},
}, nil,
),
},
"recent metadata window not configured": {
input: interval{
start: refTime.Add(-3 * time.Hour),
end: refTime,
},
expected: []interval{
{
start: refTime.Add(-3 * time.Hour),
end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC),
end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
},
{
start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
end: refTime,
},
},
splitInterval: time.Hour,
},
} {
t.Run(name, func(t *testing.T) {
req := tc.requestBuilderFunc(intervals.input.start, intervals.input.end)
var want []queryrangebase.Request
for _, exp := range intervals.expected {
want = append(want, tc.requestBuilderFunc(exp.start, exp.end))
}
if intervals.splitInterval == 0 {
intervals.splitInterval = time.Hour
}
if intervals.splitter == nil {
intervals.splitter = newDefaultSplitter(fakeLimits{}, nil)
}
splits, err := intervals.splitter.split(refTime, []string{tenantID}, req, intervals.splitInterval)
require.NoError(t, err)
assertSplits(t, want, splits)
})
}
})
@ -1610,3 +1923,24 @@ func Test_DoesntDeadlock(t *testing.T) {
// Allow for 1% increase in goroutines
require.LessOrEqual(t, endingGoroutines, startingGoroutines*101/100)
}
func assertSplits(t *testing.T, want, splits []queryrangebase.Request) {
t.Helper()
if !assert.Equal(t, want, splits) {
t.Logf("expected and actual do not match\n")
defer t.Fail()
if len(want) != len(splits) {
t.Logf("expected %d splits, got %d\n", len(want), len(splits))
return
}
for j := 0; j < len(want); j++ {
exp := want[j]
act := splits[j]
equal := assert.Equal(t, exp, act)
t.Logf("\t#%d [matches: %v]: expected %q/%q got %q/%q\n", j, equal, exp.GetStart(), exp.GetEnd(), act.GetStart(), act.GetEnd())
}
}
}

@ -91,31 +91,56 @@ func (s *defaultSplitter) split(execTime time.Time, tenantIDs []string, req quer
}
var (
splitsBeforeRebound []queryrangebase.Request
origStart = req.GetStart().UTC()
origEnd = req.GetEnd().UTC()
start, end = origStart, origEnd
reboundOrigQuery bool
splitIntervalBeforeRebound time.Duration
)
switch req.(type) {
// not applying `split_ingester_queries_by_interval` for metadata queries since it solves a different problem of reducing the subqueries sent to the ingesters.
// we instead prefer `split_recent_metadata_queries_by_interval` for metadata queries which favours shorter subqueries to improve cache effectiveness.
// even though the number of subqueries increases, caching should deamplify it over time.
case *LokiSeriesRequest, *LabelRequest:
var (
recentMetadataQueryWindow = validation.MaxDurationOrZeroPerTenant(tenantIDs, s.limits.RecentMetadataQueryWindow)
recentMetadataQuerySplitInterval = validation.MaxDurationOrZeroPerTenant(tenantIDs, s.limits.RecentMetadataQuerySplitDuration)
)
// if either of them is not configured, we fall back to the default split interval for the entire query length.
if recentMetadataQueryWindow == 0 || recentMetadataQuerySplitInterval == 0 {
break
}
start, end, reboundOrigQuery = recentMetadataQueryBounds(execTime, recentMetadataQueryWindow, req)
splitIntervalBeforeRebound = recentMetadataQuerySplitInterval
default:
if ingesterQueryInterval := validation.MaxDurationOrZeroPerTenant(tenantIDs, s.limits.IngesterQuerySplitDuration); ingesterQueryInterval != 0 {
start, end, reboundOrigQuery = ingesterQueryBounds(execTime, s.iqo, req)
splitIntervalBeforeRebound = ingesterQueryInterval
}
}
if reboundOrigQuery {
util.ForInterval(splitIntervalBeforeRebound, start, end, endTimeInclusive, factory)
// rebound after query portion within ingester query window or recent metadata query window has been split out
end = start
start = origStart
if endTimeInclusive {
end = end.Add(-util.SplitGap)
}
// query only overlaps ingester query window or recent metadata query window, nothing more to do
if start.After(end) || start.Equal(end) {
return reqs, nil
}
// copy the splits, reset the results
splitsBeforeRebound = reqs
reqs = nil
} else {
start = origStart
@ -123,10 +148,10 @@ func (s *defaultSplitter) split(execTime time.Time, tenantIDs []string, req quer
}
// perform splitting over the rest of the time range
util.ForInterval(interval, start, end, endTimeInclusive, factory)
// move the ingester or recent metadata splits to the end to maintain correct order
reqs = append(reqs, splitsBeforeRebound...)
return reqs, nil
}
@ -270,6 +295,22 @@ func (s *metricQuerySplitter) buildMetricSplits(step int64, interval time.Durati
}
}
func recentMetadataQueryBounds(execTime time.Time, recentMetadataQueryWindow time.Duration, req queryrangebase.Request) (time.Time, time.Time, bool) {
start, end := req.GetStart().UTC(), req.GetEnd().UTC()
windowStart := execTime.UTC().Add(-recentMetadataQueryWindow)
// rebound only if the query end is strictly inside the window
if !windowStart.Before(end) {
return start, end, false
}
if windowStart.Before(start) {
windowStart = start
}
return windowStart, end, true
}
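Taken together, the request-type switch in split and recentMetadataQueryBounds above give metadata queries a two-pass split. Below is a minimal, self-contained sketch of that behaviour (illustrative only, not Loki code: it reimplements the split arithmetic with plain time math rather than util.ForInterval, assumes split_metadata_queries_by_interval=1h, split_recent_metadata_queries_by_interval=30m and recent_metadata_query_window=1h, and ignores the small gap Loki leaves between adjacent subqueries):

package main

import (
	"fmt"
	"time"
)

// splitBy cuts [start, end) into interval-aligned pieces, mimicking the effect of util.ForInterval.
func splitBy(start, end time.Time, interval time.Duration) [][2]time.Time {
	var out [][2]time.Time
	for s := start; s.Before(end); {
		e := s.Truncate(interval).Add(interval)
		if e.After(end) {
			e = end
		}
		out = append(out, [2]time.Time{s, e})
		s = e
	}
	return out
}

func main() {
	execTime := time.Date(2023, 1, 15, 8, 5, 30, 0, time.UTC)
	reqStart, reqEnd := execTime.Add(-3*time.Hour), execTime

	// recent_metadata_query_window = 1h, so everything after windowStart is "recent"
	windowStart := execTime.Add(-time.Hour)

	// older portion first (split_metadata_queries_by_interval = 1h),
	// then the recent portion (split_recent_metadata_queries_by_interval = 30m),
	// preserving chronological order as the splitter does.
	splits := append(
		splitBy(reqStart, windowStart, time.Hour),
		splitBy(windowStart, reqEnd, 30*time.Minute)...,
	)
	for _, s := range splits {
		fmt.Println(s[0].Format(time.TimeOnly), "→", s[1].Format(time.TimeOnly))
	}
	// Output: 05:05:30→06:00:00, 06:00:00→07:00:00, 07:00:00→07:05:30,
	//         07:05:30→07:30:00, 07:30:00→08:00:00, 08:00:00→08:05:30
}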
// ingesterQueryBounds determines if we need to split time ranges overlapping the ingester query window (`query_ingesters_within`)
// and retrieve the bounds for those specific splits
func ingesterQueryBounds(execTime time.Time, iqo util.IngesterQueryOptions, req queryrangebase.Request) (time.Time, time.Time, bool) {

@ -131,6 +131,7 @@ func NewVolumeCacheMiddleware(
},
parallelismForReq,
retentionEnabled,
false,
metrics,
)
}

@ -58,6 +58,7 @@ type ResultsCache struct {
merger ResponseMerger
shouldCacheReq ShouldCacheReqFn
shouldCacheRes ShouldCacheResFn
onlyUseEntireExtent bool
parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int
}
@ -79,7 +80,7 @@ func NewResultsCache(
shouldCacheRes ShouldCacheResFn,
parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int,
cacheGenNumberLoader CacheGenNumberLoader,
retentionEnabled, onlyUseEntireExtent bool,
) *ResultsCache {
return &ResultsCache{
logger: logger,
@ -95,6 +96,7 @@ func NewResultsCache(
shouldCacheReq: shouldCacheReq,
shouldCacheRes: shouldCacheRes,
parallelismForReq: parallelismForReq,
onlyUseEntireExtent: onlyUseEntireExtent,
}
}
@ -334,6 +336,25 @@ func (s ResultsCache) partition(req Request, extents []Extent) ([]Request, []Res
continue
}
if s.onlyUseEntireExtent && (start > extent.GetStart() || end < extent.GetEnd()) {
// It is not possible to extract the overlapping portion of an extent for all request types.
// Metadata results, for one, cannot be extracted because the data portion is just a list of strings with no associated timestamps.
// To avoid returning incorrect results, we only use extents that are entirely within the requested query range.
//
// Start End
// ┌────────────────────────┐
// │ Req │
// └────────────────────────┘
//
// ◄──────────────► only this extent can be used. Remaining portion of the query will be added to requests.
//
//
// ◄──────X───────► cannot be partially extracted. will be discarded if onlyUseEntireExtent is set.
// ◄───────X──────►
// ◄───────────────X──────────────────►
continue
}
// If this extent is tiny and request is not tiny, discard it: more efficient to do a few larger queries.
// Hopefully tiny request can make tiny extent into not-so-tiny extent.
@ -353,6 +374,7 @@ func (s ResultsCache) partition(req Request, extents []Extent) ([]Request, []Res
if err != nil {
return nil, nil, err
}
// extract the overlap from the cached extent. // extract the overlap from the cached extent.
cachedResponses = append(cachedResponses, s.extractor.Extract(start, end, res, extent.GetStart(), extent.GetEnd()))
start = extent.End

@ -61,7 +61,6 @@ func TestPartition(t *testing.T) {
mkAPIResponse(0, 100, 10),
},
},
{
name: "Test with a complete miss.",
input: &MockRequest{
@ -182,6 +181,123 @@ func TestPartition(t *testing.T) {
}
}
func TestPartition_onlyUseEntireExtent(t *testing.T) {
for _, tc := range []struct {
name string
input Request
prevCachedResponse []Extent
expectedRequests []Request
expectedCachedResponse []Response
}{
{
name: "overlapping extent - right",
input: &MockRequest{
Start: time.UnixMilli(0),
End: time.UnixMilli(100),
},
prevCachedResponse: []Extent{
mkExtent(60, 120),
},
expectedRequests: []Request{
&MockRequest{
Start: time.UnixMilli(0),
End: time.UnixMilli(100),
},
},
},
{
name: "overlapping extent - left",
input: &MockRequest{
Start: time.UnixMilli(20),
End: time.UnixMilli(100),
},
prevCachedResponse: []Extent{
mkExtent(0, 50),
},
expectedRequests: []Request{
&MockRequest{
Start: time.UnixMilli(20),
End: time.UnixMilli(100),
},
},
},
{
name: "overlapping extent larger than the request",
input: &MockRequest{
Start: time.UnixMilli(20),
End: time.UnixMilli(100),
},
prevCachedResponse: []Extent{
mkExtent(0, 120),
},
expectedRequests: []Request{
&MockRequest{
Start: time.UnixMilli(20),
End: time.UnixMilli(100),
},
},
},
{
name: "overlapping extent within the requested query range",
input: &MockRequest{
Start: time.UnixMilli(0),
End: time.UnixMilli(120),
},
prevCachedResponse: []Extent{
mkExtent(0, 100),
},
expectedRequests: []Request{
&MockRequest{
Start: time.UnixMilli(100),
End: time.UnixMilli(120),
},
},
expectedCachedResponse: []Response{
mkAPIResponse(0, 100, 10),
},
},
{
name: "multiple overlapping extents",
input: &MockRequest{
Start: time.UnixMilli(50),
End: time.UnixMilli(200),
},
prevCachedResponse: []Extent{
mkExtent(0, 80),
mkExtent(100, 150),
mkExtent(150, 180),
mkExtent(200, 250),
},
expectedRequests: []Request{
&MockRequest{
Start: time.UnixMilli(50),
End: time.UnixMilli(100),
},
&MockRequest{
Start: time.UnixMilli(180),
End: time.UnixMilli(200),
},
},
expectedCachedResponse: []Response{
mkAPIResponse(100, 150, 10),
mkAPIResponse(150, 180, 10),
},
},
} {
t.Run(tc.name, func(t *testing.T) {
s := ResultsCache{
extractor: MockExtractor{},
minCacheExtent: 10,
onlyUseEntireExtent: true,
}
reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse)
require.Nil(t, err)
require.Equal(t, tc.expectedRequests, reqs)
require.Equal(t, tc.expectedCachedResponse, resps)
})
}
}
func TestHandleHit(t *testing.T) {
for _, tc := range []struct {
name string
@ -491,6 +607,7 @@ func TestResultsCacheMaxFreshness(t *testing.T) {
},
nil,
false,
false,
)
require.NoError(t, err)
@ -534,6 +651,7 @@ func Test_resultsCache_MissingData(t *testing.T) {
},
nil,
false,
false,
)
require.NoError(t, err)
ctx := context.Background()

@ -107,14 +107,16 @@ type Limits struct {
QueryTimeout model.Duration `yaml:"query_timeout" json:"query_timeout"`
// Query frontend enforced limits. The default is actually parameterized by the queryrange config.
QuerySplitDuration model.Duration `yaml:"split_queries_by_interval" json:"split_queries_by_interval"`
MetadataQuerySplitDuration model.Duration `yaml:"split_metadata_queries_by_interval" json:"split_metadata_queries_by_interval"`
RecentMetadataQuerySplitDuration model.Duration `yaml:"split_recent_metadata_queries_by_interval" json:"split_recent_metadata_queries_by_interval"`
RecentMetadataQueryWindow model.Duration `yaml:"recent_metadata_query_window" json:"recent_metadata_query_window"`
IngesterQuerySplitDuration model.Duration `yaml:"split_ingester_queries_by_interval" json:"split_ingester_queries_by_interval"`
MinShardingLookback model.Duration `yaml:"min_sharding_lookback" json:"min_sharding_lookback"`
MaxQueryBytesRead flagext.ByteSize `yaml:"max_query_bytes_read" json:"max_query_bytes_read"`
MaxQuerierBytesRead flagext.ByteSize `yaml:"max_querier_bytes_read" json:"max_querier_bytes_read"`
VolumeEnabled bool `yaml:"volume_enabled" json:"volume_enabled" doc:"description=Enable log-volume endpoints."`
VolumeMaxSeries int `yaml:"volume_max_series" json:"volume_max_series" doc:"description=The maximum number of aggregated series in a log-volume response"`
// Ruler defaults and limits.
RulerMaxRulesPerRuleGroup int `yaml:"ruler_max_rules_per_rule_group" json:"ruler_max_rules_per_rule_group"`
@ -306,13 +308,14 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
_ = l.QuerySplitDuration.Set("1h")
f.Var(&l.QuerySplitDuration, "querier.split-queries-by-interval", "Split queries by a time interval and execute in parallel. The value 0 disables splitting by time. This also determines how cache keys are chosen when result caching is enabled.")
// with metadata caching, it is not possible to extract a subset of labels/series from a cached extent because unlike samples they are not associated with a timestamp.
// as a result, we could return inaccurate results. example: returning results from an entire 1h extent for a 5m query
// Setting max_metadata_cache_freshness to 24h should help us avoid caching recent data and preserve correctness.
// For the portion of the request beyond the freshness window, granularity of the cached metadata results is determined by split_metadata_queries_by_interval.
_ = l.MetadataQuerySplitDuration.Set("24h")
f.Var(&l.MetadataQuerySplitDuration, "querier.split-metadata-queries-by-interval", "Split metadata queries by a time interval and execute in parallel. The value 0 disables splitting metadata queries by time. This also determines how cache keys are chosen when label/series result caching is enabled.")
_ = l.RecentMetadataQuerySplitDuration.Set("1h")
f.Var(&l.RecentMetadataQuerySplitDuration, "experimental.querier.split-recent-metadata-queries-by-interval", "Experimental. Split interval to use for the portion of a metadata request that falls within `recent_metadata_query_window`. The rest of the request, which is outside the window, still uses `split_metadata_queries_by_interval`. If set to 0, the entire request defaults to using a split interval of `split_metadata_queries_by_interval`.")
f.Var(&l.RecentMetadataQueryWindow, "experimental.querier.recent-metadata-query-window", "Experimental. Metadata query window inside which `split_recent_metadata_queries_by_interval` gets applied; the portion of the metadata request that falls in this window is split using `split_recent_metadata_queries_by_interval`. The value 0 disables using a different split interval for recent metadata queries.\n\nThis is added to improve cacheability of recent metadata queries. The query split interval also determines the interval used in the cache key. The default split interval of 24h is useful for caching long queries, each cache key holding 1 day's results. But metadata queries are often shorter than 24h; to cache them effectively we need a smaller split interval. `recent_metadata_query_window` along with `split_recent_metadata_queries_by_interval` helps configure a shorter split interval for recent metadata queries.")
_ = l.IngesterQuerySplitDuration.Set("0s")
f.Var(&l.IngesterQuerySplitDuration, "querier.split-ingester-queries-by-interval", "Interval to use for time-based splitting when a request is within the `query_ingesters_within` window; defaults to `split-queries-by-interval` by setting to 0.")
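To see why a shorter split interval helps the recent window, here is a hedged sketch of the interplay described in the flag help above (assumed behaviour for illustration only; Loki's real cache keys also carry tenant and request identity): the split interval fixes the subquery boundaries, and only complete, interval-aligned subqueries can be reused as whole extents by the metadata results cache.

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Date(2023, 1, 15, 8, 5, 0, 0, time.UTC)
	start := now.Add(-2 * time.Hour)

	// With split_metadata_queries_by_interval=24h the whole 2h range stays a single,
	// unaligned subquery; its cached extent rarely matches a later request exactly.
	fmt.Println(start.Format(time.TimeOnly), "→", now.Format(time.TimeOnly))

	// With split_recent_metadata_queries_by_interval=1h the same range becomes
	// hour-aligned pieces; the fully covered middle piece is stable across
	// repeated "last few hours" queries and can therefore be served from cache.
	for s := start; s.Before(now); {
		e := s.Truncate(time.Hour).Add(time.Hour)
		if e.After(now) {
			e = now
		}
		fmt.Println(s.Format(time.TimeOnly), "→", e.Format(time.TimeOnly))
		s = e
	}
}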
@ -598,6 +601,16 @@ func (o *Overrides) MetadataQuerySplitDuration(userID string) time.Duration {
return time.Duration(o.getOverridesForUser(userID).MetadataQuerySplitDuration)
}
// RecentMetadataQuerySplitDuration returns the tenant specific splitby interval for recent metadata queries.
func (o *Overrides) RecentMetadataQuerySplitDuration(userID string) time.Duration {
return time.Duration(o.getOverridesForUser(userID).RecentMetadataQuerySplitDuration)
}
// RecentMetadataQueryWindow returns the tenant specific time window used to determine recent metadata queries.
func (o *Overrides) RecentMetadataQueryWindow(userID string) time.Duration {
return time.Duration(o.getOverridesForUser(userID).RecentMetadataQueryWindow)
}
// IngesterQuerySplitDuration returns the tenant specific splitby interval applied in the query frontend when querying
// during the `query_ingesters_within` window.
func (o *Overrides) IngesterQuerySplitDuration(userID string) time.Duration {
