Extract results cache into new pkg (#11343)

**What this PR does / why we need it**:

This extracts the results cache from `queryrangebase` into its own pkg
so we can reuse it in other components such as the bloom-gateway without
having to import `queryrangebase`.

- Most of the logic inside
`pkg/querier/queryrange/queryrangebase/results_cache.go` now lives in
`pkg/storage/chunk/cache/resultscache/cache.go`.
- Some of the tests in
`pkg/querier/queryrange/queryrangebase/results_cache_test.go` are moved into
`pkg/storage/chunk/cache/resultscache/cache_test.go`.
- Note that here we don't have access to the types we use in
`queryrangebase` so we created a new set of mock request/response types
to test with.
pull/11379/head
Salva Corts 2 years ago committed by GitHub
parent 0e433f304e
commit 489ac8d529
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 11
      pkg/logproto/compat.go
  2. 24
      pkg/querier/queryrange/codec.go
  3. 7
      pkg/querier/queryrange/index_stats_cache.go
  4. 13
      pkg/querier/queryrange/index_stats_cache_test.go
  5. 4
      pkg/querier/queryrange/limits.go
  6. 13
      pkg/querier/queryrange/log_result_cache.go
  7. 3
      pkg/querier/queryrange/prometheus.go
  8. 8
      pkg/querier/queryrange/queryrangebase/alias.go
  9. 249
      pkg/querier/queryrange/queryrangebase/definitions/definitions.pb.go
  10. 5
      pkg/querier/queryrange/queryrangebase/definitions/definitions.proto
  11. 6
      pkg/querier/queryrange/queryrangebase/definitions/interface.go
  12. 6
      pkg/querier/queryrange/queryrangebase/middleware.go
  13. 7
      pkg/querier/queryrange/queryrangebase/query_range.go
  14. 776
      pkg/querier/queryrange/queryrangebase/queryrange.pb.go
  15. 23
      pkg/querier/queryrange/queryrangebase/queryrange.proto
  16. 507
      pkg/querier/queryrange/queryrangebase/results_cache.go
  17. 483
      pkg/querier/queryrange/queryrangebase/results_cache_test.go
  18. 22
      pkg/querier/queryrange/queryrangebase/util.go
  19. 16
      pkg/querier/queryrange/querysharding_test.go
  20. 3
      pkg/querier/queryrange/roundtrip.go
  21. 77
      pkg/querier/queryrange/roundtrip_test.go
  22. 7
      pkg/querier/queryrange/volume_cache.go
  23. 14
      pkg/querier/queryrange/volume_cache_test.go
  24. 467
      pkg/storage/chunk/cache/resultscache/cache.go
  25. 605
      pkg/storage/chunk/cache/resultscache/cache_test.go
  26. 41
      pkg/storage/chunk/cache/resultscache/config.go
  27. 56
      pkg/storage/chunk/cache/resultscache/interface.go
  28. 1520
      pkg/storage/chunk/cache/resultscache/test_types.pb.go
  29. 41
      pkg/storage/chunk/cache/resultscache/test_types.proto
  30. 1078
      pkg/storage/chunk/cache/resultscache/types.pb.go
  31. 34
      pkg/storage/chunk/cache/resultscache/types.proto
  32. 67
      pkg/storage/chunk/cache/resultscache/util.go

@ -18,6 +18,7 @@ import (
"github.com/prometheus/prometheus/model/timestamp"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util"
)
@ -260,6 +261,11 @@ func (m *IndexStatsRequest) WithStartEnd(start, end time.Time) definitions.Reque
return &clone
}
// WithStartEndForCache implements resultscache.Request.
func (m *IndexStatsRequest) WithStartEndForCache(start, end time.Time) resultscache.Request {
return m.WithStartEnd(start, end).(resultscache.Request)
}
// WithQuery clone the current request with a different query.
func (m *IndexStatsRequest) WithQuery(query string) definitions.Request {
clone := *m
@ -308,6 +314,11 @@ func (m *VolumeRequest) WithStartEnd(start, end time.Time) definitions.Request {
return &clone
}
// WithStartEndForCache implements resultscache.Request.
func (m *VolumeRequest) WithStartEndForCache(start, end time.Time) resultscache.Request {
return m.WithStartEnd(start, end).(resultscache.Request)
}
// WithQuery clone the current request with a different query.
func (m *VolumeRequest) WithQuery(query string) definitions.Request {
clone := *m

@ -14,6 +14,7 @@ import (
strings "strings"
"time"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
"github.com/grafana/dskit/httpgrpc"
@ -62,11 +63,9 @@ func (r *LokiRequest) WithStartEnd(s time.Time, e time.Time) queryrangebase.Requ
return &clone
}
func (r *LokiRequest) WithStartEndTime(s time.Time, e time.Time) *LokiRequest {
clone := *r
clone.StartTs = s
clone.EndTs = e
return &clone
// WithStartEndForCache implements resultscache.Request.
func (r *LokiRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
return r.WithStartEnd(s, e).(resultscache.Request)
}
func (r *LokiRequest) WithQuery(query string) queryrangebase.Request {
@ -114,6 +113,11 @@ func (r *LokiInstantRequest) WithStartEnd(s time.Time, _ time.Time) queryrangeba
return &clone
}
// WithStartEndForCache implements resultscache.Request.
func (r *LokiInstantRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
return r.WithStartEnd(s, e).(resultscache.Request)
}
func (r *LokiInstantRequest) WithQuery(query string) queryrangebase.Request {
clone := *r
clone.Query = query
@ -153,6 +157,11 @@ func (r *LokiSeriesRequest) WithStartEnd(s, e time.Time) queryrangebase.Request
return &clone
}
// WithStartEndForCache implements resultscache.Request.
func (r *LokiSeriesRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
return r.WithStartEnd(s, e).(resultscache.Request)
}
func (r *LokiSeriesRequest) WithQuery(_ string) queryrangebase.Request {
clone := *r
return &clone
@ -229,6 +238,11 @@ func (r *LabelRequest) WithStartEnd(s, e time.Time) queryrangebase.Request {
return &clone
}
// WithStartEndForCache implements resultscache.Request.
func (r *LabelRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
return r.WithStartEnd(s, e).(resultscache.Request)
}
func (r *LabelRequest) WithQuery(query string) queryrangebase.Request {
clone := *r
clone.Query = query

@ -14,6 +14,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/validation"
)
@ -23,7 +24,7 @@ type IndexStatsSplitter struct {
}
// GenerateCacheKey generates a cache key based on the userID, Request and interval.
func (i IndexStatsSplitter) GenerateCacheKey(ctx context.Context, userID string, r queryrangebase.Request) string {
func (i IndexStatsSplitter) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
cacheKey := i.cacheKeyLimits.GenerateCacheKey(ctx, userID, r)
return fmt.Sprintf("indexStats:%s", cacheKey)
}
@ -32,7 +33,7 @@ type IndexStatsExtractor struct{}
// Extract favors the ability to cache over exactness of results. It assumes a constant distribution
// of log volumes over a range and will extract subsets proportionally.
func (p IndexStatsExtractor) Extract(start, end int64, res queryrangebase.Response, resStart, resEnd int64) queryrangebase.Response {
func (p IndexStatsExtractor) Extract(start, end int64, res resultscache.Response, resStart, resEnd int64) resultscache.Response {
factor := util.GetFactorOfTime(start, end, resStart, resEnd)
statsRes := res.(*IndexStatsResponse)
@ -93,7 +94,7 @@ func NewIndexStatsCacheMiddleware(
c cache.Cache,
cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
shouldCache queryrangebase.ShouldCacheFn,
parallelismForReq func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int,
parallelismForReq queryrangebase.ParallelismForReqFn,
retentionEnabled bool,
transformer UserIDTransformer,
metrics *queryrangebase.ResultsCacheMetrics,

@ -15,14 +15,17 @@ import (
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/constants"
)
func TestIndexStatsCache(t *testing.T) {
cfg := queryrangebase.ResultsCacheConfig{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
Config: resultscache.Config{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
},
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
@ -158,8 +161,10 @@ func TestIndexStatsCache_RecentData(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
cfg := queryrangebase.ResultsCacheConfig{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
Config: resultscache.Config{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
},
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)

@ -14,7 +14,6 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
@ -28,6 +27,7 @@ import (
"github.com/grafana/loki/pkg/logql/syntax"
queryrange_limits "github.com/grafana/loki/pkg/querier/queryrange/limits"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/index/stats"
util_log "github.com/grafana/loki/pkg/util/log"
@ -104,7 +104,7 @@ type cacheKeyLimits struct {
transformer UserIDTransformer
}
func (l cacheKeyLimits) GenerateCacheKey(ctx context.Context, userID string, r queryrangebase.Request) string {
func (l cacheKeyLimits) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
split := l.QuerySplitDuration(userID)
var currentInterval int64

@ -10,14 +10,13 @@ import (
"github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
"golang.org/x/sync/errgroup"
"github.com/grafana/dskit/tenant"
"github.com/grafana/loki/pkg/loghttp"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logqlmodel/stats"
@ -201,7 +200,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
// if the response is empty and the query is larger than what is cached, update the cache
if isEmpty(result) && (lokiReq.EndTs.UnixNano()-lokiReq.StartTs.UnixNano() > cachedRequest.EndTs.UnixNano()-cachedRequest.StartTs.UnixNano()) {
cachedRequest = cachedRequest.WithStartEndTime(lokiReq.GetStartTs(), lokiReq.GetEndTs())
cachedRequest = cachedRequest.WithStartEnd(lokiReq.GetStartTs(), lokiReq.GetEndTs()).(*LokiRequest)
updateCache = true
}
} else {
@ -216,7 +215,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
// if we're missing data at the start, start fetching from the start to the cached start.
if lokiReq.GetStartTs().Before(cachedRequest.GetStartTs()) {
g.Go(func() error {
startRequest = lokiReq.WithStartEndTime(lokiReq.GetStartTs(), cachedRequest.GetStartTs())
startRequest = lokiReq.WithStartEnd(lokiReq.GetStartTs(), cachedRequest.GetStartTs()).(*LokiRequest)
resp, err := l.next.Do(ctx, startRequest)
if err != nil {
return err
@ -233,7 +232,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
// if we're missing data at the end, start fetching from the cached end to the end.
if lokiReq.GetEndTs().After(cachedRequest.GetEndTs()) {
g.Go(func() error {
endRequest = lokiReq.WithStartEndTime(cachedRequest.GetEndTs(), lokiReq.GetEndTs())
endRequest = lokiReq.WithStartEnd(cachedRequest.GetEndTs(), lokiReq.GetEndTs()).(*LokiRequest)
resp, err := l.next.Do(ctx, endRequest)
if err != nil {
return err
@ -255,7 +254,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
// If it's not empty only merge the response.
if startResp != nil {
if isEmpty(startResp) {
cachedRequest = cachedRequest.WithStartEndTime(startRequest.GetStartTs(), cachedRequest.GetEndTs())
cachedRequest = cachedRequest.WithStartEnd(startRequest.GetStartTs(), cachedRequest.GetEndTs()).(*LokiRequest)
updateCache = true
} else {
if startResp.Status != loghttp.QueryStatusSuccess {
@ -269,7 +268,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
// If it's not empty only merge the response.
if endResp != nil {
if isEmpty(endResp) {
cachedRequest = cachedRequest.WithStartEndTime(cachedRequest.GetStartTs(), endRequest.GetEndTs())
cachedRequest = cachedRequest.WithStartEnd(cachedRequest.GetStartTs(), endRequest.GetEndTs()).(*LokiRequest)
updateCache = true
} else {
if endResp.Status != loghttp.QueryStatusSuccess {

@ -14,6 +14,7 @@ import (
"github.com/grafana/loki/pkg/loghttp"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
)
var (
@ -25,7 +26,7 @@ var (
type PrometheusExtractor struct{}
// Extract wraps the original prometheus cache extractor
func (PrometheusExtractor) Extract(start, end int64, res queryrangebase.Response, resStart, resEnd int64) queryrangebase.Response {
func (PrometheusExtractor) Extract(start, end int64, res resultscache.Response, resStart, resEnd int64) resultscache.Response {
response := extractor.Extract(start, end, res.(*LokiPromResponse).Response, resStart, resEnd)
return &LokiPromResponse{
Response: response.(*queryrangebase.PrometheusResponse),

@ -1,6 +1,9 @@
package queryrangebase
import "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
import (
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
)
// Helpful aliases for refactoring circular imports
@ -9,5 +12,8 @@ type PrometheusResponseHeader = definitions.PrometheusResponseHeader
type PrometheusRequestHeader = definitions.PrometheusRequestHeader
type Codec = definitions.Codec
type Merger = definitions.Merger
type CacheGenNumberLoader = resultscache.CacheGenNumberLoader
type Request = definitions.Request
type Response = definitions.Response
type Extent = resultscache.Extent

@ -25,50 +25,6 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Defined here to prevent circular imports between logproto & queryrangebase
type CachingOptions struct {
Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"`
}
func (m *CachingOptions) Reset() { *m = CachingOptions{} }
func (*CachingOptions) ProtoMessage() {}
func (*CachingOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_d1a37772b6ae2c5c, []int{0}
}
func (m *CachingOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CachingOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CachingOptions.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *CachingOptions) XXX_Merge(src proto.Message) {
xxx_messageInfo_CachingOptions.Merge(m, src)
}
func (m *CachingOptions) XXX_Size() int {
return m.Size()
}
func (m *CachingOptions) XXX_DiscardUnknown() {
xxx_messageInfo_CachingOptions.DiscardUnknown(m)
}
var xxx_messageInfo_CachingOptions proto.InternalMessageInfo
func (m *CachingOptions) GetDisabled() bool {
if m != nil {
return m.Disabled
}
return false
}
type PrometheusRequestHeader struct {
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"-"`
Values []string `protobuf:"bytes,2,rep,name=Values,proto3" json:"-"`
@ -77,7 +33,7 @@ type PrometheusRequestHeader struct {
func (m *PrometheusRequestHeader) Reset() { *m = PrometheusRequestHeader{} }
func (*PrometheusRequestHeader) ProtoMessage() {}
func (*PrometheusRequestHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_d1a37772b6ae2c5c, []int{1}
return fileDescriptor_d1a37772b6ae2c5c, []int{0}
}
func (m *PrometheusRequestHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -128,7 +84,7 @@ type PrometheusResponseHeader struct {
func (m *PrometheusResponseHeader) Reset() { *m = PrometheusResponseHeader{} }
func (*PrometheusResponseHeader) ProtoMessage() {}
func (*PrometheusResponseHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_d1a37772b6ae2c5c, []int{2}
return fileDescriptor_d1a37772b6ae2c5c, []int{1}
}
func (m *PrometheusResponseHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -172,7 +128,6 @@ func (m *PrometheusResponseHeader) GetValues() []string {
}
func init() {
proto.RegisterType((*CachingOptions)(nil), "definitions.CachingOptions")
proto.RegisterType((*PrometheusRequestHeader)(nil), "definitions.PrometheusRequestHeader")
proto.RegisterType((*PrometheusResponseHeader)(nil), "definitions.PrometheusResponseHeader")
}
@ -182,52 +137,26 @@ func init() {
}
var fileDescriptor_d1a37772b6ae2c5c = []byte{
// 294 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0x31, 0x4e, 0x33, 0x31,
0x10, 0x85, 0xed, 0xff, 0x87, 0x28, 0x18, 0x89, 0x62, 0x85, 0x44, 0x88, 0xc4, 0x10, 0xa5, 0xa2,
0x80, 0xb8, 0xe0, 0x06, 0x49, 0x03, 0x0d, 0xa0, 0x80, 0x28, 0xe8, 0xbc, 0xd9, 0x89, 0x63, 0x25,
0xb1, 0x37, 0xf6, 0x6e, 0x41, 0x05, 0x47, 0xe0, 0x18, 0x1c, 0x85, 0x72, 0xcb, 0x54, 0x88, 0xf5,
0x36, 0x88, 0x2a, 0x47, 0x40, 0x18, 0x04, 0xdb, 0x22, 0xaa, 0x79, 0xf3, 0xcd, 0x9b, 0x57, 0xcc,
0xb0, 0x41, 0x3a, 0x95, 0x7c, 0x91, 0xa3, 0x55, 0x68, 0x43, 0xbd, 0xb5, 0x42, 0x4b, 0xac, 0xc9,
0x58, 0x38, 0xe4, 0x09, 0x8e, 0x95, 0x56, 0x99, 0x32, 0xda, 0xd5, 0x75, 0x2f, 0xb5, 0x26, 0x33,
0xd1, 0x66, 0x0d, 0xb5, 0xb7, 0xa5, 0x91, 0x26, 0x70, 0xfe, 0xa1, 0x3e, 0x2d, 0xdd, 0x43, 0xb6,
0x35, 0x10, 0xa3, 0x89, 0xd2, 0xf2, 0x3c, 0x0d, 0xbe, 0xa8, 0xcd, 0x9a, 0x89, 0x72, 0x22, 0x9e,
0x61, 0xd2, 0xa2, 0x1d, 0x7a, 0xd0, 0x1c, 0x7e, 0xf7, 0xdd, 0x4b, 0xb6, 0x73, 0x61, 0xcd, 0x1c,
0xb3, 0x09, 0xe6, 0x6e, 0x88, 0x8b, 0x1c, 0x5d, 0x76, 0x82, 0x22, 0x41, 0x1b, 0xed, 0xb2, 0xb5,
0x33, 0x31, 0xc7, 0xb0, 0xb2, 0xd1, 0x5f, 0x7f, 0x7b, 0xde, 0xa7, 0x47, 0xc3, 0x80, 0xa2, 0x3d,
0xd6, 0xb8, 0x16, 0xb3, 0x1c, 0x5d, 0xeb, 0x5f, 0xe7, 0xff, 0xcf, 0xf0, 0x0b, 0x76, 0xaf, 0x58,
0xab, 0x1e, 0xea, 0x52, 0xa3, 0x1d, 0xfe, 0x35, 0xb5, 0x7f, 0x57, 0x94, 0x40, 0x96, 0x25, 0x90,
0x55, 0x09, 0xf4, 0xde, 0x03, 0x7d, 0xf4, 0x40, 0x9f, 0x3c, 0xd0, 0xc2, 0x03, 0x7d, 0xf1, 0x40,
0x5f, 0x3d, 0x90, 0x95, 0x07, 0xfa, 0x50, 0x01, 0x29, 0x2a, 0x20, 0xcb, 0x0a, 0xc8, 0xcd, 0xa9,
0x54, 0xd9, 0x24, 0x8f, 0x7b, 0x23, 0x33, 0xe7, 0xd2, 0x8a, 0xb1, 0xd0, 0x82, 0xcf, 0xcc, 0x54,
0xf1, 0x5f, 0xbf, 0x23, 0x6e, 0x84, 0x03, 0x1f, 0xbf, 0x07, 0x00, 0x00, 0xff, 0xff, 0x09, 0x36,
0xa9, 0xa5, 0xca, 0x01, 0x00, 0x00,
// 262 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x2e, 0xc8, 0x4e, 0xd7,
0x2f, 0x2c, 0x4d, 0x2d, 0xca, 0x4c, 0x2d, 0x02, 0xd3, 0x95, 0x45, 0x89, 0x79, 0xe9, 0xa9, 0x48,
0xcc, 0xa4, 0xc4, 0xe2, 0x54, 0xfd, 0x94, 0xd4, 0xb4, 0xcc, 0xbc, 0xcc, 0x92, 0xcc, 0xfc, 0xbc,
0x62, 0x64, 0xb6, 0x5e, 0x41, 0x51, 0x7e, 0x49, 0xbe, 0x10, 0x37, 0x92, 0x90, 0x94, 0x48, 0x7a,
0x7e, 0x7a, 0x3e, 0x58, 0x5c, 0x1f, 0xc4, 0x82, 0x28, 0x51, 0x0a, 0xe6, 0x12, 0x0f, 0x28, 0xca,
0xcf, 0x4d, 0x2d, 0xc9, 0x48, 0x2d, 0x2d, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0xf1, 0x48,
0x4d, 0x4c, 0x49, 0x2d, 0x12, 0x92, 0xe4, 0x62, 0xf1, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60,
0xd4, 0xe0, 0x74, 0x62, 0x7d, 0x75, 0x4f, 0x9e, 0x51, 0x37, 0x08, 0x2c, 0x24, 0x24, 0xcb, 0xc5,
0x16, 0x96, 0x98, 0x53, 0x9a, 0x5a, 0x2c, 0xc1, 0xa4, 0xc0, 0x8c, 0x90, 0x84, 0x0a, 0x2a, 0x85,
0x70, 0x49, 0x20, 0x1b, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x4a, 0xa9, 0xa9, 0x4e, 0xf5, 0x17,
0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6,
0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39,
0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63,
0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x3c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92,
0xf3, 0x73, 0xf5, 0xd3, 0x8b, 0x12, 0xd3, 0x12, 0xf3, 0x12, 0xf5, 0x73, 0xf2, 0xb3, 0x33, 0xf5,
0x49, 0x0e, 0xe0, 0x24, 0x36, 0x70, 0x90, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x51, 0x1b,
0x61, 0xc9, 0x9c, 0x01, 0x00, 0x00,
}
func (this *CachingOptions) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*CachingOptions)
if !ok {
that2, ok := that.(CachingOptions)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.Disabled != that1.Disabled {
return false
}
return true
}
func (this *PrometheusRequestHeader) Equal(that interface{}) bool {
if that == nil {
return this == nil
@ -292,16 +221,6 @@ func (this *PrometheusResponseHeader) Equal(that interface{}) bool {
}
return true
}
func (this *CachingOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&definitions.CachingOptions{")
s = append(s, "Disabled: "+fmt.Sprintf("%#v", this.Disabled)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *PrometheusRequestHeader) GoString() string {
if this == nil {
return "nil"
@ -332,39 +251,6 @@ func valueToGoStringDefinitions(v interface{}, typ string) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *CachingOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CachingOptions) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *CachingOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Disabled {
i--
if m.Disabled {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *PrometheusRequestHeader) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -454,18 +340,6 @@ func encodeVarintDefinitions(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
func (m *CachingOptions) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Disabled {
n += 2
}
return n
}
func (m *PrometheusRequestHeader) Size() (n int) {
if m == nil {
return 0
@ -510,16 +384,6 @@ func sovDefinitions(x uint64) (n int) {
func sozDefinitions(x uint64) (n int) {
return sovDefinitions(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *CachingOptions) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CachingOptions{`,
`Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`,
`}`,
}, "")
return s
}
func (this *PrometheusRequestHeader) String() string {
if this == nil {
return "nil"
@ -550,79 +414,6 @@ func valueToStringDefinitions(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *CachingOptions) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDefinitions
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CachingOptions: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CachingOptions: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDefinitions
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Disabled = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipDefinitions(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthDefinitions
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthDefinitions
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *PrometheusRequestHeader) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0

@ -8,11 +8,6 @@ option go_package = "github.com/grafana/loki/pkg/querier/queryrange/queryrangeba
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
// Defined here to prevent circular imports between logproto & queryrangebase
message CachingOptions {
bool disabled = 1;
}
message PrometheusRequestHeader {
string Name = 1 [(gogoproto.jsontag) = "-"];
repeated string Values = 2 [(gogoproto.jsontag) = "-"];

@ -7,6 +7,8 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/opentracing/opentracing-go"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
)
// Codec is used to encode/decode query range requests and responses so they can be passed down to middlewares.
@ -32,6 +34,7 @@ type Merger interface {
// Request represents a query range request that can be process by middlewares.
type Request interface {
proto.Message
// GetStart returns the start timestamp of the request in milliseconds.
GetStart() time.Time
// GetEnd returns the end timestamp of the request in milliseconds.
@ -46,11 +49,12 @@ type Request interface {
WithStartEnd(start time.Time, end time.Time) Request
// WithQuery clone the current request with a different query.
WithQuery(string) Request
proto.Message
// LogToSpan writes information about this request to an OpenTracing span
LogToSpan(opentracing.Span)
}
type CachingOptions = resultscache.CachingOptions
// Response represents a query range response.
type Response interface {
proto.Message

@ -6,6 +6,8 @@ import (
"github.com/grafana/dskit/middleware"
"github.com/grafana/dskit/tenant"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
)
const (
@ -13,7 +15,7 @@ const (
ResultsCacheGenNumberHeaderName = "Results-Cache-Gen-Number"
)
func CacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader CacheGenNumberLoader) middleware.Interface {
func CacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader resultscache.CacheGenNumberLoader) middleware.Interface {
return middleware.Func(func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
userIDs, err := tenant.TenantIDs(r.Context())
@ -30,7 +32,7 @@ func CacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader CacheGenNumberLo
})
}
func CacheGenNumberContextSetterMiddleware(cacheGenNumbersLoader CacheGenNumberLoader) Middleware {
func CacheGenNumberContextSetterMiddleware(cacheGenNumbersLoader resultscache.CacheGenNumberLoader) Middleware {
return MiddlewareFunc(func(next Handler) Handler {
return HandlerFunc(func(ctx context.Context, req Request) (Response, error) {
userIDs, err := tenant.TenantIDs(ctx)

@ -20,6 +20,7 @@ import (
"github.com/prometheus/prometheus/model/timestamp"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util/spanlogger"
)
@ -54,6 +55,12 @@ func (q *PrometheusRequest) WithStartEnd(start, end time.Time) Request {
return &clone
}
// WithStartEndForCache implements resultscache.Request.
func (q *PrometheusRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
clone := q.WithStartEnd(s, e).(resultscache.Request)
return clone
}
// WithQuery clones the current `PrometheusRequest` with a new query.
func (q *PrometheusRequest) WithQuery(query string) Request {
clone := *q

@ -7,12 +7,13 @@ import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
_ "github.com/gogo/protobuf/types"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
types "github.com/gogo/protobuf/types"
_ "github.com/golang/protobuf/ptypes/duration"
github_com_grafana_loki_pkg_logproto "github.com/grafana/loki/pkg/logproto"
logproto "github.com/grafana/loki/pkg/logproto"
definitions "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
resultscache "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
io "io"
math "math"
math_bits "math/bits"
@ -40,7 +41,7 @@ type PrometheusRequest struct {
Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"`
Timeout time.Duration `protobuf:"bytes,5,opt,name=timeout,proto3,stdduration" json:"timeout"`
Query string `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"`
CachingOptions definitions.CachingOptions `protobuf:"bytes,7,opt,name=cachingOptions,proto3" json:"cachingOptions"`
CachingOptions resultscache.CachingOptions `protobuf:"bytes,7,opt,name=cachingOptions,proto3" json:"cachingOptions"`
Headers []*definitions.PrometheusRequestHeader `protobuf:"bytes,8,rep,name=Headers,proto3" json:"-"`
}
@ -118,11 +119,11 @@ func (m *PrometheusRequest) GetQuery() string {
return ""
}
func (m *PrometheusRequest) GetCachingOptions() definitions.CachingOptions {
func (m *PrometheusRequest) GetCachingOptions() resultscache.CachingOptions {
if m != nil {
return m.CachingOptions
}
return definitions.CachingOptions{}
return resultscache.CachingOptions{}
}
func (m *PrometheusRequest) GetHeaders() []*definitions.PrometheusRequestHeader {
@ -302,132 +303,11 @@ func (m *SampleStream) GetSamples() []logproto.LegacySample {
return nil
}
type CachedResponse struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"`
// List of cached responses; non-overlapping and in order.
Extents []Extent `protobuf:"bytes,2,rep,name=extents,proto3" json:"extents"`
}
func (m *CachedResponse) Reset() { *m = CachedResponse{} }
func (*CachedResponse) ProtoMessage() {}
func (*CachedResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_4cc6a0c1d6b614c4, []int{4}
}
func (m *CachedResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CachedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CachedResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *CachedResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CachedResponse.Merge(m, src)
}
func (m *CachedResponse) XXX_Size() int {
return m.Size()
}
func (m *CachedResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CachedResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CachedResponse proto.InternalMessageInfo
func (m *CachedResponse) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *CachedResponse) GetExtents() []Extent {
if m != nil {
return m.Extents
}
return nil
}
// Extent is the generated protobuf model for one contiguous cached time span
// [Start, End] (milliseconds) plus the Any-wrapped response it covers.
type Extent struct {
	Start   int64      `protobuf:"varint,1,opt,name=start,proto3" json:"start"`
	End     int64      `protobuf:"varint,2,opt,name=end,proto3" json:"end"`
	TraceId string     `protobuf:"bytes,4,opt,name=trace_id,json=traceId,proto3" json:"-"`
	Response *types.Any `protobuf:"bytes,5,opt,name=response,proto3" json:"response"`
}

// Generated proto.Message boilerplate below — do not edit by hand.
func (m *Extent) Reset()      { *m = Extent{} }
func (*Extent) ProtoMessage() {}
func (*Extent) Descriptor() ([]byte, []int) {
	return fileDescriptor_4cc6a0c1d6b614c4, []int{5}
}
func (m *Extent) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Extent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Extent.Marshal(b, m, deterministic)
	} else {
		// Fast path: marshal into the caller-supplied buffer.
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Extent) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Extent.Merge(m, src)
}
func (m *Extent) XXX_Size() int {
	return m.Size()
}
func (m *Extent) XXX_DiscardUnknown() {
	xxx_messageInfo_Extent.DiscardUnknown(m)
}

var xxx_messageInfo_Extent proto.InternalMessageInfo

// Nil-safe generated getters.
func (m *Extent) GetStart() int64 {
	if m != nil {
		return m.Start
	}
	return 0
}
func (m *Extent) GetEnd() int64 {
	if m != nil {
		return m.End
	}
	return 0
}
func (m *Extent) GetTraceId() string {
	if m != nil {
		return m.TraceId
	}
	return ""
}
func (m *Extent) GetResponse() *types.Any {
	if m != nil {
		return m.Response
	}
	return nil
}
// init registers the generated message types with the gogo/protobuf type
// registry under their fully-qualified "queryrangebase.*" names.
func init() {
	proto.RegisterType((*PrometheusRequest)(nil), "queryrangebase.PrometheusRequest")
	proto.RegisterType((*PrometheusResponse)(nil), "queryrangebase.PrometheusResponse")
	proto.RegisterType((*PrometheusData)(nil), "queryrangebase.PrometheusData")
	proto.RegisterType((*SampleStream)(nil), "queryrangebase.SampleStream")
	proto.RegisterType((*CachedResponse)(nil), "queryrangebase.CachedResponse")
	proto.RegisterType((*Extent)(nil), "queryrangebase.Extent")
}
func init() {
@ -435,60 +315,54 @@ func init() {
}
var fileDescriptor_4cc6a0c1d6b614c4 = []byte{
// 846 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xdc, 0x44,
0x14, 0x5f, 0xc7, 0xfb, 0x77, 0x5a, 0x6d, 0x61, 0x1a, 0x15, 0x27, 0x45, 0xf6, 0x6a, 0x05, 0x52,
0x90, 0xc0, 0x2b, 0x8a, 0xe8, 0x01, 0xa9, 0x88, 0x38, 0x09, 0xa2, 0x55, 0x25, 0x2a, 0xa7, 0x27,
0x2e, 0x68, 0x76, 0xfd, 0xe2, 0x58, 0xf1, 0xbf, 0xce, 0x8c, 0x2b, 0xf6, 0xc6, 0x89, 0x73, 0x4f,
0x88, 0x8f, 0xc0, 0x01, 0xf1, 0x39, 0x72, 0xcc, 0xb1, 0xe2, 0x60, 0xc8, 0xe6, 0x82, 0x7c, 0xea,
0x47, 0x40, 0xf3, 0xc7, 0xbb, 0xde, 0x8d, 0x02, 0xf4, 0xb2, 0xfb, 0x66, 0xde, 0xef, 0xbd, 0xf7,
0x7b, 0xbf, 0x79, 0x7e, 0xe8, 0x61, 0x7e, 0x16, 0x4e, 0x5e, 0x14, 0x40, 0x23, 0xa0, 0xf2, 0x7f,
0x4e, 0x49, 0x1a, 0x42, 0xc3, 0x9c, 0x12, 0xd6, 0x3c, 0xba, 0x39, 0xcd, 0x78, 0x86, 0x87, 0xeb,
0x80, 0xdd, 0xed, 0x30, 0x0b, 0x33, 0xe9, 0x9a, 0x08, 0x4b, 0xa1, 0x76, 0x77, 0xc2, 0x2c, 0x0b,
0x63, 0x98, 0xc8, 0xd3, 0xb4, 0x38, 0x99, 0x90, 0x74, 0xae, 0x5d, 0xf6, 0xa6, 0x2b, 0x28, 0x28,
0xe1, 0x51, 0x96, 0x6a, 0xbf, 0xb3, 0xe9, 0xe7, 0x51, 0x02, 0x8c, 0x93, 0x24, 0xd7, 0x80, 0xfb,
0x82, 0x79, 0x9c, 0x85, 0xaa, 0x68, 0x6d, 0x68, 0xe7, 0xc1, 0xff, 0x6b, 0x2b, 0x80, 0x93, 0x28,
0x8d, 0x44, 0x55, 0xd6, 0xb4, 0x55, 0x92, 0xf1, 0xcf, 0x26, 0x7a, 0xf7, 0x19, 0xcd, 0x12, 0xe0,
0xa7, 0x50, 0x30, 0x1f, 0x5e, 0x14, 0xc0, 0x38, 0xc6, 0xa8, 0x9d, 0x13, 0x7e, 0x6a, 0x19, 0x23,
0x63, 0x6f, 0xe0, 0x4b, 0x1b, 0x7f, 0x81, 0x3a, 0x8c, 0x13, 0xca, 0xad, 0xad, 0x91, 0xb1, 0x77,
0xeb, 0xc1, 0xae, 0xab, 0xc8, 0xbb, 0x35, 0x79, 0xf7, 0x79, 0x4d, 0xde, 0xeb, 0x9f, 0x97, 0x4e,
0xeb, 0xd5, 0x9f, 0x8e, 0xe1, 0xab, 0x10, 0xfc, 0x10, 0x99, 0x90, 0x06, 0x96, 0xf9, 0x16, 0x91,
0x22, 0x40, 0xf0, 0x60, 0x1c, 0x72, 0xab, 0x3d, 0x32, 0xf6, 0x4c, 0x5f, 0xda, 0xf8, 0x11, 0xea,
0x09, 0x99, 0xb2, 0x82, 0x5b, 0x1d, 0x99, 0x6f, 0xe7, 0x5a, 0xbe, 0x43, 0x2d, 0xb3, 0x4a, 0xf7,
0x8b, 0x48, 0x57, 0xc7, 0xe0, 0x6d, 0xd4, 0x91, 0x02, 0x59, 0x5d, 0xd9, 0x9b, 0x3a, 0xe0, 0xc7,
0x68, 0x38, 0x23, 0xb3, 0xd3, 0x28, 0x0d, 0xbf, 0xcd, 0xa5, 0x3c, 0x56, 0x4f, 0xe6, 0xbe, 0xef,
0x36, 0x25, 0x3b, 0x58, 0x83, 0x78, 0x6d, 0x91, 0xdd, 0xdf, 0x08, 0xc4, 0x47, 0xa8, 0xf7, 0x0d,
0x90, 0x00, 0x28, 0xb3, 0xfa, 0x23, 0x73, 0xef, 0xd6, 0x83, 0x0f, 0xd6, 0x72, 0x5c, 0x13, 0x5b,
0x81, 0xbd, 0x4e, 0x55, 0x3a, 0xc6, 0x27, 0x7e, 0x1d, 0x3b, 0xfe, 0x7d, 0x0b, 0xe1, 0x26, 0x96,
0xe5, 0x59, 0xca, 0x00, 0x8f, 0x51, 0xf7, 0x98, 0x13, 0x5e, 0x30, 0xf5, 0x36, 0x1e, 0xaa, 0x4a,
0xa7, 0xcb, 0xe4, 0x8d, 0xaf, 0x3d, 0xf8, 0x09, 0x6a, 0x1f, 0x12, 0x4e, 0xf4, 0x43, 0xd9, 0xee,
0xfa, 0x40, 0x34, 0x18, 0x08, 0x94, 0x77, 0x4f, 0x74, 0x51, 0x95, 0xce, 0x30, 0x20, 0x9c, 0x7c,
0x9c, 0x25, 0x11, 0x87, 0x24, 0xe7, 0x73, 0x5f, 0xe6, 0xc0, 0x9f, 0xa3, 0xc1, 0x11, 0xa5, 0x19,
0x7d, 0x3e, 0xcf, 0x41, 0xbe, 0xdf, 0xc0, 0x7b, 0xaf, 0x2a, 0x9d, 0xbb, 0x50, 0x5f, 0x36, 0x22,
0x56, 0x48, 0xfc, 0x11, 0xea, 0xc8, 0x83, 0x7c, 0xb9, 0x81, 0x77, 0xb7, 0x2a, 0x9d, 0x3b, 0x32,
0xa4, 0x01, 0x57, 0x08, 0xfc, 0xf5, 0x4a, 0xaf, 0x8e, 0xd4, 0xeb, 0xc3, 0x1b, 0xf5, 0x52, 0x1a,
0xdc, 0x20, 0xd8, 0x4f, 0x06, 0x1a, 0xae, 0xb7, 0x86, 0x5d, 0x84, 0x7c, 0x60, 0x45, 0xcc, 0x25,
0x7b, 0x25, 0xd8, 0xb0, 0x2a, 0x1d, 0x44, 0x97, 0xb7, 0x7e, 0x03, 0x81, 0x0f, 0x51, 0x57, 0x9d,
0xac, 0x2d, 0xc9, 0xe4, 0xfd, 0x4d, 0xe9, 0x8e, 0x49, 0x92, 0xc7, 0x70, 0xcc, 0x29, 0x90, 0xc4,
0x1b, 0x6a, 0xe1, 0xba, 0x2a, 0x9b, 0xaf, 0x63, 0xc7, 0xe7, 0x06, 0xba, 0xdd, 0x04, 0xe2, 0x97,
0xa8, 0x1b, 0x93, 0x29, 0xc4, 0xe2, 0xcd, 0x4c, 0x39, 0xb0, 0xcb, 0x2f, 0xf9, 0x29, 0x84, 0x64,
0x36, 0x7f, 0x2a, 0xbc, 0xcf, 0x48, 0x44, 0xbd, 0x03, 0x91, 0xf3, 0x8f, 0xd2, 0xf9, 0x34, 0x8c,
0xf8, 0x69, 0x31, 0x75, 0x67, 0x59, 0x32, 0x09, 0x29, 0x39, 0x21, 0x29, 0x99, 0xc4, 0xd9, 0x59,
0x34, 0x69, 0x2e, 0x04, 0x57, 0xc6, 0xed, 0x07, 0x24, 0xe7, 0x40, 0x05, 0x91, 0x04, 0x38, 0x8d,
0x66, 0xbe, 0xae, 0x86, 0xbf, 0x42, 0x3d, 0x26, 0x79, 0x30, 0xdd, 0xcf, 0xbd, 0xcd, 0xc2, 0x8a,
0xe6, 0xaa, 0x93, 0x97, 0x24, 0x2e, 0x80, 0xf9, 0x75, 0xd8, 0x38, 0x45, 0x43, 0x31, 0xf3, 0x10,
0x2c, 0xe7, 0x6f, 0x07, 0x99, 0x67, 0x30, 0xd7, 0x5a, 0xf6, 0xaa, 0xd2, 0x11, 0x47, 0x5f, 0xfc,
0xe0, 0x7d, 0xd4, 0x83, 0x1f, 0x38, 0xa4, 0x7c, 0x55, 0x6e, 0x43, 0xbe, 0x23, 0xe9, 0xf6, 0xee,
0xe8, 0x72, 0x35, 0xdc, 0xaf, 0x8d, 0xf1, 0x6f, 0x06, 0xea, 0x2a, 0x10, 0x76, 0xea, 0x75, 0x23,
0x4a, 0x99, 0xde, 0xa0, 0x2a, 0x1d, 0x75, 0x51, 0xef, 0x94, 0x1d, 0xb5, 0x53, 0xb6, 0xa4, 0x5b,
0x32, 0x81, 0x34, 0x50, 0x6b, 0x63, 0x84, 0xfa, 0x9c, 0x92, 0x19, 0x7c, 0x1f, 0x05, 0x7a, 0x00,
0xeb, 0x61, 0x91, 0xd7, 0x8f, 0x03, 0xfc, 0x25, 0xea, 0x53, 0xdd, 0x92, 0xde, 0x22, 0xdb, 0xd7,
0xb6, 0xc8, 0x7e, 0x3a, 0xf7, 0x6e, 0x57, 0xa5, 0xb3, 0x44, 0xfa, 0x4b, 0xeb, 0x49, 0xbb, 0x6f,
0xbe, 0xd3, 0xf6, 0xd8, 0xc5, 0xa5, 0xdd, 0x7a, 0x7d, 0x69, 0xb7, 0xde, 0x5c, 0xda, 0xc6, 0x8f,
0x0b, 0xdb, 0xf8, 0x75, 0x61, 0x1b, 0xe7, 0x0b, 0xdb, 0xb8, 0x58, 0xd8, 0xc6, 0x5f, 0x0b, 0xdb,
0xf8, 0x7b, 0x61, 0xb7, 0xde, 0x2c, 0x6c, 0xe3, 0xd5, 0x95, 0xdd, 0xba, 0xb8, 0xb2, 0x5b, 0xaf,
0xaf, 0xec, 0xd6, 0x77, 0x8f, 0xfe, 0xed, 0x6d, 0xff, 0x73, 0x9f, 0x4f, 0xbb, 0x92, 0xe0, 0x67,
0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x63, 0x5c, 0x0b, 0x88, 0xd6, 0x06, 0x00, 0x00,
// 739 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4f, 0xdb, 0x48,
0x18, 0x8d, 0xc9, 0x2f, 0x32, 0xac, 0xb2, 0xda, 0x01, 0xb1, 0x5e, 0x16, 0x8d, 0xa3, 0x68, 0x57,
0xca, 0x4a, 0x5b, 0xbb, 0xa5, 0x2a, 0x87, 0x4a, 0x48, 0xad, 0x81, 0xaa, 0x42, 0x48, 0x45, 0x86,
0x53, 0x6f, 0x93, 0x64, 0x70, 0x2c, 0x6c, 0x8f, 0x99, 0x19, 0x23, 0xe5, 0xd6, 0x53, 0xcf, 0xdc,
0xda, 0x3f, 0xa1, 0xa7, 0xfe, 0x1d, 0x1c, 0x39, 0xa2, 0x1e, 0xdc, 0x12, 0x2e, 0x55, 0x4e, 0xfc,
0x09, 0xd5, 0xcc, 0x38, 0xe0, 0x04, 0xd1, 0x1f, 0xa7, 0xcc, 0xe7, 0xef, 0xbd, 0x37, 0xef, 0x7b,
0x9f, 0x63, 0xb0, 0x9e, 0x1c, 0xf9, 0xce, 0x71, 0x4a, 0x58, 0x40, 0x98, 0xfa, 0x1d, 0x32, 0x1c,
0xfb, 0xa4, 0x70, 0xec, 0x62, 0x5e, 0x2c, 0xed, 0x84, 0x51, 0x41, 0x61, 0x73, 0x1a, 0xb0, 0xb2,
0xe4, 0x53, 0x9f, 0xaa, 0x96, 0x23, 0x4f, 0x1a, 0xb5, 0x82, 0x7c, 0x4a, 0xfd, 0x90, 0x38, 0xaa,
0xea, 0xa6, 0x87, 0x4e, 0x3f, 0x65, 0x58, 0x04, 0x34, 0xce, 0xfb, 0xd6, 0x6c, 0x5f, 0x04, 0x11,
0xe1, 0x02, 0x47, 0x49, 0x0e, 0xf8, 0x5b, 0xda, 0x0b, 0xa9, 0xaf, 0x95, 0x27, 0x87, 0xbc, 0xb9,
0xf9, 0x73, 0xde, 0xfb, 0xe4, 0x30, 0x88, 0x03, 0x79, 0x2b, 0x2f, 0x9e, 0x73, 0x91, 0x87, 0x52,
0x84, 0x0b, 0xca, 0xb0, 0x4f, 0x9c, 0xde, 0x20, 0x8d, 0x8f, 0x9c, 0x1e, 0xee, 0x0d, 0x88, 0xc3,
0x08, 0x4f, 0x43, 0xc1, 0x75, 0x21, 0x86, 0x09, 0xc9, 0x19, 0xed, 0x77, 0x65, 0xf0, 0xc7, 0x1e,
0xa3, 0x11, 0x11, 0x03, 0x92, 0x72, 0x8f, 0x1c, 0xa7, 0x84, 0x0b, 0x08, 0x41, 0x25, 0xc1, 0x62,
0x60, 0x1a, 0x2d, 0xa3, 0xd3, 0xf0, 0xd4, 0x19, 0x3e, 0x05, 0x55, 0x2e, 0x30, 0x13, 0xe6, 0x5c,
0xcb, 0xe8, 0x2c, 0xac, 0xad, 0xd8, 0x7a, 0x5c, 0x7b, 0x32, 0xae, 0x7d, 0x30, 0x19, 0xd7, 0x9d,
0x3f, 0xcb, 0xac, 0xd2, 0xe9, 0x67, 0xcb, 0xf0, 0x34, 0x05, 0xae, 0x83, 0x32, 0x89, 0xfb, 0x66,
0xf9, 0x17, 0x98, 0x92, 0x20, 0x7d, 0x70, 0x41, 0x12, 0xb3, 0xd2, 0x32, 0x3a, 0x65, 0x4f, 0x9d,
0xe1, 0x06, 0xa8, 0xcb, 0x60, 0x69, 0x2a, 0xcc, 0xaa, 0xd2, 0xfb, 0xeb, 0x8e, 0xde, 0x56, 0xbe,
0x18, 0x2d, 0xf7, 0x5e, 0xca, 0x4d, 0x38, 0x70, 0x09, 0x54, 0x55, 0xa4, 0x66, 0x4d, 0xcd, 0xa6,
0x0b, 0xb8, 0x03, 0x9a, 0x32, 0x9b, 0x20, 0xf6, 0x5f, 0x25, 0x2a, 0x50, 0xb3, 0xae, 0xb4, 0x57,
0xed, 0x62, 0x72, 0xf6, 0xe6, 0x14, 0xc6, 0xad, 0x48, 0x79, 0x6f, 0x86, 0x09, 0xb7, 0x41, 0xfd,
0x25, 0xc1, 0x7d, 0xc2, 0xb8, 0x39, 0xdf, 0x2a, 0x77, 0x16, 0xd6, 0xfe, 0xb1, 0x8b, 0x9b, 0xba,
0x93, 0xb6, 0x06, 0xbb, 0xd5, 0x71, 0x66, 0x19, 0x0f, 0xbc, 0x09, 0xb7, 0xfd, 0x71, 0x0e, 0xc0,
0x22, 0x96, 0x27, 0x34, 0xe6, 0x04, 0xb6, 0x41, 0x6d, 0x5f, 0x60, 0x91, 0x72, 0xbd, 0x1c, 0x17,
0x8c, 0x33, 0xab, 0xc6, 0xd5, 0x13, 0x2f, 0xef, 0xc0, 0x1d, 0x50, 0xd9, 0xc2, 0x02, 0xe7, 0x9b,
0x42, 0xf6, 0xf4, 0x3b, 0x54, 0x70, 0x20, 0x51, 0xee, 0xb2, 0x9c, 0x62, 0x9c, 0x59, 0xcd, 0x3e,
0x16, 0xf8, 0x7f, 0x1a, 0x05, 0x82, 0x44, 0x89, 0x18, 0x7a, 0x4a, 0x03, 0x3e, 0x01, 0x8d, 0x6d,
0xc6, 0x28, 0x3b, 0x18, 0x26, 0x44, 0x2d, 0xb0, 0xe1, 0xfe, 0x39, 0xce, 0xac, 0x45, 0x32, 0x79,
0x58, 0x60, 0xdc, 0x22, 0xe1, 0x7f, 0xa0, 0xaa, 0x0a, 0xb5, 0xba, 0x86, 0xbb, 0x38, 0xce, 0xac,
0xdf, 0x15, 0xa5, 0x00, 0xd7, 0x08, 0xf8, 0xe2, 0x36, 0xaf, 0xaa, 0xca, 0xeb, 0xdf, 0x7b, 0xf3,
0xd2, 0x19, 0xdc, 0x13, 0xd8, 0x5b, 0x03, 0x34, 0xa7, 0x47, 0x83, 0x36, 0x00, 0x9e, 0xda, 0x9f,
0x72, 0xaf, 0x03, 0x6b, 0x8e, 0x33, 0x0b, 0xb0, 0x9b, 0xa7, 0x5e, 0x01, 0x01, 0xb7, 0x40, 0x4d,
0x57, 0xe6, 0x9c, 0x72, 0xb2, 0x3a, 0x1b, 0xdd, 0x3e, 0x8e, 0x92, 0x90, 0xec, 0x0b, 0x46, 0x70,
0xe4, 0x36, 0xf3, 0xe0, 0x6a, 0x5a, 0xcd, 0xcb, 0xb9, 0xed, 0x33, 0x03, 0xfc, 0x56, 0x04, 0xc2,
0x13, 0x50, 0x0b, 0x71, 0x97, 0x84, 0x72, 0x67, 0x65, 0xf5, 0xc6, 0xde, 0xfc, 0xf9, 0x77, 0x89,
0x8f, 0x7b, 0xc3, 0x5d, 0xd9, 0xdd, 0xc3, 0x01, 0x73, 0x37, 0xa5, 0xe6, 0xa7, 0xcc, 0x7a, 0xe4,
0x07, 0x62, 0x90, 0x76, 0xed, 0x1e, 0x8d, 0x1c, 0x9f, 0xe1, 0x43, 0x1c, 0x63, 0x27, 0xa4, 0x47,
0x81, 0x53, 0xfc, 0x86, 0xd8, 0x8a, 0xf7, 0xbc, 0x8f, 0x13, 0x41, 0x98, 0x34, 0x12, 0x11, 0xc1,
0x82, 0x9e, 0x97, 0xdf, 0x06, 0x9f, 0x81, 0x3a, 0x57, 0x3e, 0x78, 0x3e, 0xcf, 0xf2, 0xec, 0xc5,
0xda, 0xe6, 0xed, 0x24, 0x27, 0x38, 0x4c, 0x09, 0xf7, 0x26, 0x34, 0x97, 0x9f, 0x5f, 0xa2, 0xd2,
0xc5, 0x25, 0x2a, 0x5d, 0x5f, 0x22, 0xe3, 0xcd, 0x08, 0x19, 0x1f, 0x46, 0xc8, 0x38, 0x1b, 0x21,
0xe3, 0x7c, 0x84, 0x8c, 0x2f, 0x23, 0x64, 0x7c, 0x1d, 0xa1, 0xd2, 0xf5, 0x08, 0x19, 0xa7, 0x57,
0xa8, 0x74, 0x7e, 0x85, 0x4a, 0x17, 0x57, 0xa8, 0xf4, 0x7a, 0xe3, 0x7b, 0xe6, 0x7f, 0xf8, 0x8d,
0xeb, 0xd6, 0x94, 0xc3, 0xc7, 0xdf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfe, 0xcd, 0xe4, 0x4f, 0xcf,
0x05, 0x00, 0x00,
}
func (this *PrometheusRequest) Equal(that interface{}) bool {
@ -651,71 +525,6 @@ func (this *SampleStream) Equal(that interface{}) bool {
}
return true
}
// Equal reports deep equality with another CachedResponse (generated
// gogoproto equality); `that` may be a pointer or a value.
func (this *CachedResponse) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	// Accept both *CachedResponse and CachedResponse.
	that1, ok := that.(*CachedResponse)
	if !ok {
		that2, ok := that.(CachedResponse)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.Key != that1.Key {
		return false
	}
	if len(this.Extents) != len(that1.Extents) {
		return false
	}
	// Extents must match pairwise, in order.
	for i := range this.Extents {
		if !this.Extents[i].Equal(&that1.Extents[i]) {
			return false
		}
	}
	return true
}
// Equal reports deep equality with another Extent (generated gogoproto
// equality); `that` may be a pointer or a value.
func (this *Extent) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	// Accept both *Extent and Extent.
	that1, ok := that.(*Extent)
	if !ok {
		that2, ok := that.(Extent)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.Start != that1.Start {
		return false
	}
	if this.End != that1.End {
		return false
	}
	if this.TraceId != that1.TraceId {
		return false
	}
	// Any-wrapped responses are compared via their own Equal.
	if !this.Response.Equal(that1.Response) {
		return false
	}
	return true
}
func (this *PrometheusRequest) GoString() string {
if this == nil {
return "nil"
@ -785,38 +594,6 @@ func (this *SampleStream) GoString() string {
s = append(s, "}")
return strings.Join(s, "")
}
// GoString renders the message as Go source syntax (generated; used by %#v).
func (this *CachedResponse) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&queryrangebase.CachedResponse{")
	s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n")
	if this.Extents != nil {
		// Take pointers so the elements render as *Extent literals.
		vs := make([]*Extent, len(this.Extents))
		for i := range vs {
			vs[i] = &this.Extents[i]
		}
		s = append(s, "Extents: "+fmt.Sprintf("%#v", vs)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
// GoString renders the message as Go source syntax (generated; used by %#v).
func (this *Extent) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 8)
	s = append(s, "&queryrangebase.Extent{")
	s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
	s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
	s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n")
	if this.Response != nil {
		s = append(s, "Response: "+fmt.Sprintf("%#v", this.Response)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func valueToGoStringQueryrange(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@ -1078,102 +855,6 @@ func (m *SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
// Marshal encodes the message into a freshly allocated buffer.
func (m *CachedResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes the message into dAtA, which must be at least Size() bytes.
func (m *CachedResponse) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer encodes the message backwards from the end of dAtA
// (gogoproto fast-path convention: fields are written in reverse order).
func (m *CachedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Extents) > 0 {
		for iNdEx := len(m.Extents) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Extents[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintQueryrange(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12 // tag for field 2, wire type 2 (length-delimited)
		}
	}
	if len(m.Key) > 0 {
		i -= len(m.Key)
		copy(dAtA[i:], m.Key)
		i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Key)))
		i--
		dAtA[i] = 0xa // tag for field 1, wire type 2 (length-delimited)
	}
	return len(dAtA) - i, nil
}
// Marshal encodes the message into a freshly allocated buffer.
func (m *Extent) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes the message into dAtA, which must be at least Size() bytes.
func (m *Extent) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer encodes the message backwards from the end of dAtA
// (gogoproto fast-path convention: fields are written in reverse order).
func (m *Extent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Response != nil {
		{
			size, err := m.Response.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintQueryrange(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x2a // tag for field 5, wire type 2 (length-delimited)
	}
	if len(m.TraceId) > 0 {
		i -= len(m.TraceId)
		copy(dAtA[i:], m.TraceId)
		i = encodeVarintQueryrange(dAtA, i, uint64(len(m.TraceId)))
		i--
		dAtA[i] = 0x22 // tag for field 4, wire type 2 (length-delimited)
	}
	if m.End != 0 {
		i = encodeVarintQueryrange(dAtA, i, uint64(m.End))
		i--
		dAtA[i] = 0x10 // tag for field 2, wire type 0 (varint)
	}
	if m.Start != 0 {
		i = encodeVarintQueryrange(dAtA, i, uint64(m.Start))
		i--
		dAtA[i] = 0x8 // tag for field 1, wire type 0 (varint)
	}
	return len(dAtA) - i, nil
}
func encodeVarintQueryrange(dAtA []byte, offset int, v uint64) int {
offset -= sovQueryrange(v)
base := offset
@ -1288,48 +969,6 @@ func (m *SampleStream) Size() (n int) {
return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *CachedResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Key)
	if l > 0 {
		// 1 tag byte + length varint + payload.
		n += 1 + l + sovQueryrange(uint64(l))
	}
	if len(m.Extents) > 0 {
		for _, e := range m.Extents {
			l = e.Size()
			n += 1 + l + sovQueryrange(uint64(l))
		}
	}
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *Extent) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Start != 0 {
		// 1 tag byte + varint payload.
		n += 1 + sovQueryrange(uint64(m.Start))
	}
	if m.End != 0 {
		n += 1 + sovQueryrange(uint64(m.End))
	}
	l = len(m.TraceId)
	if l > 0 {
		n += 1 + l + sovQueryrange(uint64(l))
	}
	if m.Response != nil {
		l = m.Response.Size()
		n += 1 + l + sovQueryrange(uint64(l))
	}
	return n
}
// sovQueryrange returns how many bytes x occupies when encoded as a protobuf
// varint: 7 payload bits per byte, and zero still takes one byte (hence x|1).
func sovQueryrange(x uint64) (n int) {
	significantBits := math_bits.Len64(x | 1)
	return (significantBits + 6) / 7
}
@ -1352,7 +991,7 @@ func (this *PrometheusRequest) String() string {
`Step:` + fmt.Sprintf("%v", this.Step) + `,`,
`Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`,
`Query:` + fmt.Sprintf("%v", this.Query) + `,`,
`CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "definitions.CachingOptions", 1), `&`, ``, 1) + `,`,
`CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "resultscache.CachingOptions", 1), `&`, ``, 1) + `,`,
`Headers:` + repeatedStringForHeaders + `,`,
`}`,
}, "")
@ -1409,35 +1048,6 @@ func (this *SampleStream) String() string {
}, "")
return s
}
// String renders a human-readable, single-line form of the message (generated).
func (this *CachedResponse) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForExtents := "[]Extent{"
	for _, f := range this.Extents {
		repeatedStringForExtents += strings.Replace(strings.Replace(f.String(), "Extent", "Extent", 1), `&`, ``, 1) + ","
	}
	repeatedStringForExtents += "}"
	s := strings.Join([]string{`&CachedResponse{`,
		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
		`Extents:` + repeatedStringForExtents + `,`,
		`}`,
	}, "")
	return s
}
// String renders a human-readable, single-line form of the message (generated).
func (this *Extent) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Extent{`,
		`Start:` + fmt.Sprintf("%v", this.Start) + `,`,
		`End:` + fmt.Sprintf("%v", this.End) + `,`,
		`TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`,
		`Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "Any", "types.Any", 1) + `,`,
		`}`,
	}, "")
	return s
}
func valueToStringQueryrange(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@ -2204,284 +1814,6 @@ func (m *SampleStream) Unmarshal(dAtA []byte) error {
}
return nil
}
// Unmarshal decodes a CachedResponse from protobuf wire format, field by
// field; unknown fields are skipped (generated gogoproto fast path).
func (m *CachedResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQueryrange
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CachedResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CachedResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Key (string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQueryrange
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthQueryrange
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthQueryrange
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Extents (repeated embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Extents", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQueryrange
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthQueryrange
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthQueryrange
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Extents = append(m.Extents, Extent{})
			if err := m.Extents[len(m.Extents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload entirely.
			iNdEx = preIndex
			skippy, err := skipQueryrange(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthQueryrange
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthQueryrange
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an Extent from protobuf wire format, field by field;
// unknown fields (including the reserved field 3) are skipped.
func (m *Extent) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQueryrange
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Extent: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Extent: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Start (varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
			}
			m.Start = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQueryrange
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Start |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: End (varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
			}
			m.End = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQueryrange
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.End |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: TraceId (string). Field 3 is reserved in the schema.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQueryrange
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthQueryrange
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthQueryrange
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.TraceId = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			// Field 5: Response (embedded google.protobuf.Any), lazily allocated.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQueryrange
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthQueryrange
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthQueryrange
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Response == nil {
				m.Response = &types.Any{}
			}
			if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload entirely.
			iNdEx = preIndex
			skippy, err := skipQueryrange(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthQueryrange
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthQueryrange
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipQueryrange(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0

@ -3,11 +3,11 @@ syntax = "proto3";
package queryrangebase;
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
import "pkg/logproto/logproto.proto";
import "pkg/querier/queryrange/queryrangebase/definitions/definitions.proto";
import "pkg/storage/chunk/cache/resultscache/types.proto";
option go_package = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase";
option (gogoproto.marshaler_all) = true;
@ -29,7 +29,7 @@ message PrometheusRequest {
(gogoproto.nullable) = false
];
string query = 6;
definitions.CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false];
resultscache.CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false];
repeated definitions.PrometheusRequestHeader Headers = 8 [(gogoproto.jsontag) = "-"];
}
@ -63,22 +63,3 @@ message SampleStream {
(gogoproto.jsontag) = "values"
];
}
// CachedResponse is the persisted form of one results-cache entry: the cache
// key plus the extents stored under it.
message CachedResponse {
  string key = 1 [(gogoproto.jsontag) = "key"];

  // List of cached responses; non-overlapping and in order.
  repeated Extent extents = 2 [
    (gogoproto.nullable) = false,
    (gogoproto.jsontag) = "extents"
  ];
}
// Extent is one contiguous cached time span plus the Any-wrapped response it
// covers. start/end are the span bounds (int64 timestamps).
message Extent {
  int64 start = 1 [(gogoproto.jsontag) = "start"];
  int64 end = 2 [(gogoproto.jsontag) = "end"];
  // reserved the previous key to ensure cache transition
  reserved 3;
  string trace_id = 4 [(gogoproto.jsontag) = "-"];
  google.protobuf.Any response = 5 [(gogoproto.jsontag) = "response"];
}

@ -4,35 +4,21 @@ import (
"context"
"flag"
"fmt"
"net/http"
"sort"
"strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/user"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/uber/jaeger-client-go"
"github.com/grafana/dskit/tenant"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util/constants"
"github.com/grafana/loki/pkg/util/math"
"github.com/grafana/loki/pkg/util/spanlogger"
"github.com/grafana/loki/pkg/util/validation"
)
var (
@ -65,20 +51,9 @@ func NewResultsCacheMetrics(registerer prometheus.Registerer) *ResultsCacheMetri
}
}
type CacheGenNumberLoader interface {
GetResultsCacheGenNumber(tenantIDs []string) string
Stop()
}
// ResultsCacheConfig is the config for the results cache.
type ResultsCacheConfig struct {
CacheConfig cache.Config `yaml:"cache"`
Compression string `yaml:"compression"`
}
func (cfg *ResultsCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
cfg.CacheConfig.RegisterFlagsWithPrefix(prefix, "", f)
f.StringVar(&cfg.Compression, prefix+"compression", "", "Use compression in cache. The default is an empty value '', which disables compression. Supported values are: 'snappy' and ''.")
resultscache.Config `yaml:",inline"`
}
// RegisterFlags registers flags.
@ -86,22 +61,9 @@ func (cfg *ResultsCacheConfig) RegisterFlags(f *flag.FlagSet) {
cfg.RegisterFlagsWithPrefix(f, "frontend.")
}
func (cfg *ResultsCacheConfig) Validate() error {
switch cfg.Compression {
case "snappy", "":
// valid
default:
return errors.Errorf("unsupported compression type: %s", cfg.Compression)
}
return nil
}
// Extractor is used by the cache to extract a subset of a response from a cache entry.
type Extractor interface {
// Extract extracts a subset of a response from the `start` and `end` timestamps in milliseconds
// in the `res` response which spans from `resStart` to `resEnd`.
Extract(start, end int64, res Response, resStart, resEnd int64) Response
resultscache.Extractor
ResponseWithoutHeaders(resp Response) Response
}
@ -109,7 +71,7 @@ type Extractor interface {
type PrometheusResponseExtractor struct{}
// Extract extracts response for specific a range from a response.
func (PrometheusResponseExtractor) Extract(start, end int64, res Response, _, _ int64) Response {
func (PrometheusResponseExtractor) Extract(start, end int64, res resultscache.Response, _, _ int64) resultscache.Response {
promRes := res.(*PrometheusResponse)
return &PrometheusResponse{
Status: StatusSuccess,
@ -134,39 +96,17 @@ func (PrometheusResponseExtractor) ResponseWithoutHeaders(resp Response) Respons
}
}
// CacheSplitter generates cache keys. This is a useful interface for downstream
// consumers who wish to implement their own strategies.
type CacheSplitter interface {
GenerateCacheKey(ctx context.Context, userID string, r Request) string
}
// constSplitter is a utility for using a constant split interval when determining cache keys
type constSplitter time.Duration

// GenerateCacheKey generates a cache key based on the userID, Request and interval.
func (t constSplitter) GenerateCacheKey(_ context.Context, userID string, r Request) string {
	// Bucket the request start time into fixed-width windows of size t, so all
	// requests falling in the same window map to the same cache key.
	currentInterval := r.GetStart().UnixMilli() / int64(time.Duration(t)/time.Millisecond)
	return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), currentInterval)
}
// ShouldCacheFn checks whether the current request should go to cache
// or not. If not, just send the request to next handler.
type ShouldCacheFn func(ctx context.Context, r Request) bool
// ParallelismForReqFn returns the parallelism for a given request.
type ParallelismForReqFn func(ctx context.Context, tenantIDs []string, r Request) int
type resultsCache struct {
logger log.Logger
next Handler
cache cache.Cache
limits Limits
splitter CacheSplitter
extractor Extractor
minCacheExtent int64 // discard any cache extent smaller than this
merger Merger
cacheGenNumberLoader CacheGenNumberLoader
shouldCache ShouldCacheFn
parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int
retentionEnabled bool
cache *resultscache.ResultsCache
logger log.Logger
cacheGenNumberLoader resultscache.CacheGenNumberLoader
metrics *ResultsCacheMetrics
}
@ -179,13 +119,13 @@ type resultsCache struct {
func NewResultsCacheMiddleware(
logger log.Logger,
c cache.Cache,
splitter CacheSplitter,
keygen resultscache.KeyGenerator,
limits Limits,
merger Merger,
extractor Extractor,
cacheGenNumberLoader CacheGenNumberLoader,
cacheGenNumberLoader resultscache.CacheGenNumberLoader,
shouldCache ShouldCacheFn,
parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int,
parallelismForReq ParallelismForReqFn,
retentionEnabled bool,
metrics *ResultsCacheMetrics,
) (Middleware, error) {
@ -193,78 +133,63 @@ func NewResultsCacheMiddleware(
c = cache.NewCacheGenNumMiddleware(c)
}
out := &resultsCache{
logger: logger,
cacheGenNumberLoader: cacheGenNumberLoader,
metrics: metrics,
}
return MiddlewareFunc(func(next Handler) Handler {
return &resultsCache{
logger: logger,
next: next,
cache: c,
limits: limits,
merger: merger,
extractor: extractor,
minCacheExtent: (5 * time.Minute).Milliseconds(),
splitter: splitter,
cacheGenNumberLoader: cacheGenNumberLoader,
shouldCache: shouldCache,
parallelismForReq: parallelismForReq,
retentionEnabled: retentionEnabled,
metrics: metrics,
nextCacheWrapper := resultscache.HandlerFunc(func(ctx context.Context, req resultscache.Request) (resultscache.Response, error) {
return next.Do(ctx, req.(Request))
})
shouldCacheReqWrapper := func(ctx context.Context, req resultscache.Request) bool {
if shouldCache == nil {
return true
}
return shouldCache(ctx, req.(Request))
}
shouldCacheResWrapper := func(ctx context.Context, req resultscache.Request, res resultscache.Response, maxCacheTime int64) bool {
return out.shouldCacheResponse(ctx, req.(Request), res.(Response), maxCacheTime)
}
parallelismForReqWrapper := func(ctx context.Context, tenantIDs []string, req resultscache.Request) int {
return parallelismForReq(ctx, tenantIDs, req.(Request))
}
out.cache = resultscache.NewResultsCache(
logger,
c,
nextCacheWrapper,
keygen,
limits,
FromQueryResponseMergerToCacheResponseMerger(merger),
extractor,
shouldCacheReqWrapper,
shouldCacheResWrapper,
parallelismForReqWrapper,
cacheGenNumberLoader,
retentionEnabled,
)
return out
}), nil
}
func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "resultsCache.Do")
defer sp.Finish()
tenantIDs, err := tenant.TenantIDs(ctx)
res, err := s.cache.Do(ctx, r.(resultscache.Request))
if err != nil {
return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
if s.shouldCache != nil && !s.shouldCache(ctx, r) {
return s.next.Do(ctx, r)
}
if s.cacheGenNumberLoader != nil && s.retentionEnabled {
ctx = cache.InjectCacheGenNumber(ctx, s.cacheGenNumberLoader.GetResultsCacheGenNumber(tenantIDs))
}
var (
key = s.splitter.GenerateCacheKey(ctx, tenant.JoinTenantIDs(tenantIDs), r)
extents []Extent
response Response
)
sp.LogKV(
"query", r.GetQuery(),
"step", time.UnixMilli(r.GetStep()),
"start", r.GetStart(),
"end", r.GetEnd(),
"key", key,
)
cacheFreshnessCapture := func(id string) time.Duration { return s.limits.MaxCacheFreshness(ctx, id) }
maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture)
maxCacheTime := int64(model.Now().Add(-maxCacheFreshness))
if r.GetStart().UnixMilli() > maxCacheTime {
return s.next.Do(ctx, r)
}
cached, ok := s.get(ctx, key)
if ok {
response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime)
} else {
response, extents, err = s.handleMiss(ctx, r, maxCacheTime)
return nil, err
}
if err == nil && len(extents) > 0 {
extents, err := s.filterRecentExtents(r, maxCacheFreshness, extents)
if err != nil {
return nil, err
}
s.put(ctx, key, extents)
queryRes, ok := res.(Response)
if !ok {
return nil, fmt.Errorf("could not cast cache response to query response")
}
return response, err
return queryRes, nil
}
// shouldCacheResponse says whether the response should be cached or not.
@ -379,303 +304,6 @@ func getHeaderValuesWithName(r Response, headerName string) (headerValues []stri
return
}
// handleMiss runs the request downstream and, when the response is
// cacheable, wraps it into a single Extent for writing back to the cache.
func (s resultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) {
	resp, err := s.next.Do(ctx, r)
	if err != nil {
		return nil, nil, err
	}

	// Non-cacheable responses are returned with an empty (non-nil) extent
	// slice so the caller skips the cache write.
	if !s.shouldCacheResponse(ctx, r, resp, maxCacheTime) {
		return resp, []Extent{}, nil
	}

	ext, err := toExtent(ctx, r, s.extractor.ResponseWithoutHeaders(resp))
	if err != nil {
		return nil, nil, err
	}
	return resp, []Extent{ext}, nil
}
// handleHit serves a request partially from cached extents: sub-ranges
// missing from the cache are fetched downstream, merged with the cached
// responses, and the updated (merged, sorted) extent list is returned so
// the caller can write it back to the cache.
func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) {
	var (
		reqResps []RequestResponse
		err      error
	)
	sp, ctx := opentracing.StartSpanFromContext(ctx, "handleHit")
	defer sp.Finish()
	log := spanlogger.FromContext(ctx)
	defer log.Finish()

	// Work out which sub-ranges are missing from the cached extents.
	requests, responses, err := s.partition(r, extents)
	if err != nil {
		return nil, nil, err
	}
	if len(requests) == 0 {
		response, err := s.merger.MergeResponse(responses...)
		// No downstream requests so no need to write back to the cache.
		return response, nil, err
	}
	tenantIDs, err := tenant.TenantIDs(ctx)
	if err != nil {
		return nil, nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
	}
	// Fetch the missing ranges downstream, bounded by per-tenant parallelism.
	reqResps, err = DoRequests(ctx, s.next, requests, s.parallelismForReq(ctx, tenantIDs, r))
	if err != nil {
		return nil, nil, err
	}
	// Collect downstream responses and turn the cacheable ones into extents.
	for _, reqResp := range reqResps {
		responses = append(responses, reqResp.Response)
		if !s.shouldCacheResponse(ctx, r, reqResp.Response, maxCacheTime) {
			continue
		}
		extent, err := toExtent(ctx, reqResp.Request, s.extractor.ResponseWithoutHeaders(reqResp.Response))
		if err != nil {
			return nil, nil, err
		}
		extents = append(extents, extent)
	}
	sort.Slice(extents, func(i, j int) bool {
		if extents[i].Start == extents[j].Start {
			// as an optimization, for two extents starts at the same time, we
			// put bigger extent at the front of the slice, which helps
			// to reduce the amount of merge we have to do later.
			return extents[i].End > extents[j].End
		}
		return extents[i].Start < extents[j].Start
	})

	// Merge any extents - potentially overlapping
	accumulator, err := newAccumulator(extents[0])
	if err != nil {
		return nil, nil, err
	}
	mergedExtents := make([]Extent, 0, len(extents))

	for i := 1; i < len(extents); i++ {
		// Gap wider than one step: flush the accumulator and start a new one.
		if accumulator.End+r.GetStep() < extents[i].Start {
			mergedExtents, err = merge(mergedExtents, accumulator)
			if err != nil {
				return nil, nil, err
			}
			accumulator, err = newAccumulator(extents[i])
			if err != nil {
				return nil, nil, err
			}
			continue
		}

		// Extent fully contained in the accumulator: nothing to add.
		if accumulator.End >= extents[i].End {
			continue
		}

		// Overlapping/adjacent extent: extend the accumulator and merge the
		// responses.
		accumulator.TraceId = jaegerTraceID(ctx)
		accumulator.End = extents[i].End
		currentRes, err := extents[i].toResponse()
		if err != nil {
			return nil, nil, err
		}
		merged, err := s.merger.MergeResponse(accumulator.Response, currentRes)
		if err != nil {
			return nil, nil, err
		}
		accumulator.Response = merged
	}

	// Flush the final accumulator.
	mergedExtents, err = merge(mergedExtents, accumulator)
	if err != nil {
		return nil, nil, err
	}

	response, err := s.merger.MergeResponse(responses...)
	return response, mergedExtents, err
}
// accumulator pairs an in-progress merged Response with the Extent
// metadata (start/end/trace ID) describing the time range it covers.
type accumulator struct {
	Response
	Extent
}
// merge appends the accumulated extent to extents, serializing its merged
// response into a protobuf Any.
func merge(extents []Extent, acc *accumulator) ([]Extent, error) {
	serialized, err := types.MarshalAny(acc.Response)
	if err != nil {
		return nil, err
	}
	flushed := Extent{
		Start:    acc.Extent.Start,
		End:      acc.Extent.End,
		Response: serialized,
		TraceId:  acc.Extent.TraceId,
	}
	return append(extents, flushed), nil
}
// newAccumulator seeds an accumulator with the given extent and its
// decoded response.
func newAccumulator(base Extent) (*accumulator, error) {
	decoded, err := base.toResponse()
	if err != nil {
		return nil, err
	}
	acc := &accumulator{
		Response: decoded,
		Extent:   base,
	}
	return acc, nil
}
// toExtent converts a request/response pair into a cacheable Extent,
// tagging it with the current Jaeger trace ID for debuggability.
func toExtent(ctx context.Context, req Request, res Response) (Extent, error) {
	serialized, err := types.MarshalAny(res)
	if err != nil {
		return Extent{}, err
	}
	ext := Extent{
		Start:    req.GetStart().UnixMilli(),
		End:      req.GetEnd().UnixMilli(),
		Response: serialized,
		TraceId:  jaegerTraceID(ctx),
	}
	return ext, nil
}
// partition calculates the required requests to satisfy req given the cached data.
// extents must be in order by start time.
//
// It returns the downstream requests for the uncovered gaps and the
// responses extracted from the overlapping cached extents.
func (s resultsCache) partition(req Request, extents []Extent) ([]Request, []Response, error) {
	var requests []Request
	var cachedResponses []Response
	// start is a moving cursor: everything before it is already accounted for.
	start := req.GetStart().UnixMilli()
	end := req.GetEnd().UnixMilli()
	for _, extent := range extents {
		// If there is no overlap, ignore this extent.
		if extent.GetEnd() < start || extent.Start > end {
			continue
		}

		// If this extent is tiny and request is not tiny, discard it: more efficient to do a few larger queries.
		// Hopefully tiny request can make tiny extent into not-so-tiny extent.

		// However if the step is large enough, the split_query_by_interval middleware would generate a query with same start and end.
		// For example, if the step size is more than 12h and the interval is 24h.
		// This means the extent's start and end time would be same, even if the timerange covers several hours.
		if (req.GetStart() != req.GetEnd()) && ((end - start) > s.minCacheExtent) && (extent.End-extent.Start < s.minCacheExtent) {
			continue
		}

		// If there is a bit missing at the front, make a request for that.
		if start < extent.Start {
			r := req.WithStartEnd(time.UnixMilli(start), time.UnixMilli(extent.Start))
			requests = append(requests, r)
		}
		res, err := extent.toResponse()
		if err != nil {
			return nil, nil, err
		}
		// extract the overlap from the cached extent.
		cachedResponses = append(cachedResponses, s.extractor.Extract(start, end, res, extent.GetStart(), extent.GetEnd()))
		// Advance the cursor past the range this extent covered.
		start = extent.End
	}

	// Lastly, make a request for any data missing at the end.
	if start < req.GetEnd().UnixMilli() {
		r := req.WithStartEnd(time.UnixMilli(start), time.UnixMilli(end))
		requests = append(requests, r)
	}

	// If start and end are the same (valid in promql), start == req.GetEnd() and we won't do the query.
	// But we should only do the request if we don't have a valid cached response for it.
	if req.GetStart() == req.GetEnd() && len(cachedResponses) == 0 {
		requests = append(requests, req)
	}

	return requests, cachedResponses, nil
}
// filterRecentExtents truncates extents so that nothing newer than
// now-maxCacheFreshness is written to the cache; recent data may still be
// in flux (e.g. late-arriving samples) and must not be cached.
func (s resultsCache) filterRecentExtents(req Request, maxCacheFreshness time.Duration, extents []Extent) ([]Extent, error) {
	// Align the cut-off down to a step boundary so partially covered steps
	// are dropped rather than half-cached.
	step := math.Max64(1, req.GetStep())
	maxCacheTime := (int64(model.Now().Add(-maxCacheFreshness)) / step) * step
	for i := range extents {
		// Never cache data for the latest freshness period.
		if extents[i].End > maxCacheTime {
			extents[i].End = maxCacheTime
			res, err := extents[i].toResponse()
			if err != nil {
				return nil, err
			}
			// Re-extract only the part of the response that is old enough,
			// then re-serialize it into the (now shortened) extent.
			extracted := s.extractor.Extract(extents[i].GetStart(), maxCacheTime, res, extents[i].GetStart(), extents[i].GetEnd())
			anyResp, err := types.MarshalAny(extracted)
			if err != nil {
				return nil, err
			}
			extents[i].Response = anyResp
		}
	}
	return extents, nil
}
// get fetches the cached extents for key. It returns ok=false on a cache
// miss, an unmarshalling failure, a key mismatch (hash collision), or when
// any extent carries a nil response (an old proto schema), forcing a
// refresh of that entry.
func (s resultsCache) get(ctx context.Context, key string) ([]Extent, bool) {
	// Keys are hashed before being handed to the backing cache.
	found, bufs, _, _ := s.cache.Fetch(ctx, []string{cache.HashKey(key)})
	if len(found) != 1 {
		return nil, false
	}

	var resp CachedResponse
	sp, ctx := opentracing.StartSpanFromContext(ctx, "unmarshal-extent") //nolint:ineffassign,staticcheck
	defer sp.Finish()

	log := spanlogger.FromContext(ctx)
	defer log.Finish()

	log.LogFields(otlog.Int("bytes", len(bufs[0])))

	if err := proto.Unmarshal(bufs[0], &resp); err != nil {
		level.Error(log).Log("msg", "error unmarshalling cached value", "err", err)
		log.Error(err)
		return nil, false
	}

	// Guard against hash collisions: the stored payload records the
	// original (unhashed) key.
	if resp.Key != key {
		return nil, false
	}

	// Refreshes the cache if it contains an old proto schema.
	for _, e := range resp.Extents {
		if e.Response == nil {
			return nil, false
		}
	}

	return resp.Extents, true
}
// put serializes extents under key and stores them in the backing cache.
// Marshalling failures are logged and the write is skipped: the cache is
// strictly best-effort.
func (s resultsCache) put(ctx context.Context, key string, extents []Extent) {
	payload := CachedResponse{
		Key:     key,
		Extents: extents,
	}
	buf, err := proto.Marshal(&payload)
	if err != nil {
		level.Error(s.logger).Log("msg", "error marshalling cached value", "err", err)
		return
	}
	_ = s.cache.Store(ctx, []string{cache.HashKey(key)}, [][]byte{buf})
}
// jaegerTraceID returns the Jaeger trace ID of the span attached to ctx,
// or the empty string when there is no span or it is not a Jaeger span.
func jaegerTraceID(ctx context.Context) string {
	span := opentracing.SpanFromContext(ctx)
	if span == nil {
		return ""
	}
	if sc, ok := span.Context().(jaeger.SpanContext); ok {
		return sc.TraceID().String()
	}
	return ""
}
func extractMatrix(start, end int64, matrix []SampleStream) []SampleStream {
result := make([]SampleStream, 0, len(matrix))
for _, stream := range matrix {
@ -702,20 +330,3 @@ func extractSampleStream(start, end int64, stream SampleStream) (SampleStream, b
}
return result, true
}
// toResponse decodes the protobuf Any stored in the extent back into a
// concrete Response.
func (e *Extent) toResponse() (Response, error) {
	msg, err := types.EmptyAny(e.Response)
	if err != nil {
		return nil, err
	}
	if err = types.UnmarshalAny(e.Response, msg); err != nil {
		return nil, err
	}
	if resp, ok := msg.(Response); ok {
		return resp, nil
	}
	return nil, fmt.Errorf("bad cached type")
}

@ -3,7 +3,6 @@ package queryrangebase
import (
"context"
"fmt"
"strconv"
"testing"
"time"
@ -18,6 +17,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util/constants"
)
@ -398,362 +398,13 @@ func TestShouldCache(t *testing.T) {
}
}
// TestPartition verifies how resultsCache.partition splits a request into
// downstream requests and cached responses across complete-hit, miss,
// partial-hit, tiny-gap and single-step scenarios.
func TestPartition(t *testing.T) {
	for _, tc := range []struct {
		name                   string
		input                  Request
		prevCachedResponse     []Extent
		expectedRequests       []Request
		expectedCachedResponse []Response
	}{
		{
			name: "Test a complete hit.",
			input: &PrometheusRequest{
				Start: time.UnixMilli(0),
				End:   time.UnixMilli(100),
			},
			prevCachedResponse: []Extent{
				mkExtent(0, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(0, 100, 10),
			},
		},
		{
			name: "Test with a complete miss.",
			input: &PrometheusRequest{
				Start: time.UnixMilli(0),
				End:   time.UnixMilli(100),
			},
			prevCachedResponse: []Extent{
				mkExtent(110, 210),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: time.UnixMilli(0),
					End:   time.UnixMilli(100),
				},
			},
		},
		{
			name: "Test a partial hit.",
			input: &PrometheusRequest{
				Start: time.UnixMilli(0),
				End:   time.UnixMilli(100),
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 100),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: time.UnixMilli(0),
					End:   time.UnixMilli(50),
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(50, 100, 10),
			},
		},
		{
			name: "Test multiple partial hits.",
			input: &PrometheusRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(200),
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 120),
				mkExtent(160, 250),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: time.UnixMilli(120),
					End:   time.UnixMilli(160),
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 120, 10),
				mkAPIResponse(160, 200, 10),
			},
		},
		{
			name: "Partial hits with tiny gap.",
			input: &PrometheusRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(160),
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 120),
				mkExtent(122, 130),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: time.UnixMilli(120),
					End:   time.UnixMilli(160),
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 120, 10),
			},
		},
		{
			name: "Extent is outside the range and the request has a single step (same start and end).",
			input: &PrometheusRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(100),
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 90),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: time.UnixMilli(100),
					End:   time.UnixMilli(100),
				},
			},
		},
		{
			name: "Test when hit has a large step and only a single sample extent.",
			// If there is a only a single sample in the split interval, start and end will be the same.
			input: &PrometheusRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(100),
			},
			prevCachedResponse: []Extent{
				mkExtent(100, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 105, 10),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			s := resultsCache{
				extractor:      PrometheusResponseExtractor{},
				minCacheExtent: 10,
			}
			reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse)
			require.Nil(t, err)
			require.Equal(t, tc.expectedRequests, reqs)
			require.Equal(t, tc.expectedCachedResponse, resps)
		})
	}
}
// TestHandleHit verifies extent dropping, merging, and extension behavior
// when a request partially overlaps cached extents, including the case of
// a corrupt (nil-response) extent being shadowed by a larger good one.
func TestHandleHit(t *testing.T) {
	for _, tc := range []struct {
		name                       string
		input                      Request
		cachedEntry                []Extent
		expectedUpdatedCachedEntry []Extent
	}{
		{
			name: "Should drop tiny extent that overlaps with non-tiny request only",
			input: &PrometheusRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(120),
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(110, 150, 5),
				mkExtentWithStep(160, 165, 5),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 150, 5),
				mkExtentWithStep(160, 165, 5),
			},
		},
		{
			name: "Should replace tiny extents that are cover by bigger request",
			input: &PrometheusRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(200),
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(110, 115, 5),
				mkExtentWithStep(120, 125, 5),
				mkExtentWithStep(220, 225, 5),
				mkExtentWithStep(240, 250, 5),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 200, 5),
				mkExtentWithStep(220, 225, 5),
				mkExtentWithStep(240, 250, 5),
			},
		},
		{
			name: "Should not drop tiny extent that completely overlaps with tiny request",
			input: &PrometheusRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(105),
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(160, 165, 5),
			},
			expectedUpdatedCachedEntry: nil, // no cache update need, request fulfilled using cache
		},
		{
			name: "Should not drop tiny extent that partially center-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: time.UnixMilli(106),
				End:   time.UnixMilli(108),
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(104, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: nil, // no cache update need, request fulfilled using cache
		},
		{
			name: "Should not drop tiny extent that partially left-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(106),
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(104, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(100, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
		},
		{
			name: "Should not drop tiny extent that partially right-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(106),
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(98, 102, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(98, 106, 2),
				mkExtentWithStep(160, 166, 2),
			},
		},
		{
			name: "Should merge fragmented extents if request fills the hole",
			input: &PrometheusRequest{
				Start: time.UnixMilli(40),
				End:   time.UnixMilli(80),
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 20, 20),
				mkExtentWithStep(80, 100, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 100, 20),
			},
		},
		{
			name: "Should left-extend extent if request starts earlier than extent in cache",
			input: &PrometheusRequest{
				Start: time.UnixMilli(40),
				End:   time.UnixMilli(80),
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(40, 160, 20),
			},
		},
		{
			name: "Should right-extend extent if request ends later than extent in cache",
			input: &PrometheusRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(180),
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 180, 20),
			},
		},
		{
			name: "Should not throw error if complete-overlapped smaller Extent is erroneous",
			input: &PrometheusRequest{
				// This request is carefully crated such that cachedEntry is not used to fulfill
				// the request.
				Start: time.UnixMilli(160),
				End:   time.UnixMilli(180),
				Step:  20,
			},
			cachedEntry: []Extent{
				{
					Start: 60,
					End:   80,
					// if the optimization of "sorting by End when Start of 2 Extents are equal" is not there, this nil
					// response would cause error during Extents merge phase. With the optimization
					// this bad Extent should be dropped. The good Extent below can be used instead.
					Response: nil,
				},
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 180, 20),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			sut := resultsCache{
				extractor:         PrometheusResponseExtractor{},
				minCacheExtent:    10,
				limits:            mockLimits{},
				merger:            PrometheusCodec,
				parallelismForReq: func(_ context.Context, tenantIDs []string, r Request) int { return 1 },
				next: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
					return mkAPIResponse(req.GetStart().UnixMilli(), req.GetEnd().UnixMilli(), req.GetStep()), nil
				}),
			}
			ctx := user.InjectOrgID(context.Background(), "1")
			response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0)
			require.NoError(t, err)

			expectedResponse := mkAPIResponse(tc.input.GetStart().UnixMilli(), tc.input.GetEnd().UnixMilli(), tc.input.GetStep())
			require.Equal(t, expectedResponse, response, "response does not match the expectation")
			require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, "updated cache entry does not match the expectation")
		})
	}
}
func TestResultsCache(t *testing.T) {
calls := 0
cfg := ResultsCacheConfig{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
Config: resultscache.Config{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
},
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
@ -761,7 +412,7 @@ func TestResultsCache(t *testing.T) {
rcm, err := NewResultsCacheMiddleware(
log.NewNopLogger(),
c,
constSplitter(day),
resultscache.ConstSplitter(day),
mockLimits{},
PrometheusCodec,
PrometheusResponseExtractor{},
@ -807,7 +458,7 @@ func TestResultsCacheRecent(t *testing.T) {
rcm, err := NewResultsCacheMiddleware(
log.NewNopLogger(),
c,
constSplitter(day),
resultscache.ConstSplitter(day),
mockLimits{maxCacheFreshness: 10 * time.Minute},
PrometheusCodec,
PrometheusResponseExtractor{},
@ -844,122 +495,6 @@ func TestResultsCacheRecent(t *testing.T) {
require.Equal(t, parsedResponse, resp)
}
// TestResultsCacheMaxFreshness checks that requests whose end time falls
// within the per-tenant max-cache-freshness window bypass the cache and go
// straight to the downstream handler.
func TestResultsCacheMaxFreshness(t *testing.T) {
	modelNow := model.Now()
	for i, tc := range []struct {
		fakeLimits       Limits
		Handler          HandlerFunc
		expectedResponse *PrometheusResponse
	}{
		{
			fakeLimits:       mockLimits{maxCacheFreshness: 5 * time.Second},
			Handler:          nil,
			expectedResponse: mkAPIResponse(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3), 10),
		},
		{
			// should not lookup cache because per-tenant override will be applied
			fakeLimits: mockLimits{maxCacheFreshness: 10 * time.Minute},
			Handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) {
				return parsedResponse, nil
			}),
			expectedResponse: parsedResponse,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			var cfg ResultsCacheConfig
			flagext.DefaultValues(&cfg)
			cfg.CacheConfig.Cache = cache.NewMockCache()
			c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
			require.NoError(t, err)
			fakeLimits := tc.fakeLimits
			rcm, err := NewResultsCacheMiddleware(
				log.NewNopLogger(),
				c,
				constSplitter(day),
				fakeLimits,
				PrometheusCodec,
				PrometheusResponseExtractor{},
				nil,
				nil,
				func(_ context.Context, tenantIDs []string, r Request) int {
					return tc.fakeLimits.MaxQueryParallelism(context.Background(), "fake")
				},
				false,
				nil,
			)
			require.NoError(t, err)

			// create cache with handler
			rc := rcm.Wrap(tc.Handler)
			ctx := user.InjectOrgID(context.Background(), "1")

			// create request with start end within the key extents
			req := parsedRequest.WithStartEnd(time.UnixMilli(int64(modelNow)-(50*1e3)), time.UnixMilli(int64(modelNow)-(10*1e3)))

			// fill cache
			key := constSplitter(day).GenerateCacheKey(context.Background(), "1", req)
			rc.(*resultsCache).put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))})

			resp, err := rc.Do(ctx, req)
			require.NoError(t, err)
			require.Equal(t, tc.expectedResponse, resp)
		})
	}
}
// Test_resultsCache_MissingData ensures that cached entries containing any
// extent with a nil response (written by an old proto schema) are treated
// as cache misses by get.
func Test_resultsCache_MissingData(t *testing.T) {
	cfg := ResultsCacheConfig{
		CacheConfig: cache.Config{
			Cache: cache.NewMockCache(),
		},
	}
	c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
	require.NoError(t, err)
	rm, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		c,
		constSplitter(day),
		mockLimits{},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		func(_ context.Context, tenantIDs []string, r Request) int {
			return mockLimits{}.MaxQueryParallelism(context.Background(), "fake")
		},
		false,
		nil,
	)
	require.NoError(t, err)
	rc := rm.Wrap(nil).(*resultsCache)
	ctx := context.Background()

	// fill up the cache
	rc.put(ctx, "empty", []Extent{{
		Start:    100,
		End:      200,
		Response: nil,
	}})
	rc.put(ctx, "notempty", []Extent{mkExtent(100, 120)})
	rc.put(ctx, "mixed", []Extent{mkExtent(100, 120), {
		Start:    120,
		End:      200,
		Response: nil,
	}})

	// All-nil extents: miss.
	extents, hit := rc.get(ctx, "empty")
	require.Empty(t, extents)
	require.False(t, hit)

	// All-good extents: hit.
	extents, hit = rc.get(ctx, "notempty")
	require.Equal(t, len(extents), 1)
	require.True(t, hit)

	// One bad extent poisons the whole entry: miss.
	extents, hit = rc.get(ctx, "mixed")
	require.Equal(t, len(extents), 0)
	require.False(t, hit)
}
func toMs(t time.Duration) int64 {
return t.Nanoseconds() / (int64(time.Millisecond) / int64(time.Nanosecond))
}
@ -984,7 +519,7 @@ func TestConstSplitter_generateCacheKey(t *testing.T) {
}
for _, tt := range tests {
t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) {
if got := constSplitter(tt.interval).GenerateCacheKey(context.Background(), "fake", tt.r); got != tt.want {
if got := resultscache.ConstSplitter(tt.interval).GenerateCacheKey(context.Background(), "fake", tt.r.(resultscache.Request)); got != tt.want {
t.Errorf("generateKey() = %v, want %v", got, tt.want)
}
})
@ -1033,7 +568,7 @@ func TestResultsCacheShouldCacheFunc(t *testing.T) {
rcm, err := NewResultsCacheMiddleware(
log.NewNopLogger(),
c,
constSplitter(day),
resultscache.ConstSplitter(day),
mockLimits{maxCacheFreshness: 10 * time.Minute},
PrometheusCodec,
PrometheusResponseExtractor{},

@ -2,6 +2,8 @@ package queryrangebase
import (
"context"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
)
// RequestResponse contains a request response and the respective request that was used.
@ -58,3 +60,23 @@ func DoRequests(ctx context.Context, downstream Handler, reqs []Request, paralle
return resps, firstErr
}
// queryMergerAsCacheResponseMerger adapts a queryrangebase.Merger so it
// satisfies the resultscache.ResponseMerger interface.
type queryMergerAsCacheResponseMerger struct {
	Merger
}
// MergeResponse converts the resultscache responses to queryrangebase
// responses, merges them with the wrapped Merger, and converts the result
// back to a resultscache response.
func (m *queryMergerAsCacheResponseMerger) MergeResponse(responses ...resultscache.Response) (resultscache.Response, error) {
	converted := make([]Response, 0, len(responses))
	for _, resp := range responses {
		converted = append(converted, resp.(Response))
	}
	merged, err := m.Merger.MergeResponse(converted...)
	if err != nil {
		return nil, err
	}
	return merged.(resultscache.Response), nil
}
// FromQueryResponseMergerToCacheResponseMerger wraps a queryrangebase
// Merger so it can be used where a resultscache.ResponseMerger is expected.
func FromQueryResponseMergerToCacheResponseMerger(m Merger) resultscache.ResponseMerger {
	return &queryMergerAsCacheResponseMerger{m}
}

@ -585,7 +585,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
}{
{
name: "logs query touching just the active schema config",
req: defaultReq().WithStartEndTime(now.Add(-time.Hour).Time(), now.Time()).WithQuery(`{foo="bar"}`),
req: defaultReq().WithStartEnd(now.Add(-time.Hour).Time(), now.Time()).WithQuery(`{foo="bar"}`),
resp: &LokiResponse{
Status: loghttp.QueryStatusSuccess,
Headers: []definitions.PrometheusResponseHeader{
@ -596,7 +596,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "logs query touching just the prev schema config",
req: defaultReq().WithStartEndTime(confs[0].From.Time.Time(), confs[0].From.Time.Add(time.Hour).Time()).WithQuery(`{foo="bar"}`),
req: defaultReq().WithStartEnd(confs[0].From.Time.Time(), confs[0].From.Time.Add(time.Hour).Time()).WithQuery(`{foo="bar"}`),
resp: &LokiResponse{
Status: loghttp.QueryStatusSuccess,
Headers: []definitions.PrometheusResponseHeader{
@ -607,7 +607,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "metric query touching just the active schema config",
req: defaultReq().WithStartEndTime(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`),
req: defaultReq().WithStartEnd(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`),
resp: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@ -624,7 +624,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "metric query touching just the prev schema config",
req: defaultReq().WithStartEndTime(confs[0].From.Time.Add(time.Hour).Time(), confs[0].From.Time.Add(2*time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`),
req: defaultReq().WithStartEnd(confs[0].From.Time.Add(time.Hour).Time(), confs[0].From.Time.Add(2*time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`),
resp: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@ -641,7 +641,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "logs query covering both schemas",
req: defaultReq().WithStartEndTime(confs[0].From.Time.Time(), now.Time()).WithQuery(`{foo="bar"}`),
req: defaultReq().WithStartEnd(confs[0].From.Time.Time(), now.Time()).WithQuery(`{foo="bar"}`),
resp: &LokiResponse{
Status: loghttp.QueryStatusSuccess,
Headers: []definitions.PrometheusResponseHeader{
@ -652,7 +652,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "metric query covering both schemas",
req: defaultReq().WithStartEndTime(confs[0].From.Time.Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m])`),
req: defaultReq().WithStartEnd(confs[0].From.Time.Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m])`),
resp: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@ -669,7 +669,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "metric query with start/end within first schema but with large enough range to cover previous schema too",
req: defaultReq().WithStartEndTime(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[24h])`),
req: defaultReq().WithStartEnd(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[24h])`),
resp: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@ -686,7 +686,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "metric query with start/end within first schema but with large enough offset to shift it to previous schema",
req: defaultReq().WithStartEndTime(confs[1].From.Time.Add(5*time.Minute).Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m] offset 12h)`),
req: defaultReq().WithStartEnd(confs[1].From.Time.Add(5*time.Minute).Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m] offset 12h)`),
resp: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,

@ -7,11 +7,10 @@ import (
"strings"
"time"
"github.com/grafana/dskit/user"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/user"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"

@ -29,6 +29,7 @@ import (
"github.com/grafana/loki/pkg/querier/plan"
base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
"github.com/grafana/loki/pkg/util"
@ -46,11 +47,13 @@ var (
MaxRetries: 3,
CacheResults: true,
ResultsCacheConfig: base.ResultsCacheConfig{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
Enabled: true,
MaxSizeMB: 1024,
TTL: 24 * time.Hour,
Config: resultscache.Config{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
Enabled: true,
MaxSizeMB: 1024,
TTL: 24 * time.Hour,
},
},
},
},
@ -59,22 +62,26 @@ var (
CacheIndexStatsResults: true,
StatsCacheConfig: IndexStatsCacheConfig{
ResultsCacheConfig: base.ResultsCacheConfig{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
Enabled: true,
MaxSizeMB: 1024,
TTL: 24 * time.Hour,
Config: resultscache.Config{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
Enabled: true,
MaxSizeMB: 1024,
TTL: 24 * time.Hour,
},
},
},
},
},
VolumeCacheConfig: VolumeCacheConfig{
ResultsCacheConfig: base.ResultsCacheConfig{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
Enabled: true,
MaxSizeMB: 1024,
TTL: 24 * time.Hour,
Config: resultscache.Config{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
Enabled: true,
MaxSizeMB: 1024,
TTL: 24 * time.Hour,
},
},
},
},
@ -665,10 +672,12 @@ func TestNewTripperware_Caches(t *testing.T) {
Config: base.Config{
CacheResults: true,
ResultsCacheConfig: base.ResultsCacheConfig{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
MaxSizeMB: 1,
Enabled: true,
Config: resultscache.Config{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
MaxSizeMB: 1,
Enabled: true,
},
},
},
},
@ -684,10 +693,12 @@ func TestNewTripperware_Caches(t *testing.T) {
Config: base.Config{
CacheResults: true,
ResultsCacheConfig: base.ResultsCacheConfig{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
MaxSizeMB: 1,
Enabled: true,
Config: resultscache.Config{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
MaxSizeMB: 1,
Enabled: true,
},
},
},
},
@ -703,10 +714,12 @@ func TestNewTripperware_Caches(t *testing.T) {
Config: base.Config{
CacheResults: true,
ResultsCacheConfig: base.ResultsCacheConfig{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
Enabled: true,
MaxSizeMB: 2000,
Config: resultscache.Config{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
Enabled: true,
MaxSizeMB: 2000,
},
},
},
},
@ -714,10 +727,12 @@ func TestNewTripperware_Caches(t *testing.T) {
CacheIndexStatsResults: true,
StatsCacheConfig: IndexStatsCacheConfig{
ResultsCacheConfig: base.ResultsCacheConfig{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
Enabled: true,
MaxSizeMB: 1000,
Config: resultscache.Config{
CacheConfig: cache.Config{
EmbeddedCache: cache.EmbeddedCacheConfig{
Enabled: true,
MaxSizeMB: 1000,
},
},
},
},

@ -15,6 +15,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/validation"
)
@ -24,7 +25,7 @@ type VolumeSplitter struct {
}
// GenerateCacheKey generates a cache key based on the userID, Request and interval.
func (i VolumeSplitter) GenerateCacheKey(ctx context.Context, userID string, r queryrangebase.Request) string {
func (i VolumeSplitter) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
cacheKey := i.cacheKeyLimits.GenerateCacheKey(ctx, userID, r)
volumeReq := r.(*logproto.VolumeRequest)
@ -38,7 +39,7 @@ type VolumeExtractor struct{}
// Extract favors the ability to cache over exactness of results. It assumes a constant distribution
// of log volumes over a range and will extract subsets proportionally.
func (p VolumeExtractor) Extract(start, end int64, res queryrangebase.Response, resStart, resEnd int64) queryrangebase.Response {
func (p VolumeExtractor) Extract(start, end int64, res resultscache.Response, resStart, resEnd int64) resultscache.Response {
factor := util.GetFactorOfTime(start, end, resStart, resEnd)
volumeRes := res.(*VolumeResponse)
@ -101,7 +102,7 @@ func NewVolumeCacheMiddleware(
c cache.Cache,
cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
shouldCache queryrangebase.ShouldCacheFn,
parallelismForReq func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int,
parallelismForReq queryrangebase.ParallelismForReqFn,
retentionEnabled bool,
transformer UserIDTransformer,
metrics *queryrangebase.ResultsCacheMetrics,

@ -10,6 +10,8 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
@ -22,8 +24,10 @@ import (
func TestVolumeCache(t *testing.T) {
setup := func(volResp *VolumeResponse) (*int, queryrangebase.Handler) {
cfg := queryrangebase.ResultsCacheConfig{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
Config: resultscache.Config{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
},
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
@ -281,8 +285,10 @@ func TestVolumeCache_RecentData(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
cfg := queryrangebase.ResultsCacheConfig{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
Config: resultscache.Config{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
},
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)

@ -0,0 +1,467 @@
package resultscache
import (
"context"
"fmt"
"net/http"
"sort"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types"
"github.com/grafana/dskit/httpgrpc"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/common/model"
"github.com/uber/jaeger-client-go"
"github.com/grafana/dskit/tenant"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/util/math"
"github.com/grafana/loki/pkg/util/spanlogger"
"github.com/grafana/loki/pkg/util/validation"
)
// ConstSplitter generates cache keys by bucketing request start times into
// fixed-size intervals.
type ConstSplitter time.Duration

// GenerateCacheKey produces a key of the form userID:query:step:bucket,
// where bucket is the index of the fixed-size interval containing the
// request's start time.
func (t ConstSplitter) GenerateCacheKey(_ context.Context, userID string, r Request) string {
	intervalMs := int64(time.Duration(t) / time.Millisecond)
	bucket := r.GetStart().UnixMilli() / intervalMs
	return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), bucket)
}
// ShouldCacheReqFn checks whether the current request should go to cache or not.
// If not, just send the request to next handler.
type ShouldCacheReqFn func(ctx context.Context, r Request) bool

// ShouldCacheResFn checks whether the current response should go to cache or not.
// maxCacheTime is the latest timestamp (in milliseconds) that is considered cacheable.
type ShouldCacheResFn func(ctx context.Context, r Request, res Response, maxCacheTime int64) bool

// ParallelismForReqFn returns the downstream parallelism to use for a given request.
type ParallelismForReqFn func(ctx context.Context, tenantIDs []string, r Request) int
// ResultsCache caches query responses keyed by (user, query, step, interval)
// and serves overlapping portions of subsequent requests from the cache,
// querying the downstream Handler only for the missing time ranges.
type ResultsCache struct {
	logger               log.Logger
	next                 Handler
	cache                cache.Cache
	limits               Limits
	splitter             KeyGenerator
	cacheGenNumberLoader CacheGenNumberLoader
	retentionEnabled     bool
	extractor            Extractor
	minCacheExtent       int64 // discard any cache extent smaller than this
	merger               ResponseMerger
	shouldCacheReq       ShouldCacheReqFn
	shouldCacheRes       ShouldCacheResFn
	// Use the named ParallelismForReqFn type declared above for consistency
	// with the shouldCacheReq/shouldCacheRes fields.
	parallelismForReq ParallelismForReqFn
}
// NewResultsCache creates results cache from config.
// The middleware cache result using a unique cache key for a given request (step,query,user) and interval.
// The cache assumes that each request length (end-start) is below or equal the interval.
// Each request starting from within the same interval will hit the same cache entry.
// If the cache doesn't have the entire duration of the request cached, it will query the uncached parts and append them to the cache entries.
// see `generateKey`.
func NewResultsCache(
	logger log.Logger,
	c cache.Cache,
	next Handler,
	keyGen KeyGenerator,
	limits Limits,
	merger ResponseMerger,
	extractor Extractor,
	shouldCacheReq ShouldCacheReqFn,
	shouldCacheRes ShouldCacheResFn,
	// Use the named ParallelismForReqFn type (same underlying signature, so
	// existing callers passing func literals are unaffected).
	parallelismForReq ParallelismForReqFn,
	cacheGenNumberLoader CacheGenNumberLoader,
	retentionEnabled bool,
) *ResultsCache {
	return &ResultsCache{
		logger:               logger,
		next:                 next,
		cache:                c,
		limits:               limits,
		splitter:             keyGen,
		cacheGenNumberLoader: cacheGenNumberLoader,
		retentionEnabled:     retentionEnabled,
		extractor:            extractor,
		// Extents shorter than 5 minutes are discarded; see partition.
		minCacheExtent:    (5 * time.Minute).Milliseconds(),
		merger:            merger,
		shouldCacheReq:    shouldCacheReq,
		shouldCacheRes:    shouldCacheRes,
		parallelismForReq: parallelismForReq,
	}
}
// Do implements Handler. It serves as much of the request as possible from
// cached extents, fetches only the uncached portions from the downstream
// handler, and writes the merged extents back to the cache.
func (s ResultsCache) Do(ctx context.Context, r Request) (Response, error) {
	sp, ctx := opentracing.StartSpanFromContext(ctx, "resultsCache.Do")
	defer sp.Finish()
	tenantIDs, err := tenant.TenantIDs(ctx)
	if err != nil {
		// Pass the error text as an argument, not as the format string, so a
		// literal '%' in the message cannot be misinterpreted as a verb.
		return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
	}

	if s.shouldCacheReq != nil && !s.shouldCacheReq(ctx, r) {
		return s.next.Do(ctx, r)
	}

	if s.cacheGenNumberLoader != nil && s.retentionEnabled {
		ctx = cache.InjectCacheGenNumber(ctx, s.cacheGenNumberLoader.GetResultsCacheGenNumber(tenantIDs))
	}

	var (
		key      = s.splitter.GenerateCacheKey(ctx, tenant.JoinTenantIDs(tenantIDs), r)
		extents  []Extent
		response Response
	)

	sp.LogKV(
		"query", r.GetQuery(),
		"step", time.UnixMilli(r.GetStep()),
		"start", r.GetStart(),
		"end", r.GetEnd(),
		"key", key,
	)

	cacheFreshnessCapture := func(id string) time.Duration { return s.limits.MaxCacheFreshness(ctx, id) }
	maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture)
	maxCacheTime := int64(model.Now().Add(-maxCacheFreshness))
	// Requests starting within the freshness window bypass the cache entirely.
	if r.GetStart().UnixMilli() > maxCacheTime {
		return s.next.Do(ctx, r)
	}

	cached, ok := s.get(ctx, key)
	if ok {
		response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime)
	} else {
		response, extents, err = s.handleMiss(ctx, r, maxCacheTime)
	}

	if err == nil && len(extents) > 0 {
		// Truncate extents that reach into the freshness window before storing.
		extents, err := s.filterRecentExtents(r, maxCacheFreshness, extents)
		if err != nil {
			return nil, err
		}
		s.put(ctx, key, extents)
	}

	return response, err
}
// handleMiss queries the downstream handler for the full request and, when
// allowed, returns the response as a single cacheable extent.
func (s ResultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) {
	response, err := s.next.Do(ctx, r)
	if err != nil {
		return nil, nil, err
	}

	// Guard against a nil shouldCacheRes, consistently with handleHit: the
	// constructor accepts nil, which means "always cacheable". Without this
	// check a cache miss would panic when no predicate is configured.
	if s.shouldCacheRes != nil && !s.shouldCacheRes(ctx, r, response, maxCacheTime) {
		return response, []Extent{}, nil
	}

	extent, err := toExtent(ctx, r, response)
	if err != nil {
		return nil, nil, err
	}

	extents := []Extent{
		extent,
	}
	return response, extents, nil
}
// handleHit satisfies the request from the cached extents, fetching any
// missing ranges from the downstream handler. It returns the merged response
// plus the updated (sorted, merged) extents to write back to the cache; a
// nil extents slice means the cache entry needs no update.
func (s ResultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) {
	var (
		reqResps []RequestResponse
		err      error
	)
	sp, ctx := opentracing.StartSpanFromContext(ctx, "handleHit")
	defer sp.Finish()
	log := spanlogger.FromContext(ctx)
	defer log.Finish()

	// Split the request into responses served from cache plus requests for the gaps.
	requests, responses, err := s.partition(r, extents)
	if err != nil {
		return nil, nil, err
	}
	if len(requests) == 0 {
		response, err := s.merger.MergeResponse(responses...)
		// No downstream requests so no need to write back to the cache.
		return response, nil, err
	}

	tenantIDs, err := tenant.TenantIDs(ctx)
	if err != nil {
		return nil, nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
	}
	// Fetch the uncached ranges with per-tenant parallelism.
	reqResps, err = DoRequests(ctx, s.next, requests, s.parallelismForReq(ctx, tenantIDs, r))
	if err != nil {
		return nil, nil, err
	}

	for _, reqResp := range reqResps {
		responses = append(responses, reqResp.Response)
		if s.shouldCacheRes != nil && !s.shouldCacheRes(ctx, r, reqResp.Response, maxCacheTime) {
			continue
		}
		extent, err := toExtent(ctx, reqResp.Request, reqResp.Response)
		if err != nil {
			return nil, nil, err
		}
		extents = append(extents, extent)
	}
	sort.Slice(extents, func(i, j int) bool {
		if extents[i].Start == extents[j].Start {
			// as an optimization, for two extents starts at the same time, we
			// put bigger extent at the front of the slice, which helps
			// to reduce the amount of merge we have to do later.
			return extents[i].End > extents[j].End
		}

		return extents[i].Start < extents[j].Start
	})

	// Merge any extents - potentially overlapping
	accumulator, err := newAccumulator(extents[0])
	if err != nil {
		return nil, nil, err
	}
	mergedExtents := make([]Extent, 0, len(extents))

	for i := 1; i < len(extents); i++ {
		// Gap (larger than one step) between the accumulator and the next
		// extent: flush the accumulator and restart from this extent.
		if accumulator.End+r.GetStep() < extents[i].Start {
			mergedExtents, err = merge(mergedExtents, accumulator)
			if err != nil {
				return nil, nil, err
			}
			accumulator, err = newAccumulator(extents[i])
			if err != nil {
				return nil, nil, err
			}
			continue
		}

		// The next extent is fully contained in the accumulator: skip it.
		if accumulator.End >= extents[i].End {
			continue
		}

		// Overlapping or adjacent: extend the accumulator with this extent.
		accumulator.TraceId = jaegerTraceID(ctx)
		accumulator.End = extents[i].End
		currentRes, err := extents[i].toResponse()
		if err != nil {
			return nil, nil, err
		}
		merged, err := s.merger.MergeResponse(accumulator.Response, currentRes)
		if err != nil {
			return nil, nil, err
		}
		accumulator.Response = merged
	}

	// Flush the final accumulator.
	mergedExtents, err = merge(mergedExtents, accumulator)
	if err != nil {
		return nil, nil, err
	}

	response, err := s.merger.MergeResponse(responses...)
	return response, mergedExtents, err
}
// accumulator pairs a decoded Response with the Extent metadata being grown
// while merging overlapping extents in handleHit.
type accumulator struct {
	Response
	Extent
}
// merge appends the accumulated extent — with its merged response re-wrapped
// as a protobuf Any — to the given extent slice.
func merge(extents []Extent, acc *accumulator) ([]Extent, error) {
	packed, err := types.MarshalAny(acc.Response)
	if err != nil {
		return nil, err
	}
	flushed := Extent{
		Start:    acc.Extent.Start,
		End:      acc.Extent.End,
		Response: packed,
		TraceId:  acc.Extent.TraceId,
	}
	return append(extents, flushed), nil
}
// newAccumulator seeds an accumulator with the given extent and its decoded
// response.
func newAccumulator(base Extent) (*accumulator, error) {
	decoded, err := base.toResponse()
	if err != nil {
		return nil, err
	}
	acc := &accumulator{
		Response: decoded,
		Extent:   base,
	}
	return acc, nil
}
// toExtent wraps a response into an Extent covering the request's time range,
// tagging it with the current Jaeger trace ID (if any).
func toExtent(ctx context.Context, req Request, res Response) (Extent, error) {
	packed, err := types.MarshalAny(res)
	if err != nil {
		return Extent{}, err
	}
	ext := Extent{
		Start:    req.GetStart().UnixMilli(),
		End:      req.GetEnd().UnixMilli(),
		Response: packed,
		TraceId:  jaegerTraceID(ctx),
	}
	return ext, nil
}
// partition calculates the required requests to satisfy req given the cached data.
// extents must be in order by start time.
// It returns the downstream requests for uncached gaps and the responses
// extracted from the overlapping cached extents.
func (s ResultsCache) partition(req Request, extents []Extent) ([]Request, []Response, error) {
	var requests []Request
	var cachedResponses []Response
	// start is advanced past each consumed extent; end stays fixed.
	start := req.GetStart().UnixMilli()
	end := req.GetEnd().UnixMilli()

	for _, extent := range extents {
		// If there is no overlap, ignore this extent.
		if extent.GetEnd() < start || extent.GetStart() > end {
			continue
		}

		// If this extent is tiny and request is not tiny, discard it: more efficient to do a few larger queries.
		// Hopefully tiny request can make tiny extent into not-so-tiny extent.

		// However if the step is large enough, the split_query_by_interval middleware would generate a query with same start and end.
		// For example, if the step size is more than 12h and the interval is 24h.
		// This means the extent's start and end time would be same, even if the timerange covers several hours.
		if (req.GetStart() != req.GetEnd()) && ((end - start) > s.minCacheExtent) && (extent.End-extent.Start < s.minCacheExtent) {
			continue
		}

		// If there is a bit missing at the front, make a request for that.
		if start < extent.Start {
			r := req.WithStartEndForCache(time.UnixMilli(start), time.UnixMilli(extent.Start))
			requests = append(requests, r)
		}
		res, err := extent.toResponse()
		if err != nil {
			return nil, nil, err
		}
		// extract the overlap from the cached extent.
		cachedResponses = append(cachedResponses, s.extractor.Extract(start, end, res, extent.GetStart(), extent.GetEnd()))
		start = extent.End
	}

	// Lastly, make a request for any data missing at the end.
	if start < req.GetEnd().UnixMilli() {
		r := req.WithStartEndForCache(time.UnixMilli(start), time.UnixMilli(end))
		requests = append(requests, r)
	}

	// If start and end are the same (valid in promql), start == req.GetEnd() and we won't do the query.
	// But we should only do the request if we don't have a valid cached response for it.
	if req.GetStart() == req.GetEnd() && len(cachedResponses) == 0 {
		requests = append(requests, req)
	}

	return requests, cachedResponses, nil
}
// filterRecentExtents truncates extents that reach into the freshness window
// (now - maxCacheFreshness) so that recent — potentially still changing —
// data is never written to the cache.
func (s ResultsCache) filterRecentExtents(req Request, maxCacheFreshness time.Duration, extents []Extent) ([]Extent, error) {
	// Align the cutoff down to a multiple of the request step (min 1).
	step := math.Max64(1, req.GetStep())
	maxCacheTime := (int64(model.Now().Add(-maxCacheFreshness)) / step) * step
	for i := range extents {
		// Never cache data for the latest freshness period.
		if extents[i].End > maxCacheTime {
			extents[i].End = maxCacheTime
			res, err := extents[i].toResponse()
			if err != nil {
				return nil, err
			}
			// Re-extract only the still-cacheable portion and re-pack it.
			extracted := s.extractor.Extract(extents[i].GetStart(), maxCacheTime, res, extents[i].GetStart(), extents[i].GetEnd())
			anyResp, err := types.MarshalAny(extracted)
			if err != nil {
				return nil, err
			}
			extents[i].Response = anyResp
		}
	}
	return extents, nil
}
// get fetches and unmarshals the cached extents for key. The boolean is
// false on a miss, an unmarshal error, a key mismatch (hash collision), or
// when any extent carries a nil response (old proto schema) — all of which
// force the caller to treat the lookup as a miss and refresh the entry.
func (s ResultsCache) get(ctx context.Context, key string) ([]Extent, bool) {
	found, bufs, _, _ := s.cache.Fetch(ctx, []string{cache.HashKey(key)})
	if len(found) != 1 {
		return nil, false
	}

	var resp CachedResponse
	sp, ctx := opentracing.StartSpanFromContext(ctx, "unmarshal-extent") //nolint:ineffassign,staticcheck
	defer sp.Finish()

	log := spanlogger.FromContext(ctx)
	defer log.Finish()

	log.LogFields(otlog.Int("bytes", len(bufs[0])))

	if err := proto.Unmarshal(bufs[0], &resp); err != nil {
		level.Error(log).Log("msg", "error unmarshalling cached value", "err", err)
		log.Error(err)
		return nil, false
	}

	if resp.Key != key {
		return nil, false
	}

	// Refreshes the cache if it contains an old proto schema.
	for _, e := range resp.Extents {
		if e.Response == nil {
			return nil, false
		}
	}

	return resp.Extents, true
}
// put stores the given extents under key. Failures are logged and otherwise
// ignored: the cache write is best-effort.
func (s ResultsCache) put(ctx context.Context, key string, extents []Extent) {
	entry := CachedResponse{
		Key:     key,
		Extents: extents,
	}
	buf, err := proto.Marshal(&entry)
	if err != nil {
		level.Error(s.logger).Log("msg", "error marshalling cached value", "err", err)
		return
	}

	_ = s.cache.Store(ctx, []string{cache.HashKey(key)}, [][]byte{buf})
}
// jaegerTraceID returns the Jaeger trace ID of the span carried by ctx, or
// the empty string when no Jaeger span is present.
func jaegerTraceID(ctx context.Context) string {
	span := opentracing.SpanFromContext(ctx)
	if span == nil {
		return ""
	}

	jaegerCtx, ok := span.Context().(jaeger.SpanContext)
	if !ok {
		return ""
	}

	return jaegerCtx.TraceID().String()
}
// toResponse unpacks the extent's Any-encoded payload into a Response.
func (e *Extent) toResponse() (Response, error) {
	msg, err := types.EmptyAny(e.Response)
	if err != nil {
		return nil, err
	}

	if err := types.UnmarshalAny(e.Response, msg); err != nil {
		return nil, err
	}

	res, ok := msg.(Response)
	if !ok {
		return nil, fmt.Errorf("bad cached type")
	}
	return res, nil
}

@ -0,0 +1,605 @@
package resultscache
import (
"context"
"strconv"
"testing"
"time"
"github.com/go-kit/log"
"github.com/gogo/protobuf/types"
"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/user"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/util/constants"
)
// day is the split interval used as the cache-key interval in these tests.
const day = 24 * time.Hour

var (
	// parsedRequest is a representative range request reused across tests.
	parsedRequest = &MockRequest{
		Start: time.UnixMilli(1536673680 * 1e3),
		End:   time.UnixMilli(1536716898 * 1e3),
		Step:  120 * 1e3,
		Query: "sum(container_memory_rss) by (namespace)",
	}

	// parsedResponse is the canonical response paired with parsedRequest.
	parsedResponse = &MockResponse{
		Labels: []*MockLabelsPair{
			{Name: "foo", Value: "bar"},
		},
		Samples: []*MockSample{
			{Value: 137, TimestampMs: 1536673680000},
			{Value: 137, TimestampMs: 1536673780000},
		},
	}
)
// TestPartition verifies that partition splits a request into downstream
// requests for uncached gaps plus responses extracted from overlapping
// cached extents, including the tiny-extent and single-step edge cases.
func TestPartition(t *testing.T) {
	for _, tc := range []struct {
		name                   string
		input                  Request
		prevCachedResponse     []Extent
		expectedRequests       []Request
		expectedCachedResponse []Response
	}{
		{
			name: "Test a complete hit.",
			input: &MockRequest{
				Start: time.UnixMilli(0),
				End:   time.UnixMilli(100),
			},
			prevCachedResponse: []Extent{
				mkExtent(0, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(0, 100, 10),
			},
		},
		{
			name: "Test with a complete miss.",
			input: &MockRequest{
				Start: time.UnixMilli(0),
				End:   time.UnixMilli(100),
			},
			prevCachedResponse: []Extent{
				mkExtent(110, 210),
			},
			expectedRequests: []Request{
				&MockRequest{
					Start: time.UnixMilli(0),
					End:   time.UnixMilli(100),
				},
			},
		},
		{
			name: "Test a partial hit.",
			input: &MockRequest{
				Start: time.UnixMilli(0),
				End:   time.UnixMilli(100),
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 100),
			},
			expectedRequests: []Request{
				&MockRequest{
					Start: time.UnixMilli(0),
					End:   time.UnixMilli(50),
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(50, 100, 10),
			},
		},
		{
			name: "Test multiple partial hits.",
			input: &MockRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(200),
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 120),
				mkExtent(160, 250),
			},
			expectedRequests: []Request{
				&MockRequest{
					Start: time.UnixMilli(120),
					End:   time.UnixMilli(160),
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 120, 10),
				mkAPIResponse(160, 200, 10),
			},
		},
		{
			name: "Partial hits with tiny gap.",
			input: &MockRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(160),
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 120),
				mkExtent(122, 130),
			},
			expectedRequests: []Request{
				&MockRequest{
					Start: time.UnixMilli(120),
					End:   time.UnixMilli(160),
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 120, 10),
			},
		},
		{
			name: "Extent is outside the range and the request has a single step (same start and end).",
			input: &MockRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(100),
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 90),
			},
			expectedRequests: []Request{
				&MockRequest{
					Start: time.UnixMilli(100),
					End:   time.UnixMilli(100),
				},
			},
		},
		{
			name: "Test when hit has a large step and only a single sample extent.",
			// If there is a only a single sample in the split interval, start and end will be the same.
			input: &MockRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(100),
			},
			prevCachedResponse: []Extent{
				mkExtent(100, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 105, 10),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			s := ResultsCache{
				extractor:      MockExtractor{},
				minCacheExtent: 10,
			}
			reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse)
			require.Nil(t, err)
			require.Equal(t, tc.expectedRequests, reqs)
			require.Equal(t, tc.expectedCachedResponse, resps)
		})
	}
}
// TestHandleHit verifies how handleHit merges cached extents with freshly
// fetched gaps: dropping tiny extents, extending extents left/right, fusing
// fragments when the request fills a hole, and tolerating a bad
// (nil-response) extent that is shadowed by a larger good one.
func TestHandleHit(t *testing.T) {
	for _, tc := range []struct {
		name                       string
		input                      Request
		cachedEntry                []Extent
		expectedUpdatedCachedEntry []Extent
	}{
		{
			name: "Should drop tiny extent that overlaps with non-tiny request only",
			input: &MockRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(120),
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(110, 150, 5),
				mkExtentWithStep(160, 165, 5),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 150, 5),
				mkExtentWithStep(160, 165, 5),
			},
		},
		{
			name: "Should replace tiny extents that are cover by bigger request",
			input: &MockRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(200),
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(110, 115, 5),
				mkExtentWithStep(120, 125, 5),
				mkExtentWithStep(220, 225, 5),
				mkExtentWithStep(240, 250, 5),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 200, 5),
				mkExtentWithStep(220, 225, 5),
				mkExtentWithStep(240, 250, 5),
			},
		},
		{
			name: "Should not drop tiny extent that completely overlaps with tiny request",
			input: &MockRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(105),
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(160, 165, 5),
			},
			expectedUpdatedCachedEntry: nil, // no cache update need, request fulfilled using cache
		},
		{
			name: "Should not drop tiny extent that partially center-overlaps with tiny request",
			input: &MockRequest{
				Start: time.UnixMilli(106),
				End:   time.UnixMilli(108),
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(104, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: nil, // no cache update need, request fulfilled using cache
		},
		{
			name: "Should not drop tiny extent that partially left-overlaps with tiny request",
			input: &MockRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(106),
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(104, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(100, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
		},
		{
			name: "Should not drop tiny extent that partially right-overlaps with tiny request",
			input: &MockRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(106),
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(98, 102, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(98, 106, 2),
				mkExtentWithStep(160, 166, 2),
			},
		},
		{
			name: "Should merge fragmented extents if request fills the hole",
			input: &MockRequest{
				Start: time.UnixMilli(40),
				End:   time.UnixMilli(80),
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 20, 20),
				mkExtentWithStep(80, 100, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 100, 20),
			},
		},
		{
			name: "Should left-extend extent if request starts earlier than extent in cache",
			input: &MockRequest{
				Start: time.UnixMilli(40),
				End:   time.UnixMilli(80),
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(40, 160, 20),
			},
		},
		{
			name: "Should right-extend extent if request ends later than extent in cache",
			input: &MockRequest{
				Start: time.UnixMilli(100),
				End:   time.UnixMilli(180),
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 180, 20),
			},
		},
		{
			name: "Should not throw error if complete-overlapped smaller Extent is erroneous",
			input: &MockRequest{
				// This request is carefully crated such that cachedEntry is not used to fulfill
				// the request.
				Start: time.UnixMilli(160),
				End:   time.UnixMilli(180),
				Step:  20,
			},
			cachedEntry: []Extent{
				{
					Start: 60,
					End:   80,

					// if the optimization of "sorting by End when Start of 2 Extents are equal" is not there, this nil
					// response would cause error during Extents merge phase. With the optimization
					// this bad Extent should be dropped. The good Extent below can be used instead.
					Response: nil,
				},
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 180, 20),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			// The downstream handler echoes back a synthetic response for the
			// exact requested range, so merges are fully deterministic.
			sut := ResultsCache{
				extractor:         MockExtractor{},
				minCacheExtent:    10,
				limits:            mockLimits{},
				merger:            MockMerger{},
				parallelismForReq: func(_ context.Context, tenantIDs []string, r Request) int { return 1 },
				next: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
					return mkAPIResponse(req.GetStart().UnixMilli(), req.GetEnd().UnixMilli(), req.GetStep()), nil
				}),
			}

			ctx := user.InjectOrgID(context.Background(), "1")
			response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0)
			require.NoError(t, err)

			expectedResponse := mkAPIResponse(tc.input.GetStart().UnixMilli(), tc.input.GetEnd().UnixMilli(), tc.input.GetStep())
			require.Equal(t, expectedResponse, response, "response does not match the expectation")
			require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, "updated cache entry does not match the expectation")
		})
	}
}
// TestResultsCacheMaxFreshness verifies that requests starting within the
// per-tenant MaxCacheFreshness window bypass the cache (hitting the
// downstream handler), while older requests are served from the cache.
func TestResultsCacheMaxFreshness(t *testing.T) {
	modelNow := model.Now()
	for i, tc := range []struct {
		fakeLimits       Limits
		Handler          HandlerFunc
		expectedResponse *MockResponse
	}{
		{
			// Request is older than the 5s freshness window, so it is served
			// from the pre-filled cache; a nil Handler proves no downstream call.
			fakeLimits:       mockLimits{maxCacheFreshness: 5 * time.Second},
			Handler:          nil,
			expectedResponse: mkAPIResponse(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3), 10),
		},
		{
			// should not lookup cache because per-tenant override will be applied
			fakeLimits: mockLimits{maxCacheFreshness: 10 * time.Minute},
			Handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) {
				return parsedResponse, nil
			}),
			expectedResponse: parsedResponse,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			var cfg Config
			flagext.DefaultValues(&cfg)
			cfg.CacheConfig.Cache = cache.NewMockCache()
			c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
			require.NoError(t, err)
			fakeLimits := tc.fakeLimits
			rc := NewResultsCache(
				log.NewNopLogger(),
				c,
				tc.Handler,
				ConstSplitter(day),
				fakeLimits,
				MockMerger{},
				MockExtractor{},
				nil,
				nil,
				func(_ context.Context, tenantIDs []string, r Request) int {
					return 10
				},
				nil,
				false,
			)
			require.NoError(t, err)

			// create cache with handler
			ctx := user.InjectOrgID(context.Background(), "1")

			// create request with start end within the key extents
			req := parsedRequest.WithStartEndForCache(time.UnixMilli(int64(modelNow)-(50*1e3)), time.UnixMilli(int64(modelNow)-(10*1e3)))

			// fill cache
			key := ConstSplitter(day).GenerateCacheKey(context.Background(), "1", req)
			rc.put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))})

			resp, err := rc.Do(ctx, req)
			require.NoError(t, err)
			require.Equal(t, tc.expectedResponse, resp)
		})
	}
}
// Test_resultsCache_MissingData verifies that get treats entries containing
// any extent with a nil response (old proto schema) as cache misses, while
// entries with fully populated extents are returned as hits.
func Test_resultsCache_MissingData(t *testing.T) {
	cfg := Config{
		CacheConfig: cache.Config{
			Cache: cache.NewMockCache(),
		},
	}
	c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
	require.NoError(t, err)
	rc := NewResultsCache(
		log.NewNopLogger(),
		c,
		nil,
		ConstSplitter(day),
		mockLimits{},
		MockMerger{},
		MockExtractor{},
		nil,
		nil,
		func(_ context.Context, tenantIDs []string, r Request) int {
			return 10
		},
		nil,
		false,
	)
	require.NoError(t, err)
	ctx := context.Background()

	// fill up the cache
	rc.put(ctx, "empty", []Extent{{
		Start:    100,
		End:      200,
		Response: nil,
	}})
	rc.put(ctx, "notempty", []Extent{mkExtent(100, 120)})
	rc.put(ctx, "mixed", []Extent{mkExtent(100, 120), {
		Start:    120,
		End:      200,
		Response: nil,
	}})

	extents, hit := rc.get(ctx, "empty")
	require.Empty(t, extents)
	require.False(t, hit)

	extents, hit = rc.get(ctx, "notempty")
	require.Equal(t, len(extents), 1)
	require.True(t, hit)

	// A single nil-response extent poisons the whole entry.
	extents, hit = rc.get(ctx, "mixed")
	require.Equal(t, len(extents), 0)
	require.False(t, hit)
}
// mkAPIResponse builds a MockResponse with one sample every step
// milliseconds from start to end inclusive; each sample's value equals its
// timestamp.
func mkAPIResponse(start, end, step int64) *MockResponse {
	var samples []*MockSample
	for ts := start; ts <= end; ts += step {
		sample := &MockSample{
			TimestampMs: ts,
			Value:       float64(ts),
		}
		samples = append(samples, sample)
	}

	return &MockResponse{
		Labels: []*MockLabelsPair{
			{Name: "foo", Value: "bar"},
		},
		Samples: samples,
	}
}
// mkExtent builds an extent for [start, end] with the default 10ms step.
func mkExtent(start, end int64) Extent {
	return mkExtentWithStep(start, end, 10)
}
// mkExtentWithStep builds an extent for [start, end] whose payload is a
// synthetic response with the given step. Panics on marshal failure, which
// is acceptable in test fixtures.
func mkExtentWithStep(start, end, step int64) Extent {
	anyRes, err := types.MarshalAny(mkAPIResponse(start, end, step))
	if err != nil {
		panic(err)
	}
	return Extent{
		Start:    start,
		End:      end,
		Response: anyRes,
	}
}
// WithStartEndForCache returns a shallow copy of the request with the given
// start and end times, implementing the Request interface.
func (r *MockRequest) WithStartEndForCache(start time.Time, end time.Time) Request {
	clone := *r
	clone.Start = start
	clone.End = end
	return &clone
}
// MockMerger merges MockResponses for tests.
type MockMerger struct{}

// MergeResponse concatenates the samples of all responses, sorts them by
// timestamp, drops duplicate timestamps, and keeps the labels of the first
// response.
func (m MockMerger) MergeResponse(responses ...Response) (Response, error) {
	merged := make([]*MockSample, 0, len(responses)*2)
	for _, response := range responses {
		merged = append(merged, response.(*MockResponse).Samples...)
	}

	// Order by timestamp, then remove consecutive duplicates.
	slices.SortFunc(merged, func(a, b *MockSample) int {
		switch {
		case a.TimestampMs < b.TimestampMs:
			return -1
		case a.TimestampMs > b.TimestampMs:
			return 1
		default:
			return 0
		}
	})
	merged = slices.CompactFunc(merged, func(a, b *MockSample) bool {
		return a.TimestampMs == b.TimestampMs
	})

	return &MockResponse{
		Labels:  responses[0].(*MockResponse).Labels,
		Samples: merged,
	}, nil
}
// MockExtractor extracts sub-ranges of MockResponses for tests.
type MockExtractor struct{}

// Extract returns a copy of res containing only the samples whose
// timestamps fall within [start, end].
func (m MockExtractor) Extract(start, end int64, res Response, _, _ int64) Response {
	source := res.(*MockResponse)
	filtered := make([]*MockSample, 0, len(source.Samples))
	for _, sample := range source.Samples {
		if sample.TimestampMs >= start && sample.TimestampMs <= end {
			filtered = append(filtered, sample)
		}
	}
	return &MockResponse{
		Labels:  source.Labels,
		Samples: filtered,
	}
}
// mockLimits is a Limits stub that returns a fixed max cache freshness for
// every tenant.
type mockLimits struct {
	maxCacheFreshness time.Duration
}

// MaxCacheFreshness returns the configured freshness regardless of tenant.
func (m mockLimits) MaxCacheFreshness(context.Context, string) time.Duration {
	return m.maxCacheFreshness
}

@ -0,0 +1,41 @@
package resultscache
import (
"context"
"flag"
"time"
"github.com/pkg/errors"
"github.com/grafana/loki/pkg/storage/chunk/cache"
)
// Config is the config for the results cache.
type Config struct {
	// CacheConfig configures the backing cache client.
	CacheConfig cache.Config `yaml:"cache"`
	// Compression selects the payload compression: "snappy" or "" (disabled).
	Compression string `yaml:"compression"`
}
// RegisterFlagsWithPrefix registers the cache and compression flags, with
// prefix prepended to every flag name.
func (cfg *Config) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
	cfg.CacheConfig.RegisterFlagsWithPrefix(prefix, "", f)

	f.StringVar(&cfg.Compression, prefix+"compression", "", "Use compression in cache. The default is an empty value '', which disables compression. Supported values are: 'snappy' and ''.")
}
// RegisterFlags registers the results cache flags without a prefix.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
	cfg.RegisterFlagsWithPrefix(f, "")
}
// Validate checks that the configured compression type is supported
// ("snappy" or the empty string).
func (cfg *Config) Validate() error {
	if cfg.Compression != "" && cfg.Compression != "snappy" {
		return errors.Errorf("unsupported compression type: %s", cfg.Compression)
	}
	return nil
}
// Limits exposes the per-tenant limits the results cache needs.
type Limits interface {
	// MaxCacheFreshness is how recent a result may be before it is excluded from caching.
	MaxCacheFreshness(ctx context.Context, tenantID string) time.Duration
}

@ -0,0 +1,56 @@
package resultscache
import (
"context"
"time"
"github.com/gogo/protobuf/proto"
)
// Request is a cacheable query request spanning a time range that can be
// cloned with a narrower range for partial cache fills.
type Request interface {
	proto.Message
	// GetStart returns the start time of the request.
	GetStart() time.Time
	// GetEnd returns the end time of the request.
	GetEnd() time.Time
	// GetStep returns the step of the request in milliseconds.
	GetStep() int64
	// GetQuery returns the query of the request.
	GetQuery() string
	// GetCachingOptions returns the caching options.
	GetCachingOptions() CachingOptions
	// WithStartEndForCache clones the current request with different start and end timestamps.
	WithStartEndForCache(start time.Time, end time.Time) Request
}
type Response interface {
proto.Message
}
// ResponseMerger is used by middlewares making multiple requests to merge back all responses into a single one.
type ResponseMerger interface {
// MergeResponse merges responses from multiple requests into a single Response
MergeResponse(...Response) (Response, error)
}
type Handler interface {
Do(ctx context.Context, req Request) (Response, error)
}
// Extractor is used by the cache to extract a subset of a response from a cache entry.
type Extractor interface {
	// Extract extracts a subset of a response from the `start` and `end` timestamps in milliseconds
	// in the `res` response which spans from `resStart` to `resEnd`.
	Extract(start, end int64, res Response, resStart, resEnd int64) Response
}

// KeyGenerator generates cache keys. This is a useful interface for downstream
// consumers who wish to implement their own strategies.
type KeyGenerator interface {
	// GenerateCacheKey builds the cache key for the given tenant and request.
	GenerateCacheKey(ctx context.Context, userID string, r Request) string
}
// CacheGenNumberLoader loads the cache generation number for a set of
// tenants; bumping the number invalidates previously cached results.
type CacheGenNumberLoader interface {
	GetResultsCacheGenNumber(tenantIDs []string) string
	// Stop releases any resources held by the loader.
	Stop()
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,41 @@
syntax = "proto3";
package resultscache;
import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "types.proto";
option go_package = "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
// MockRequest is a test-only implementation of the cache's Request
// interface. The real request types live in queryrangebase, which cannot
// be imported here without creating a circular dependency.
message MockRequest {
  string path = 1;
  // Start of the time range covered by the request.
  google.protobuf.Timestamp start = 2 [
    (gogoproto.stdtime) = true,
    (gogoproto.nullable) = false
  ];
  // End of the time range covered by the request.
  google.protobuf.Timestamp end = 3 [
    (gogoproto.stdtime) = true,
    (gogoproto.nullable) = false
  ];
  int64 step = 4;
  // NOTE: field number 5 is intentionally skipped.
  string query = 6;
  CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false];
}
// MockResponse is the test-only counterpart of MockRequest: a minimal
// Response used to exercise the cache without importing queryrangebase.
message MockResponse {
  repeated MockLabelsPair labels = 1;
  repeated MockSample samples = 2;
}

// MockLabelsPair is a single name/value label used by MockResponse.
message MockLabelsPair {
  string name = 1;
  string value = 2;
}

// MockSample is a single value with its timestamp in milliseconds.
message MockSample {
  double value = 1;
  int64 timestamp_ms = 2;
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,34 @@
syntax = "proto3";
package resultscache;
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
option go_package = "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
// Defined here to prevent circular imports between logproto & queryrangebase
// CachingOptions controls whether a request's results may be cached.
message CachingOptions {
  // When true, results for the request are not cached.
  bool disabled = 1;
}
// CachedResponse is the value stored in the results cache for one key:
// the key itself plus the cached extents covering parts of the range.
message CachedResponse {
  string key = 1 [(gogoproto.jsontag) = "key"];

  // List of cached responses; non-overlapping and in order.
  repeated Extent extents = 2 [
    (gogoproto.nullable) = false,
    (gogoproto.jsontag) = "extents"
  ];
}

// Extent is a single cached time span and the response that covers it.
message Extent {
  // Start timestamp of the extent.
  int64 start = 1 [(gogoproto.jsontag) = "start"];
  // End timestamp of the extent.
  int64 end = 2 [(gogoproto.jsontag) = "end"];
  // reserved the previous key to ensure cache transition
  reserved 3;
  // Trace ID of the request that filled this extent; omitted from JSON.
  string trace_id = 4 [(gogoproto.jsontag) = "-"];
  // The cached response, packed as a protobuf Any so any concrete
  // Response type can be stored.
  google.protobuf.Any response = 5 [(gogoproto.jsontag) = "response"];
}

@ -0,0 +1,67 @@
package resultscache
import (
"context"
)
// HandlerFunc adapts an ordinary function to the Handler interface,
// mirroring the http.HandlerFunc pattern.
type HandlerFunc func(context.Context, Request) (Response, error)

// Do implements Handler.
func (q HandlerFunc) Do(ctx context.Context, req Request) (Response, error) {
	return q(ctx, req)
}
// RequestResponse contains a request response and the respective request that was used.
type RequestResponse struct {
	// Request is the request that produced Response.
	Request Request
	// Response is the downstream result for Request.
	Response Response
}
// DoRequests executes a list of requests in parallel, using at most
// `parallelism` concurrent calls to downstream. It returns the responses
// (in completion order, paired with their requests) and the first error
// encountered, if any. On error the shared context is cancelled, but the
// remaining in-flight requests are still drained so no goroutine leaks.
func DoRequests(ctx context.Context, downstream Handler, reqs []Request, parallelism int) ([]RequestResponse, error) {
	// If one of the requests fail, we want to be able to cancel the rest of them.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Feed all requests to a bounded intermediate channel to limit parallelism.
	intermediate := make(chan Request)
	go func() {
		for _, req := range reqs {
			intermediate <- req
		}
		close(intermediate)
	}()

	respChan, errChan := make(chan RequestResponse), make(chan error)
	if parallelism > len(reqs) {
		parallelism = len(reqs)
	}
	// Guard against a non-positive parallelism: with zero workers the
	// receive loop below would block forever waiting for len(reqs) results.
	if parallelism < 1 && len(reqs) > 0 {
		parallelism = 1
	}
	for i := 0; i < parallelism; i++ {
		go func() {
			for req := range intermediate {
				resp, err := downstream.Do(ctx, req)
				if err != nil {
					errChan <- err
				} else {
					respChan <- RequestResponse{req, resp}
				}
			}
		}()
	}

	resps := make([]RequestResponse, 0, len(reqs))
	var firstErr error
	// Every request yields exactly one message on respChan or errChan,
	// so receiving len(reqs) times drains all workers.
	for range reqs {
		select {
		case resp := <-respChan:
			resps = append(resps, resp)
		case err := <-errChan:
			if firstErr == nil {
				cancel()
				firstErr = err
			}
		}
	}

	return resps, firstErr
}
Loading…
Cancel
Save