Enable results cache for volume queries (#10403)

This PR enables the results cache for volume queries made against the
`index/volume` and `index/volume_range` endpoints.
Trevor Whitney authored 2 years ago; committed by GitHub
parent 60f57607b7
commit a05744a385
  1. docs/sources/configure/_index.md (18 lines changed)
  2. pkg/logql/metrics.go (10 lines changed)
  3. pkg/logqlmodel/stats/context.go (29 lines changed)
  4. pkg/logqlmodel/stats/stats.pb.go (208 lines changed)
  5. pkg/logqlmodel/stats/stats.proto (4 lines changed)
  6. pkg/loki/config_wrapper.go (6 lines changed)
  7. pkg/loki/config_wrapper_test.go (61 lines changed)
  8. pkg/loki/modules.go (16 lines changed)
  9. pkg/querier/http_test.go (11 lines changed)
  10. pkg/querier/queryrange/codec_test.go (19 lines changed)
  11. pkg/querier/queryrange/prometheus_test.go (9 lines changed)
  12. pkg/querier/queryrange/querysharding.go (3 lines changed)
  13. pkg/querier/queryrange/roundtrip.go (94 lines changed)
  14. pkg/querier/queryrange/roundtrip_test.go (11 lines changed)
  15. pkg/querier/queryrange/stats.go (2 lines changed)
  16. pkg/querier/queryrange/volume.go (0 lines changed)
  17. pkg/querier/queryrange/volume_cache.go (134 lines changed)
  18. pkg/querier/queryrange/volume_cache_test.go (334 lines changed)
  19. pkg/querier/queryrange/volume_test.go (0 lines changed)
  20. pkg/util/marshal/legacy/marshal_test.go (9 lines changed)
  21. pkg/util/marshal/marshal_test.go (27 lines changed)

@ -824,6 +824,23 @@ index_stats_results_cache:
# compression. Supported values are: 'snappy' and ''.
# CLI flag: -frontend.index-stats-results-cache.compression
[compression: <string> | default = ""]
# Cache volume query results.
# CLI flag: -querier.cache-volume-results
[cache_volume_results: <boolean> | default = false]
# If a cache config is not specified and cache_volume_results is true, the
# config for the results cache is used.
volume_results_cache:
# The cache block configures the cache backend.
# The CLI flags prefix for this block configuration is:
# frontend.volume-results-cache
[cache: <cache_config>]
# Use compression in cache. The default is an empty value '', which disables
# compression. Supported values are: 'snappy' and ''.
# CLI flag: -frontend.volume-results-cache.compression
[compression: <string> | default = ""]
```
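The fallback described above (an unconfigured `volume_results_cache` reusing the query-range results cache settings when `cache_volume_results` is true) is implemented later in this diff in `NewTripperware` and `applyFIFOCacheConfig`. A minimal Go sketch of that rule, using stand-in types rather than Loki's real config structs:

```go
// Sketch only, not Loki's actual code: when volume result caching is enabled
// but no dedicated cache backend is configured, reuse the results cache config.
package main

import "fmt"

type cacheConfig struct {
	MemcachedHost string // stand-ins for the real cache backend settings
	RedisEndpoint string
}

func isConfigured(c cacheConfig) bool {
	return c.MemcachedHost != "" || c.RedisEndpoint != ""
}

func effectiveVolumeCacheConfig(cacheVolumeResults bool, volumeCfg, resultsCfg cacheConfig) (cacheConfig, bool) {
	if !cacheVolumeResults {
		return cacheConfig{}, false // volume result caching disabled
	}
	if isConfigured(volumeCfg) {
		return volumeCfg, true // a dedicated volume_results_cache backend wins
	}
	return resultsCfg, true // otherwise fall back to the results cache settings
}

func main() {
	results := cacheConfig{MemcachedHost: "memcached.host.org"}
	cfg, ok := effectiveVolumeCacheConfig(true, cacheConfig{}, results)
	fmt.Println(ok, cfg.MemcachedHost) // true memcached.host.org
}
```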
### ruler
@ -3902,6 +3919,7 @@ The cache block configures the cache backend. The supported CLI flags `<prefix>`
- `frontend`
- `frontend.index-stats-results-cache`
- `frontend.volume-results-cache`
- `store.chunks-cache`
- `store.index-cache-read`
- `store.index-cache-write`

@ -150,6 +150,9 @@ func RecordRangeAndInstantQueryMetrics(
"cache_stats_results_req", stats.Caches.StatsResult.EntriesRequested,
"cache_stats_results_hit", stats.Caches.StatsResult.EntriesFound,
"cache_stats_results_download_time", stats.Caches.StatsResult.CacheDownloadTime(),
"cache_volume_results_req", stats.Caches.VolumeResult.EntriesRequested,
"cache_volume_results_hit", stats.Caches.VolumeResult.EntriesFound,
"cache_volume_results_download_time", stats.Caches.VolumeResult.CacheDownloadTime(),
"cache_result_req", stats.Caches.Result.EntriesRequested,
"cache_result_hit", stats.Caches.Result.EntriesFound,
"cache_result_download_time", stats.Caches.Result.CacheDownloadTime(),
@ -295,6 +298,7 @@ func RecordVolumeQueryMetrics(
ctx context.Context,
log log.Logger,
start, end time.Time,
query string,
status string,
stats logql_stats.Result,
) {
@ -310,15 +314,19 @@ func RecordVolumeQueryMetrics(
latencyType = latencyTypeSlow
}
// we also log queries, useful for troubleshooting slow queries.
level.Info(logger).Log(
"latency", latencyType,
"query_type", queryType,
"query", query,
"query_hash", HashedQuery(query),
"length", end.Sub(start),
"duration", time.Duration(int64(stats.Summary.ExecTime*float64(time.Second))),
"status", status,
"splits", stats.Summary.Splits,
"total_entries", stats.Summary.TotalEntriesReturned,
"cache_volume_results_req", stats.Caches.VolumeResult.EntriesRequested,
"cache_volume_results_hit", stats.Caches.VolumeResult.EntriesFound,
"cache_volume_results_download_time", stats.Caches.VolumeResult.CacheDownloadTime(),
)
execLatency.WithLabelValues(status, queryType, "").

@ -55,11 +55,12 @@ type Context struct {
type CacheType string
const (
ChunkCache CacheType = "chunk" //nolint:staticcheck
IndexCache = "index"
ResultCache = "result"
StatsResultCache = "stats-result"
WriteDedupeCache = "write-dedupe"
ChunkCache CacheType = "chunk" //nolint:staticcheck
IndexCache = "index"
ResultCache = "result"
StatsResultCache = "stats-result"
VolumeResultCache = "volume-result"
WriteDedupeCache = "write-dedupe"
)
// NewContext creates a new statistics context
@ -92,10 +93,11 @@ func (c *Context) Ingester() Ingester {
// Caches returns the cache statistics accumulated so far.
func (c *Context) Caches() Caches {
return Caches{
Chunk: c.caches.Chunk,
Index: c.caches.Index,
Result: c.caches.Result,
StatsResult: c.caches.StatsResult,
Chunk: c.caches.Chunk,
Index: c.caches.Index,
Result: c.caches.Result,
StatsResult: c.caches.StatsResult,
VolumeResult: c.caches.VolumeResult,
}
}
@ -207,6 +209,7 @@ func (c *Caches) Merge(m Caches) {
c.Index.Merge(m.Index)
c.Result.Merge(m.Result)
c.StatsResult.Merge(m.StatsResult)
c.VolumeResult.Merge(m.VolumeResult)
}
func (c *Cache) Merge(m Cache) {
@ -426,6 +429,8 @@ func (c *Context) getCacheStatsByType(t CacheType) *Cache {
stats = &c.caches.Result
case StatsResultCache:
stats = &c.caches.StatsResult
case VolumeResultCache:
stats = &c.caches.VolumeResult
default:
return nil
}
@ -501,6 +506,12 @@ func (c Caches) Log(log log.Logger) {
"Cache.StatsResult.EntriesStored", c.StatsResult.EntriesStored,
"Cache.StatsResult.BytesSent", humanize.Bytes(uint64(c.StatsResult.BytesSent)),
"Cache.StatsResult.BytesReceived", humanize.Bytes(uint64(c.StatsResult.BytesReceived)),
"Cache.VolumeResult.Requests", c.VolumeResult.Requests,
"Cache.VolumeResult.EntriesRequested", c.VolumeResult.EntriesRequested,
"Cache.VolumeResult.EntriesFound", c.VolumeResult.EntriesFound,
"Cache.VolumeResult.EntriesStored", c.VolumeResult.EntriesStored,
"Cache.VolumeResult.BytesSent", humanize.Bytes(uint64(c.VolumeResult.BytesSent)),
"Cache.VolumeResult.BytesReceived", humanize.Bytes(uint64(c.VolumeResult.BytesReceived)),
"Cache.Result.DownloadTime", c.Result.CacheDownloadTime(),
"Cache.Result.Requests", c.Result.Requests,
"Cache.Result.EntriesRequested", c.Result.EntriesRequested,

@ -95,10 +95,11 @@ func (m *Result) GetCaches() Caches {
}
type Caches struct {
Chunk Cache `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk"`
Index Cache `protobuf:"bytes,2,opt,name=index,proto3" json:"index"`
Result Cache `protobuf:"bytes,3,opt,name=result,proto3" json:"result"`
StatsResult Cache `protobuf:"bytes,4,opt,name=statsResult,proto3" json:"statsResult"`
Chunk Cache `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk"`
Index Cache `protobuf:"bytes,2,opt,name=index,proto3" json:"index"`
Result Cache `protobuf:"bytes,3,opt,name=result,proto3" json:"result"`
StatsResult Cache `protobuf:"bytes,4,opt,name=statsResult,proto3" json:"statsResult"`
VolumeResult Cache `protobuf:"bytes,5,opt,name=volumeResult,proto3" json:"volumeResult"`
}
func (m *Caches) Reset() { *m = Caches{} }
@ -161,6 +162,13 @@ func (m *Caches) GetStatsResult() Cache {
return Cache{}
}
func (m *Caches) GetVolumeResult() Cache {
if m != nil {
return m.VolumeResult
}
return Cache{}
}
// Summary is the summary of a query statistics.
type Summary struct {
// Total bytes processed per second.
@ -732,76 +740,78 @@ func init() {
func init() { proto.RegisterFile("pkg/logqlmodel/stats/stats.proto", fileDescriptor_6cdfe5d2aea33ebb) }
var fileDescriptor_6cdfe5d2aea33ebb = []byte{
// 1102 bytes of a gzipped FileDescriptorProto
// 1123 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0x4f, 0x6f, 0xe3, 0x44,
0x14, 0x8f, 0x5b, 0x9c, 0x74, 0x67, 0xdb, 0x6e, 0x77, 0xda, 0x65, 0x0d, 0x48, 0x76, 0x09, 0x42,
0x54, 0x02, 0x35, 0xe2, 0x8f, 0x84, 0x40, 0xac, 0x84, 0xdc, 0xa5, 0x52, 0xa5, 0x02, 0xe5, 0x15,
0x2e, 0xdc, 0x1c, 0x7b, 0x9a, 0x98, 0x3a, 0x76, 0xea, 0xb1, 0x61, 0x7b, 0xe3, 0x23, 0xf0, 0x2d,
0xe0, 0xc2, 0x89, 0x2f, 0xb1, 0xc7, 0xde, 0xd8, 0x93, 0x45, 0xd3, 0x0b, 0xf8, 0xb4, 0x12, 0x77,
0x84, 0xe6, 0xcd, 0xc4, 0xff, 0xe2, 0xa8, 0xbd, 0xc4, 0xf3, 0x7e, 0x7f, 0xe6, 0x8d, 0xc7, 0x7e,
0xcf, 0x13, 0xb2, 0x3b, 0x3d, 0x1f, 0x0d, 0x82, 0x68, 0x74, 0x11, 0x4c, 0x22, 0x8f, 0x05, 0x03,
0x9e, 0x38, 0x09, 0x97, 0xbf, 0xfb, 0xd3, 0x38, 0x4a, 0x22, 0xaa, 0x63, 0xf0, 0xfa, 0xce, 0x28,
0x1a, 0x45, 0x88, 0x0c, 0xc4, 0x48, 0x92, 0xfd, 0x7f, 0x35, 0xd2, 0x05, 0xc6, 0xd3, 0x20, 0xa1,
0x9f, 0x90, 0x1e, 0x4f, 0x27, 0x13, 0x27, 0xbe, 0x34, 0xb4, 0x5d, 0x6d, 0xef, 0xfe, 0x07, 0x9b,
0xfb, 0x72, 0x9a, 0x53, 0x89, 0xda, 0x0f, 0x9e, 0x67, 0x56, 0x27, 0xcf, 0xac, 0xb9, 0x0c, 0xe6,
0x03, 0x61, 0xbd, 0x48, 0x59, 0xec, 0xb3, 0xd8, 0x58, 0xa9, 0x59, 0xbf, 0x91, 0x68, 0x69, 0x55,
0x32, 0x98, 0x0f, 0xe8, 0x13, 0xb2, 0xe6, 0x87, 0x23, 0xc6, 0x13, 0x16, 0x1b, 0xab, 0xe8, 0x7d,
0xa0, 0xbc, 0x47, 0x0a, 0xb6, 0xb7, 0x94, 0xb9, 0x10, 0x42, 0x31, 0xa2, 0x1f, 0x91, 0xae, 0xeb,
0xb8, 0x63, 0xc6, 0x8d, 0x57, 0xd0, 0xbc, 0xa1, 0xcc, 0x07, 0x08, 0xda, 0x1b, 0xca, 0xaa, 0xa3,
0x08, 0x94, 0xb6, 0xff, 0x8f, 0x46, 0xba, 0x52, 0x41, 0xdf, 0x27, 0xba, 0x3b, 0x4e, 0xc3, 0x73,
0x75, 0xcf, 0xeb, 0x55, 0x7f, 0xc5, 0x2e, 0x24, 0x20, 0x2f, 0xc2, 0xe2, 0x87, 0x1e, 0x7b, 0xa6,
0xee, 0x75, 0x89, 0x05, 0x25, 0x20, 0x2f, 0x62, 0x99, 0x31, 0xee, 0xb2, 0xba, 0xc7, 0xba, 0x67,
0x53, 0x79, 0x94, 0x06, 0xd4, 0x95, 0x1e, 0x90, 0xfb, 0x28, 0x93, 0x0f, 0x48, 0xdd, 0x61, 0xdd,
0xba, 0xad, 0xac, 0x55, 0x21, 0x54, 0x83, 0xfe, 0x1f, 0x5d, 0xd2, 0x53, 0x4f, 0x90, 0x7e, 0x47,
0x1e, 0x0f, 0x2f, 0x13, 0xc6, 0x4f, 0xe2, 0xc8, 0x65, 0x9c, 0x33, 0xef, 0x84, 0xc5, 0xa7, 0xcc,
0x8d, 0x42, 0x0f, 0x6f, 0x7f, 0xd5, 0x7e, 0x23, 0xcf, 0xac, 0x65, 0x12, 0x58, 0x46, 0x88, 0x69,
0x03, 0x3f, 0x6c, 0x9d, 0x76, 0xa5, 0x9c, 0x76, 0x89, 0x04, 0x96, 0x11, 0xf4, 0x88, 0x6c, 0x27,
0x51, 0xe2, 0x04, 0x76, 0x2d, 0x2d, 0xee, 0xe0, 0xaa, 0xfd, 0x38, 0xcf, 0xac, 0x36, 0x1a, 0xda,
0xc0, 0x62, 0xaa, 0xe3, 0x5a, 0x2a, 0xdc, 0xd1, 0xea, 0x54, 0x75, 0x1a, 0xda, 0x40, 0xba, 0x47,
0xd6, 0xd8, 0x33, 0xe6, 0x7e, 0xeb, 0x4f, 0x98, 0xa1, 0xef, 0x6a, 0x7b, 0x9a, 0xbd, 0x2e, 0xde,
0xcd, 0x39, 0x06, 0xc5, 0x88, 0xbe, 0x4b, 0xee, 0x5d, 0xa4, 0x2c, 0x65, 0x28, 0xed, 0xa2, 0x74,
0x23, 0xcf, 0xac, 0x12, 0x84, 0x72, 0x48, 0xf7, 0x09, 0xe1, 0xe9, 0x50, 0x56, 0x05, 0x37, 0x7a,
0xb8, 0xb0, 0xcd, 0x3c, 0xb3, 0x2a, 0x28, 0x54, 0xc6, 0xf4, 0x98, 0xec, 0xe0, 0xea, 0xbe, 0x08,
0x13, 0xe4, 0x58, 0x92, 0xc6, 0x21, 0xf3, 0x8c, 0x35, 0x74, 0x1a, 0x79, 0x66, 0xb5, 0xf2, 0xd0,
0x8a, 0xd2, 0x3e, 0xe9, 0xf2, 0x69, 0xe0, 0x27, 0xdc, 0xb8, 0x87, 0x7e, 0x22, 0xde, 0x46, 0x89,
0x80, 0xba, 0xa2, 0x66, 0xec, 0xc4, 0x1e, 0x37, 0x48, 0x45, 0x83, 0x08, 0xa8, 0x6b, 0xb1, 0xaa,
0x93, 0x88, 0x27, 0x87, 0x7e, 0x90, 0xb0, 0x18, 0x77, 0xcf, 0xb8, 0xdf, 0x58, 0x55, 0x83, 0x87,
0x56, 0x94, 0x5e, 0x92, 0xb7, 0x10, 0xff, 0x2a, 0x0a, 0x8f, 0x44, 0x19, 0x31, 0xef, 0xd8, 0x19,
0xb2, 0x80, 0x37, 0x5e, 0x88, 0x75, 0x9c, 0xfc, 0x9d, 0x3c, 0xb3, 0xee, 0x22, 0x87, 0xbb, 0x88,
0xfa, 0x9f, 0x91, 0x9e, 0xea, 0x5d, 0xa2, 0xdc, 0x79, 0x12, 0xc5, 0xac, 0xd1, 0x21, 0x4e, 0x05,
0x56, 0x96, 0x3b, 0x4a, 0x40, 0x5e, 0xfa, 0xbf, 0xaf, 0x90, 0xb5, 0xa3, 0xb2, 0x45, 0xad, 0x63,
0x46, 0x60, 0xa2, 0x62, 0x65, 0xa5, 0xe9, 0xf6, 0x56, 0x9e, 0x59, 0x35, 0x1c, 0x6a, 0x11, 0x3d,
0x24, 0x14, 0xe3, 0x03, 0xd1, 0x72, 0xf8, 0x97, 0x4e, 0x82, 0x5e, 0x59, 0x4e, 0xaf, 0xe6, 0x99,
0xd5, 0xc2, 0x42, 0x0b, 0x56, 0x64, 0xb7, 0x31, 0xe6, 0xaa, 0x7a, 0xca, 0xec, 0x0a, 0x87, 0x5a,
0x44, 0x3f, 0x25, 0x9b, 0xe5, 0xbb, 0x7f, 0xca, 0xc2, 0x44, 0x95, 0x0a, 0xcd, 0x33, 0xab, 0xc1,
0x40, 0x23, 0x2e, 0xf7, 0x4b, 0xbf, 0xf3, 0x7e, 0xfd, 0xb9, 0x42, 0x74, 0xe4, 0x8b, 0xc4, 0xf2,
0x26, 0x80, 0x9d, 0xa9, 0xc6, 0x54, 0x26, 0x2e, 0x18, 0x68, 0xc4, 0xf4, 0x6b, 0xf2, 0xa8, 0x82,
0x3c, 0x8d, 0x7e, 0x0a, 0x83, 0xc8, 0xf1, 0x8a, 0x5d, 0x7b, 0x2d, 0xcf, 0xac, 0x76, 0x01, 0xb4,
0xc3, 0xe2, 0x19, 0xb8, 0x35, 0x0c, 0x2b, 0x79, 0xb5, 0x7c, 0x06, 0x8b, 0x2c, 0xb4, 0x60, 0xe5,
0x37, 0xa6, 0xd1, 0xc1, 0x05, 0xb6, 0xe4, 0x1b, 0x33, 0x4f, 0x0d, 0xec, 0x8c, 0x1f, 0xb2, 0xc4,
0x1d, 0x17, 0xfd, 0xa6, 0x9a, 0xba, 0xc6, 0x42, 0x0b, 0xd6, 0xff, 0x55, 0x27, 0x3a, 0xe6, 0x11,
0x3b, 0x3b, 0x66, 0x8e, 0x27, 0x93, 0x8a, 0x97, 0xbd, 0xfa, 0x48, 0xeb, 0x0c, 0x34, 0xe2, 0x9a,
0x57, 0x16, 0xb4, 0xde, 0xe2, 0x95, 0xa5, 0xdc, 0x88, 0xe9, 0x01, 0x79, 0xe8, 0x31, 0x37, 0x9a,
0x4c, 0x63, 0xac, 0x2c, 0x99, 0xba, 0x8b, 0xf6, 0x47, 0x79, 0x66, 0x2d, 0x92, 0xb0, 0x08, 0x35,
0x27, 0x91, 0x6b, 0xe8, 0xb5, 0x4f, 0x22, 0x97, 0xb1, 0x08, 0xd1, 0x27, 0xe4, 0x41, 0x73, 0x1d,
0xb2, 0x5b, 0x6e, 0xe7, 0x99, 0xd5, 0xa4, 0xa0, 0x09, 0x08, 0x3b, 0xbe, 0x26, 0x4f, 0xd3, 0x69,
0xe0, 0xbb, 0x8e, 0xb0, 0xdf, 0x2b, 0xed, 0x0d, 0x0a, 0x9a, 0x80, 0xb0, 0x4f, 0x1b, 0x5d, 0x91,
0x94, 0xf6, 0x06, 0x05, 0x4d, 0x80, 0xfe, 0x40, 0xcc, 0x62, 0x63, 0x5b, 0x7b, 0x97, 0xea, 0xb1,
0xfd, 0x3c, 0xb3, 0x6e, 0x51, 0xc2, 0x2d, 0x3c, 0xe5, 0xe4, 0xcd, 0xea, 0xee, 0xb5, 0xa7, 0x93,
0x5d, 0xf7, 0xed, 0x3c, 0xb3, 0x6e, 0x17, 0xc3, 0xed, 0x92, 0xfe, 0x7f, 0x2b, 0x44, 0xc7, 0x33,
0x8d, 0x68, 0x59, 0x4c, 0x7e, 0x9f, 0x0e, 0xa3, 0x34, 0xac, 0x35, 0xcc, 0x2a, 0x0e, 0xb5, 0x88,
0x7e, 0x4e, 0xb6, 0xd8, 0xfc, 0xab, 0x76, 0x91, 0x8a, 0xd6, 0x2b, 0x0b, 0x5f, 0xb7, 0x77, 0xf2,
0xcc, 0x5a, 0xe0, 0x60, 0x01, 0xa1, 0x1f, 0x93, 0x0d, 0x85, 0x61, 0x2f, 0x92, 0x27, 0x0d, 0xdd,
0x7e, 0x98, 0x67, 0x56, 0x9d, 0x80, 0x7a, 0x28, 0x8c, 0x78, 0x34, 0x02, 0xe6, 0x32, 0xff, 0xc7,
0xe2, 0x5c, 0x81, 0xc6, 0x1a, 0x01, 0xf5, 0x50, 0x9c, 0x10, 0x10, 0xc0, 0x0e, 0x2b, 0x4b, 0x0a,
0x4f, 0x08, 0x05, 0x08, 0xe5, 0x50, 0x1c, 0x3c, 0x62, 0xb9, 0x56, 0x59, 0x3f, 0xba, 0x3c, 0x78,
0xcc, 0x31, 0x28, 0x46, 0x62, 0x03, 0xbd, 0x6a, 0xc7, 0xea, 0x95, 0x3d, 0xbf, 0x8a, 0x43, 0x2d,
0xb2, 0x87, 0x57, 0xd7, 0x66, 0xe7, 0xc5, 0xb5, 0xd9, 0x79, 0x79, 0x6d, 0x6a, 0x3f, 0xcf, 0x4c,
0xed, 0xb7, 0x99, 0xa9, 0x3d, 0x9f, 0x99, 0xda, 0xd5, 0xcc, 0xd4, 0xfe, 0x9a, 0x99, 0xda, 0xdf,
0x33, 0xb3, 0xf3, 0x72, 0x66, 0x6a, 0xbf, 0xdc, 0x98, 0x9d, 0xab, 0x1b, 0xb3, 0xf3, 0xe2, 0xc6,
0xec, 0x7c, 0xff, 0xde, 0xc8, 0x4f, 0xc6, 0xe9, 0x70, 0xdf, 0x8d, 0x26, 0x83, 0x51, 0xec, 0x9c,
0x39, 0xa1, 0x33, 0x08, 0xa2, 0x73, 0x7f, 0xd0, 0xf6, 0xbf, 0x64, 0xd8, 0xc5, 0x7f, 0x1d, 0x1f,
0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xd2, 0x99, 0xf1, 0xb6, 0x0c, 0x00, 0x00,
0x14, 0x8f, 0x53, 0x9c, 0xb4, 0xd3, 0xbf, 0x3b, 0xed, 0xb2, 0x01, 0x24, 0x7b, 0x09, 0x42, 0xac,
0x04, 0x6a, 0xc4, 0x1f, 0x09, 0x81, 0x58, 0x09, 0xb9, 0x4b, 0xa5, 0x4a, 0x05, 0xca, 0x2b, 0x5c,
0xb8, 0x39, 0xf6, 0x34, 0x31, 0x75, 0xec, 0xd4, 0x7f, 0x96, 0xed, 0x8d, 0x8f, 0xc0, 0xb7, 0x80,
0x0b, 0x07, 0xc4, 0x97, 0xd8, 0x63, 0x6f, 0xec, 0xc9, 0xa2, 0xe9, 0x05, 0xf9, 0xb4, 0x12, 0x77,
0xb4, 0x9a, 0x37, 0x13, 0xdb, 0xe3, 0x38, 0x6a, 0x2f, 0xf1, 0xbc, 0xdf, 0xfb, 0xfd, 0xde, 0x1b,
0xcf, 0xcc, 0x7b, 0x9e, 0x90, 0x87, 0xd3, 0xf3, 0xd1, 0xc0, 0x0f, 0x47, 0x17, 0xfe, 0x24, 0x74,
0x99, 0x3f, 0x88, 0x13, 0x3b, 0x89, 0xc5, 0xef, 0xfe, 0x34, 0x0a, 0x93, 0x90, 0xea, 0x68, 0xbc,
0xb9, 0x37, 0x0a, 0x47, 0x21, 0x22, 0x03, 0x3e, 0x12, 0xce, 0xfe, 0x7f, 0x1a, 0xe9, 0x00, 0x8b,
0x53, 0x3f, 0xa1, 0x9f, 0x91, 0x6e, 0x9c, 0x4e, 0x26, 0x76, 0x74, 0xd9, 0xd3, 0x1e, 0x6a, 0x8f,
0xd6, 0x3f, 0xda, 0xda, 0x17, 0x61, 0x4e, 0x05, 0x6a, 0x6d, 0x3f, 0xcf, 0xcc, 0x56, 0x9e, 0x99,
0x73, 0x1a, 0xcc, 0x07, 0x5c, 0x7a, 0x91, 0xb2, 0xc8, 0x63, 0x51, 0xaf, 0xad, 0x48, 0xbf, 0x13,
0x68, 0x29, 0x95, 0x34, 0x98, 0x0f, 0xe8, 0x63, 0xb2, 0xea, 0x05, 0x23, 0x16, 0x27, 0x2c, 0xea,
0xad, 0xa0, 0x76, 0x5b, 0x6a, 0x8f, 0x24, 0x6c, 0xed, 0x48, 0x71, 0x41, 0x84, 0x62, 0x44, 0x3f,
0x21, 0x1d, 0xc7, 0x76, 0xc6, 0x2c, 0xee, 0xbd, 0x86, 0xe2, 0x4d, 0x29, 0x3e, 0x40, 0xd0, 0xda,
0x94, 0x52, 0x1d, 0x49, 0x20, 0xb9, 0xfd, 0x3f, 0xdb, 0xa4, 0x23, 0x18, 0xf4, 0x43, 0xa2, 0x3b,
0xe3, 0x34, 0x38, 0x97, 0xef, 0xbc, 0x51, 0xd5, 0x57, 0xe4, 0x9c, 0x02, 0xe2, 0xc1, 0x25, 0x5e,
0xe0, 0xb2, 0x67, 0xf2, 0x5d, 0x97, 0x48, 0x90, 0x02, 0xe2, 0xc1, 0xa7, 0x19, 0xe1, 0x2a, 0xcb,
0x77, 0x54, 0x35, 0x5b, 0x52, 0x23, 0x39, 0x20, 0x9f, 0xf4, 0x80, 0xac, 0x23, 0x4d, 0x6c, 0x90,
0x7c, 0x43, 0x55, 0xba, 0x2b, 0xa5, 0x55, 0x22, 0x54, 0x0d, 0x7a, 0x48, 0x36, 0x9e, 0x86, 0x7e,
0x3a, 0x61, 0x32, 0x8a, 0xde, 0x10, 0x65, 0x4f, 0x46, 0x51, 0x98, 0xa0, 0x58, 0xfd, 0xbf, 0x3a,
0xa4, 0x2b, 0x4f, 0x02, 0xfd, 0x81, 0x3c, 0x18, 0x5e, 0x26, 0x2c, 0x3e, 0x89, 0x42, 0x87, 0xc5,
0x31, 0x73, 0x4f, 0x58, 0x74, 0xca, 0x9c, 0x30, 0x70, 0x71, 0x19, 0x57, 0xac, 0xb7, 0xf2, 0xcc,
0x5c, 0x46, 0x81, 0x65, 0x0e, 0x1e, 0xd6, 0xf7, 0x82, 0xc6, 0xb0, 0xed, 0x32, 0xec, 0x12, 0x0a,
0x2c, 0x73, 0xd0, 0x23, 0xb2, 0x9b, 0x84, 0x89, 0xed, 0x5b, 0x4a, 0x5a, 0xdc, 0x89, 0x15, 0xeb,
0x41, 0x9e, 0x99, 0x4d, 0x6e, 0x68, 0x02, 0x8b, 0x50, 0xc7, 0x4a, 0x2a, 0xdc, 0x99, 0x6a, 0x28,
0xd5, 0x0d, 0x4d, 0x20, 0x7d, 0x44, 0x56, 0xd9, 0x33, 0xe6, 0x7c, 0xef, 0x4d, 0x18, 0xee, 0x89,
0x66, 0x6d, 0xf0, 0x33, 0x3e, 0xc7, 0xa0, 0x18, 0xd1, 0xf7, 0xc9, 0xda, 0x45, 0xca, 0x52, 0x86,
0xd4, 0x0e, 0x52, 0x37, 0xf3, 0xcc, 0x2c, 0x41, 0x28, 0x87, 0x74, 0x9f, 0x90, 0x38, 0x1d, 0x8a,
0xea, 0x8a, 0x7b, 0x5d, 0x9c, 0xd8, 0x56, 0x9e, 0x99, 0x15, 0x14, 0x2a, 0x63, 0x7a, 0x4c, 0xf6,
0x70, 0x76, 0x5f, 0x05, 0x09, 0xfa, 0x58, 0x92, 0x46, 0x01, 0x73, 0x7b, 0xab, 0xa8, 0xec, 0xe5,
0x99, 0xd9, 0xe8, 0x87, 0x46, 0x94, 0xf6, 0x49, 0x27, 0x9e, 0xfa, 0x5e, 0x12, 0xf7, 0xd6, 0x50,
0x4f, 0xf8, 0xa9, 0x16, 0x08, 0xc8, 0x27, 0x72, 0xc6, 0x76, 0xe4, 0xc6, 0x3d, 0x52, 0xe1, 0x20,
0x02, 0xf2, 0x59, 0xcc, 0xea, 0x24, 0x8c, 0x93, 0x43, 0xcf, 0x4f, 0x58, 0x84, 0xab, 0xd7, 0x5b,
0xaf, 0xcd, 0xaa, 0xe6, 0x87, 0x46, 0x94, 0x5e, 0x92, 0x77, 0x10, 0xff, 0x26, 0x0c, 0x8e, 0x78,
0x39, 0x32, 0xf7, 0xd8, 0x1e, 0x32, 0x3f, 0xae, 0x1d, 0x88, 0x0d, 0x0c, 0xfe, 0x5e, 0x9e, 0x99,
0x77, 0xa1, 0xc3, 0x5d, 0x48, 0xfd, 0x2f, 0x48, 0x57, 0xf6, 0x40, 0xde, 0x36, 0xe2, 0x24, 0x8c,
0x58, 0xad, 0xd3, 0x9c, 0x72, 0xac, 0x6c, 0x1b, 0x48, 0x01, 0xf1, 0xe8, 0xff, 0xd1, 0x26, 0xab,
0x47, 0x65, 0xab, 0xdb, 0xc0, 0x8c, 0xc0, 0x78, 0xcd, 0x8a, 0x4a, 0xd3, 0xad, 0x1d, 0x5e, 0xb6,
0x55, 0x1c, 0x14, 0x8b, 0x1e, 0x12, 0x8a, 0xf6, 0x01, 0x6f, 0x5d, 0xf1, 0xd7, 0x76, 0x82, 0x5a,
0x51, 0x4e, 0xaf, 0xe7, 0x99, 0xd9, 0xe0, 0x85, 0x06, 0xac, 0xc8, 0x6e, 0xa1, 0x1d, 0xcb, 0xea,
0x29, 0xb3, 0x4b, 0x1c, 0x14, 0x8b, 0x7e, 0x4e, 0xb6, 0xca, 0xb3, 0x7f, 0xca, 0x82, 0x44, 0x96,
0x0a, 0xcd, 0x33, 0xb3, 0xe6, 0x81, 0x9a, 0x5d, 0xae, 0x97, 0x7e, 0xe7, 0xf5, 0xfa, 0xbb, 0x4d,
0x74, 0xf4, 0x17, 0x89, 0xc5, 0x4b, 0x00, 0x3b, 0x93, 0x8d, 0xa9, 0x4c, 0x5c, 0x78, 0xa0, 0x66,
0xd3, 0x6f, 0xc9, 0xfd, 0x0a, 0xf2, 0x24, 0xfc, 0x39, 0xf0, 0x43, 0xdb, 0x2d, 0x56, 0xed, 0x8d,
0x3c, 0x33, 0x9b, 0x09, 0xd0, 0x0c, 0xf3, 0x3d, 0x70, 0x14, 0x0c, 0x2b, 0x79, 0xa5, 0xdc, 0x83,
0x45, 0x2f, 0x34, 0x60, 0xe5, 0xb7, 0xaa, 0xf6, 0x25, 0xe0, 0xd8, 0x92, 0x6f, 0xd5, 0x3c, 0x35,
0xb0, 0xb3, 0xf8, 0x90, 0x25, 0xce, 0xb8, 0xe8, 0x37, 0xd5, 0xd4, 0x8a, 0x17, 0x1a, 0xb0, 0xfe,
0x6f, 0x3a, 0xd1, 0x31, 0x0f, 0x5f, 0xd9, 0x31, 0xb3, 0x5d, 0x91, 0x94, 0x1f, 0xf6, 0xea, 0x96,
0xaa, 0x1e, 0xa8, 0xd9, 0x8a, 0x56, 0x14, 0xb4, 0xde, 0xa0, 0x15, 0xa5, 0x5c, 0xb3, 0xe9, 0x01,
0xb9, 0xe7, 0x32, 0x27, 0x9c, 0x4c, 0x23, 0xac, 0x2c, 0x91, 0xba, 0x83, 0xf2, 0xfb, 0x79, 0x66,
0x2e, 0x3a, 0x61, 0x11, 0xaa, 0x07, 0x11, 0x73, 0xe8, 0x36, 0x07, 0x11, 0xd3, 0x58, 0x84, 0xe8,
0x63, 0xb2, 0x5d, 0x9f, 0x87, 0xe8, 0x96, 0xbb, 0x79, 0x66, 0xd6, 0x5d, 0x50, 0x07, 0xb8, 0x1c,
0x8f, 0xc9, 0x93, 0x74, 0xea, 0x7b, 0x8e, 0xcd, 0xe5, 0x6b, 0xa5, 0xbc, 0xe6, 0x82, 0x3a, 0xc0,
0xe5, 0xd3, 0x5a, 0x57, 0x24, 0xa5, 0xbc, 0xe6, 0x82, 0x3a, 0x40, 0x7f, 0x22, 0x46, 0xb1, 0xb0,
0x8d, 0xbd, 0x4b, 0xf6, 0xd8, 0x7e, 0x9e, 0x99, 0xb7, 0x30, 0xe1, 0x16, 0x3f, 0x8d, 0xc9, 0xdb,
0xd5, 0xd5, 0x6b, 0x4e, 0x27, 0xba, 0xee, 0xbb, 0x79, 0x66, 0xde, 0x4e, 0x86, 0xdb, 0x29, 0xfd,
0xff, 0xdb, 0x44, 0xc7, 0x5b, 0x0d, 0x6f, 0x59, 0x4c, 0x7c, 0x9f, 0x0e, 0xc3, 0x34, 0x50, 0x1a,
0x66, 0x15, 0x07, 0xc5, 0xa2, 0x5f, 0x92, 0x1d, 0x36, 0xff, 0xaa, 0x5d, 0xa4, 0xbc, 0xf5, 0x8a,
0xc2, 0xd7, 0xad, 0xbd, 0x3c, 0x33, 0x17, 0x7c, 0xb0, 0x80, 0xd0, 0x4f, 0xc9, 0xa6, 0xc4, 0xb0,
0x17, 0x89, 0x9b, 0x86, 0x6e, 0xdd, 0xcb, 0x33, 0x53, 0x75, 0x80, 0x6a, 0x72, 0x21, 0x5e, 0x8d,
0x80, 0x39, 0xcc, 0x7b, 0x5a, 0xdc, 0x2b, 0x50, 0xa8, 0x38, 0x40, 0x35, 0xf9, 0x0d, 0x01, 0x01,
0xec, 0xb0, 0xa2, 0xa4, 0xf0, 0x86, 0x50, 0x80, 0x50, 0x0e, 0xf9, 0xc5, 0x23, 0x12, 0x73, 0x15,
0xf5, 0xa3, 0x8b, 0x8b, 0xc7, 0x1c, 0x83, 0x62, 0xc4, 0x17, 0xd0, 0xad, 0x76, 0xac, 0x6e, 0xd9,
0xf3, 0xab, 0x38, 0x28, 0x96, 0x35, 0xbc, 0xba, 0x36, 0x5a, 0x2f, 0xae, 0x8d, 0xd6, 0xcb, 0x6b,
0x43, 0xfb, 0x65, 0x66, 0x68, 0xbf, 0xcf, 0x0c, 0xed, 0xf9, 0xcc, 0xd0, 0xae, 0x66, 0x86, 0xf6,
0xcf, 0xcc, 0xd0, 0xfe, 0x9d, 0x19, 0xad, 0x97, 0x33, 0x43, 0xfb, 0xf5, 0xc6, 0x68, 0x5d, 0xdd,
0x18, 0xad, 0x17, 0x37, 0x46, 0xeb, 0xc7, 0x0f, 0x46, 0x5e, 0x32, 0x4e, 0x87, 0xfb, 0x4e, 0x38,
0x19, 0x8c, 0x22, 0xfb, 0xcc, 0x0e, 0xec, 0x81, 0x1f, 0x9e, 0x7b, 0x83, 0xa6, 0xff, 0x37, 0xc3,
0x0e, 0xfe, 0x7b, 0xf9, 0xf8, 0x55, 0x00, 0x00, 0x00, 0xff, 0xff, 0xba, 0xe9, 0x4e, 0xb9, 0xfe,
0x0c, 0x00, 0x00,
}
func (this *Result) Equal(that interface{}) bool {
@ -868,6 +878,9 @@ func (this *Caches) Equal(that interface{}) bool {
if !this.StatsResult.Equal(&that1.StatsResult) {
return false
}
if !this.VolumeResult.Equal(&that1.VolumeResult) {
return false
}
return true
}
func (this *Summary) Equal(that interface{}) bool {
@ -1130,12 +1143,13 @@ func (this *Caches) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s := make([]string, 0, 9)
s = append(s, "&stats.Caches{")
s = append(s, "Chunk: "+strings.Replace(this.Chunk.GoString(), `&`, ``, 1)+",\n")
s = append(s, "Index: "+strings.Replace(this.Index.GoString(), `&`, ``, 1)+",\n")
s = append(s, "Result: "+strings.Replace(this.Result.GoString(), `&`, ``, 1)+",\n")
s = append(s, "StatsResult: "+strings.Replace(this.StatsResult.GoString(), `&`, ``, 1)+",\n")
s = append(s, "VolumeResult: "+strings.Replace(this.VolumeResult.GoString(), `&`, ``, 1)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@ -1323,6 +1337,16 @@ func (m *Caches) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
{
size, err := m.VolumeResult.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintStats(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
{
size, err := m.StatsResult.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@ -1758,6 +1782,8 @@ func (m *Caches) Size() (n int) {
n += 1 + l + sovStats(uint64(l))
l = m.StatsResult.Size()
n += 1 + l + sovStats(uint64(l))
l = m.VolumeResult.Size()
n += 1 + l + sovStats(uint64(l))
return n
}
@ -1957,6 +1983,7 @@ func (this *Caches) String() string {
`Index:` + strings.Replace(strings.Replace(this.Index.String(), "Cache", "Cache", 1), `&`, ``, 1) + `,`,
`Result:` + strings.Replace(strings.Replace(this.Result.String(), "Cache", "Cache", 1), `&`, ``, 1) + `,`,
`StatsResult:` + strings.Replace(strings.Replace(this.StatsResult.String(), "Cache", "Cache", 1), `&`, ``, 1) + `,`,
`VolumeResult:` + strings.Replace(strings.Replace(this.VolumeResult.String(), "Cache", "Cache", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
@ -2408,6 +2435,39 @@ func (m *Caches) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field VolumeResult", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStats
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStats
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStats
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.VolumeResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipStats(dAtA[iNdEx:])

@ -45,6 +45,10 @@ message Caches {
(gogoproto.nullable) = false,
(gogoproto.jsontag) = "statsResult"
];
Cache volumeResult = 5 [
(gogoproto.nullable) = false,
(gogoproto.jsontag) = "volumeResult"
];
}
// Summary is the summary of a query statistics.

@ -581,6 +581,12 @@ func applyFIFOCacheConfig(r *ConfigWrapper) {
// We use the same config as the query range results cache.
r.QueryRange.StatsCacheConfig.CacheConfig = r.QueryRange.ResultsCacheConfig.CacheConfig
}
volumeCacheConfig := r.QueryRange.VolumeCacheConfig.CacheConfig
if !cache.IsCacheConfigured(volumeCacheConfig) {
// We use the same config as the query range results cache.
r.QueryRange.VolumeCacheConfig.CacheConfig = r.QueryRange.ResultsCacheConfig.CacheConfig
}
}
func applyIngesterFinalSleep(cfg *ConfigWrapper) {

@ -856,6 +856,13 @@ query_range:
})
}
const defaultResulsCacheString = `---
query_range:
results_cache:
cache:
memcached_client:
host: memcached.host.org`
func TestDefaultFIFOCacheBehavior(t *testing.T) {
t.Run("for the chunk cache config", func(t *testing.T) {
t.Run("no FIFO cache enabled by default if Redis is set", func(t *testing.T) {
@ -966,14 +973,7 @@ query_range:
})
t.Run("no FIFO cache enabled by default if Memcache is set", func(t *testing.T) {
configFileString := `---
query_range:
results_cache:
cache:
memcached_client:
host: memcached.host.org`
config, _, _ := configWrapperFromYAML(t, configFileString, nil)
config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil)
assert.EqualValues(t, "memcached.host.org", config.QueryRange.ResultsCacheConfig.CacheConfig.MemcacheClient.Host)
assert.False(t, config.QueryRange.ResultsCacheConfig.CacheConfig.EnableFifoCache)
})
@ -1015,6 +1015,51 @@ query_range:
config, _, _ := configWrapperFromYAML(t, minimalConfig, nil)
assert.True(t, config.QueryRange.StatsCacheConfig.CacheConfig.EnableFifoCache)
})
t.Run("gets results cache config if not configured directly", func(t *testing.T) {
config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil)
assert.EqualValues(t, "memcached.host.org", config.QueryRange.StatsCacheConfig.CacheConfig.MemcacheClient.Host)
assert.False(t, config.QueryRange.StatsCacheConfig.CacheConfig.EnableFifoCache)
})
})
t.Run("for the volume results cache config", func(t *testing.T) {
t.Run("no FIFO cache enabled by default if Redis is set", func(t *testing.T) {
configFileString := `---
query_range:
volume_results_cache:
cache:
redis:
endpoint: endpoint.redis.org`
config, _, _ := configWrapperFromYAML(t, configFileString, nil)
assert.EqualValues(t, config.QueryRange.VolumeCacheConfig.CacheConfig.Redis.Endpoint, "endpoint.redis.org")
assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EnableFifoCache)
})
t.Run("no FIFO cache enabled by default if Memcache is set", func(t *testing.T) {
configFileString := `---
query_range:
volume_results_cache:
cache:
memcached_client:
host: memcached.host.org`
config, _, _ := configWrapperFromYAML(t, configFileString, nil)
assert.EqualValues(t, "memcached.host.org", config.QueryRange.VolumeCacheConfig.CacheConfig.MemcacheClient.Host)
assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EnableFifoCache)
})
t.Run("FIFO cache is enabled by default if no other cache is set", func(t *testing.T) {
config, _, _ := configWrapperFromYAML(t, minimalConfig, nil)
assert.True(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EnableFifoCache)
})
t.Run("gets results cache config if not configured directly", func(t *testing.T) {
config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil)
assert.EqualValues(t, "memcached.host.org", config.QueryRange.VolumeCacheConfig.CacheConfig.MemcacheClient.Host)
assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EnableFifoCache)
})
})
}

@ -372,6 +372,8 @@ func (t *Loki) initQuerier() (services.Service, error) {
t.querierAPI = querier.NewQuerierAPI(t.Cfg.Querier, t.Querier, t.Overrides, logger)
indexStatsHTTPMiddleware := querier.WrapQuerySpanAndTimeout("query.IndexStats", t.querierAPI)
volumeHTTPMiddleware := querier.WrapQuerySpanAndTimeout("query.VolumeInstant", t.querierAPI)
volumeRangeHTTPMiddleware := querier.WrapQuerySpanAndTimeout("query.VolumeRange", t.querierAPI)
if t.supportIndexDeleteRequest() && t.Cfg.CompactorConfig.RetentionEnabled {
toMerge = append(
@ -383,6 +385,16 @@ func (t *Loki) initQuerier() (services.Service, error) {
queryrangebase.CacheGenNumberHeaderSetterMiddleware(t.cacheGenerationLoader),
indexStatsHTTPMiddleware,
)
volumeHTTPMiddleware = middleware.Merge(
queryrangebase.CacheGenNumberHeaderSetterMiddleware(t.cacheGenerationLoader),
volumeHTTPMiddleware,
)
volumeRangeHTTPMiddleware = middleware.Merge(
queryrangebase.CacheGenNumberHeaderSetterMiddleware(t.cacheGenerationLoader),
volumeRangeHTTPMiddleware,
)
}
labelsHTTPMiddleware := querier.WrapQuerySpanAndTimeout("query.Label", t.querierAPI)
@ -417,8 +429,8 @@ func (t *Loki) initQuerier() (services.Service, error) {
"/loki/api/v1/series": querier.WrapQuerySpanAndTimeout("query.Series", t.querierAPI).Wrap(http.HandlerFunc(t.querierAPI.SeriesHandler)),
"/loki/api/v1/index/stats": indexStatsHTTPMiddleware.Wrap(http.HandlerFunc(t.querierAPI.IndexStatsHandler)),
"/loki/api/v1/index/volume": querier.WrapQuerySpanAndTimeout("query.VolumeInstant", t.querierAPI).Wrap(http.HandlerFunc(t.querierAPI.VolumeInstantHandler)),
"/loki/api/v1/index/volume_range": querier.WrapQuerySpanAndTimeout("query.VolumeRange", t.querierAPI).Wrap(http.HandlerFunc(t.querierAPI.VolumeRangeHandler)),
"/loki/api/v1/index/volume": volumeHTTPMiddleware.Wrap(http.HandlerFunc(t.querierAPI.VolumeInstantHandler)),
"/loki/api/v1/index/volume_range": volumeRangeHTTPMiddleware.Wrap(http.HandlerFunc(t.querierAPI.VolumeRangeHandler)),
"/api/prom/query": middleware.Merge(
httpMiddleware,

@ -87,7 +87,16 @@ var (
"requests": 0,
"downloadTime": 0
},
"statsResult": {
"statsResult": {
"entriesFound": 0,
"entriesRequested": 0,
"entriesStored": 0,
"bytesReceived": 0,
"bytesSent": 0,
"requests": 0,
"downloadTime": 0
},
"volumeResult": {
"entriesFound": 0,
"entriesRequested": 0,
"entriesStored": 0,

@ -1395,7 +1395,16 @@ var (
"requests": 0,
"downloadTime": 0
},
"statsResult": {
"statsResult": {
"entriesFound": 0,
"entriesRequested": 0,
"entriesStored": 0,
"bytesReceived": 0,
"bytesSent": 0,
"requests": 0,
"downloadTime": 0
},
"volumeResult": {
"entriesFound": 0,
"entriesRequested": 0,
"entriesStored": 0,
@ -1632,9 +1641,11 @@ var (
},
Caches: stats.Caches{
Chunk: stats.Cache{},
Index: stats.Cache{},
Result: stats.Cache{},
Chunk: stats.Cache{},
Index: stats.Cache{},
StatsResult: stats.Cache{},
VolumeResult: stats.Cache{},
Result: stats.Cache{},
},
}
)

@ -83,6 +83,15 @@ var emptyStats = `"stats": {
"requests": 0,
"downloadTime": 0
},
"volumeResult": {
"entriesFound": 0,
"entriesRequested": 0,
"entriesStored": 0,
"bytesReceived": 0,
"bytesSent": 0,
"requests": 0,
"downloadTime": 0
},
"result": {
"entriesFound": 0,
"entriesRequested": 0,

@ -225,8 +225,9 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que
return nil, err
}
// Merge index stats result cache stats from shard resolver into the query stats.
// Merge index and volume stats result cache stats from shard resolver into the query stats.
res.Statistics.Caches.StatsResult.Merge(resolverStats.Caches().StatsResult)
res.Statistics.Caches.VolumeResult.Merge(resolverStats.Caches().VolumeResult)
value, err := marshal.NewResultValue(res.Data)
if err != nil {

@ -33,6 +33,8 @@ type Config struct {
Transformer UserIDTransformer `yaml:"-"`
CacheIndexStatsResults bool `yaml:"cache_index_stats_results"`
StatsCacheConfig IndexStatsCacheConfig `yaml:"index_stats_results_cache" doc:"description=If a cache config is not specified and cache_index_stats_results is true, the config for the results cache is used."`
CacheVolumeResults bool `yaml:"cache_volume_results"`
VolumeCacheConfig VolumeCacheConfig `yaml:"volume_results_cache" doc:"description=If a cache config is not specified and cache_volume_results is true, the config for the results cache is used."`
}
// RegisterFlags adds the flags required to configure this flag set.
@ -40,6 +42,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.Config.RegisterFlags(f)
f.BoolVar(&cfg.CacheIndexStatsResults, "querier.cache-index-stats-results", false, "Cache index stats query results.")
cfg.StatsCacheConfig.RegisterFlags(f)
f.BoolVar(&cfg.CacheVolumeResults, "querier.cache-volume-results", false, "Cache volume query results.")
cfg.VolumeCacheConfig.RegisterFlags(f)
}
// Validate validates the config.
@ -105,6 +109,7 @@ func NewTripperware(
var (
resultsCache cache.Cache
statsCache cache.Cache
volumeCache cache.Cache
err error
)
@ -129,6 +134,20 @@ func NewTripperware(
}
}
if cfg.CacheVolumeResults {
// If the volume cache is not configured, use the results cache config.
cacheCfg := cfg.VolumeCacheConfig.ResultsCacheConfig
if !cache.IsCacheConfigured(cacheCfg.CacheConfig) {
level.Debug(log).Log("msg", "using results cache config for volume cache")
cacheCfg = cfg.ResultsCacheConfig
}
volumeCache, err = newResultsCacheFromConfig(cacheCfg, registerer, log, stats.VolumeResultCache)
if err != nil {
return nil, nil, err
}
}
var codec queryrangebase.Codec = DefaultCodec
if cfg.RequiredQueryResponseFormat == "protobuf" {
codec = &RequestProtobufCodec{}
@ -173,7 +192,7 @@ func NewTripperware(
return nil, nil, err
}
seriesVolumeTripperware, err := NewVolumeTripperware(cfg, log, limits, schema, codec, statsCache, cacheGenNumLoader, retentionEnabled, metrics)
seriesVolumeTripperware, err := NewVolumeTripperware(cfg, log, limits, schema, codec, volumeCache, cacheGenNumLoader, retentionEnabled, metrics)
if err != nil {
return nil, nil, err
}
@ -191,7 +210,7 @@ func NewTripperware(
)
return newRoundTripper(log, next, limitedRT, logFilterRT, metricRT, seriesRT, labelsRT, instantRT, statsRT, seriesVolumeRT, limits)
}, StopperWrapper{resultsCache, statsCache}, nil
}, StopperWrapper{resultsCache, statsCache, volumeCache}, nil
}
type roundTripper struct {
@ -787,15 +806,56 @@ func NewVolumeTripperware(
retentionEnabled bool,
metrics *Metrics,
) (queryrangebase.Tripperware, error) {
labelVolumeCfg := cfg
labelVolumeCfg.CacheIndexStatsResults = false
statsTw, err := NewIndexStatsTripperware(labelVolumeCfg, log, limits, schema, codec, c, cacheGenNumLoader, retentionEnabled, metrics)
// Parallelize the volume requests, so it doesn't send a huge request to a single index-gw (i.e. {app=~".+"} for 30d).
// Indices are sharded by 24 hours, so we split the volume request in 24h intervals.
limits = WithSplitByLimits(limits, 24*time.Hour)
var cacheMiddleware queryrangebase.Middleware
if cfg.CacheVolumeResults {
var err error
cacheMiddleware, err = NewVolumeCacheMiddleware(
log,
limits,
codec,
c,
cacheGenNumLoader,
func(_ context.Context, r queryrangebase.Request) bool {
return !r.GetCachingOptions().Disabled
},
func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int {
return MinWeightedParallelism(
ctx,
tenantIDs,
schema.Configs,
limits,
model.Time(r.GetStart()),
model.Time(r.GetEnd()),
)
},
retentionEnabled,
cfg.Transformer,
metrics.ResultsCacheMetrics,
)
if err != nil {
return nil, err
}
}
indexTw, err := sharedIndexTripperware(
cacheMiddleware,
cfg,
codec,
limits,
log,
metrics,
schema,
)
if err != nil {
return nil, err
}
return volumeFeatureFlagRoundTripper(
volumeRangeTripperware(codec, statsTw),
volumeRangeTripperware(codec, indexTw),
limits,
), nil
}
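The 24-hour split applied at the top of `NewVolumeTripperware` (via `WithSplitByLimits(limits, 24*time.Hour)`) is what keeps a wide request such as `{app=~".+"}` over 30 days from hitting a single index gateway. A rough, self-contained sketch of the splitting idea follows; the times are made up and the exact boundary handling of Loki's split middleware may differ.

```go
// Illustration only: break a long volume_range request into sub-requests that
// end on 24h boundaries, roughly what the split-by-interval middleware does.
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Date(2023, 8, 1, 6, 0, 0, 0, time.UTC)
	end := start.Add(72 * time.Hour) // a 3-day volume_range request
	const interval = 24 * time.Hour

	for s := start; s.Before(end); {
		e := s.Truncate(interval).Add(interval) // next 24h (UTC day) boundary
		if e.After(end) {
			e = end
		}
		fmt.Printf("sub-request: %s -> %s\n", s.Format(time.RFC3339), e.Format(time.RFC3339))
		s = e
	}
}
```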
@ -895,6 +955,26 @@ func NewIndexStatsTripperware(
}
}
return sharedIndexTripperware(
cacheMiddleware,
cfg,
codec,
limits,
log,
metrics,
schema,
)
}
func sharedIndexTripperware(
cacheMiddleware queryrangebase.Middleware,
cfg Config,
codec queryrangebase.Codec,
limits Limits,
log log.Logger,
metrics *Metrics,
schema config.SchemaConfig,
) (queryrangebase.Tripperware, error) {
return func(next http.RoundTripper) http.RoundTripper {
middlewares := []queryrangebase.Middleware{
NewLimitsMiddleware(limits),
@ -902,7 +982,7 @@ func NewIndexStatsTripperware(
SplitByIntervalMiddleware(schema.Configs, limits, codec, splitByTime, metrics.SplitByMetrics),
}
if cfg.CacheIndexStatsResults {
if cacheMiddleware != nil {
middlewares = append(
middlewares,
queryrangebase.InstrumentMiddleware("log_results_cache", metrics.InstrumentMiddlewareMetrics),

@ -70,6 +70,17 @@ var (
},
},
},
VolumeCacheConfig: VolumeCacheConfig{
ResultsCacheConfig: queryrangebase.ResultsCacheConfig{
CacheConfig: cache.Config{
EnableFifoCache: true,
Fifocache: cache.FifoCacheConfig{
MaxSizeItems: 1024,
TTL: 24 * time.Hour,
},
},
},
},
}
testEngineOpts = logql.EngineOpts{
MaxLookBackPeriod: 30 * time.Second,

@ -57,7 +57,7 @@ func recordQueryMetrics(data *queryData) {
case queryTypeSeries:
logql.RecordSeriesQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.match, data.status, *data.statistics)
case queryTypeVolume:
logql.RecordVolumeQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.status, *data.statistics)
logql.RecordVolumeQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.params.Query(), data.status, *data.statistics)
default:
level.Error(logger).Log("msg", "failed to record query metrics", "err", fmt.Errorf("expected one of the *LokiRequest, *LokiInstantRequest, *LokiSeriesRequest, *LokiLabelNamesRequest, got %s", data.queryType))
}

@ -0,0 +1,134 @@
package queryrange
import (
"context"
"flag"
"fmt"
strings "strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/tenant"
"github.com/prometheus/common/model"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/validation"
)
type VolumeSplitter struct {
cacheKeyLimits
}
// GenerateCacheKey generates a cache key based on the userID, Request and interval.
func (i VolumeSplitter) GenerateCacheKey(ctx context.Context, userID string, r queryrangebase.Request) string {
cacheKey := i.cacheKeyLimits.GenerateCacheKey(ctx, userID, r)
volumeReq := r.(*logproto.VolumeRequest)
limit := volumeReq.GetLimit()
aggregateBy := volumeReq.GetAggregateBy()
targetLabels := volumeReq.GetTargetLabels()
return fmt.Sprintf("volume:%s:%d:%s:%s", cacheKey, limit, aggregateBy, strings.Join(targetLabels, ","))
}
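Because the limit, aggregation mode, and target labels all change what a volume response contains, they are folded into the cache key so that otherwise-identical requests with different parameters never share an entry (the `caches are only valid for the same request parameters` test below exercises exactly this). A small illustration of the resulting key shape; the base-key value is an assumption, only the `volume:` wrapping comes from `GenerateCacheKey` above.

```go
// Illustration only: how VolumeSplitter widens a base results-cache key with
// the volume-specific request parameters. The baseKey value is hypothetical.
package main

import (
	"fmt"
	"strings"
)

func main() {
	baseKey := `fake:{job="nginx"}:86400000:19600` // assumed output of cacheKeyLimits.GenerateCacheKey
	limit := int32(10)
	aggregateBy := "series"
	targetLabels := []string{"cluster", "namespace"}

	key := fmt.Sprintf("volume:%s:%d:%s:%s", baseKey, limit, aggregateBy, strings.Join(targetLabels, ","))
	fmt.Println(key)
	// volume:fake:{job="nginx"}:86400000:19600:10:series:cluster,namespace
}
```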
type VolumeExtractor struct{}
// Extract favors the ability to cache over exactness of results. It assumes a constant distribution
// of log volumes over a range and will extract subsets proportionally.
func (p VolumeExtractor) Extract(start, end int64, res queryrangebase.Response, resStart, resEnd int64) queryrangebase.Response {
factor := util.GetFactorOfTime(start, end, resStart, resEnd)
volumeRes := res.(*VolumeResponse)
volumes := volumeRes.Response.GetVolumes()
for i, v := range volumes {
volumes[i].Volume = uint64(float64(v.Volume) * factor)
}
return &VolumeResponse{
Response: &logproto.VolumeResponse{
Volumes: volumes,
Limit: volumeRes.Response.GetLimit(),
},
}
}
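Because volumes are scaled linearly by the fraction of time that overlaps, a partially overlapping cached extent contributes only a proportional share of its bytes. A self-contained sketch of that arithmetic with made-up numbers (the overlap factor mirrors what `util.GetFactorOfTime` computes; requires Go 1.21+ for the built-in `min`/`max`):

```go
// Sketch of the constant-distribution assumption described above: a cached
// one-hour extent with volume 42 overlaps a new request for 45 of its 60
// minutes, so 75% of the cached volume is reused.
package main

import "fmt"

func main() {
	// Cached extent: minutes [0, 60), total volume 42 for {foo="bar"}.
	resStart, resEnd := int64(0), int64(60)
	cachedVolume := uint64(42)

	// The new request needs minutes [15, 60) of that extent.
	reqStart, reqEnd := int64(15), int64(60)

	overlap := float64(min(reqEnd, resEnd) - max(reqStart, resStart)) // 45
	factor := overlap / float64(resEnd-resStart)                      // 45/60 = 0.75

	fmt.Println(uint64(float64(cachedVolume) * factor)) // 31 -- approximate by design
}
```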
func (p VolumeExtractor) ResponseWithoutHeaders(resp queryrangebase.Response) queryrangebase.Response {
volumeRes := resp.(*VolumeResponse)
return &VolumeResponse{
Response: volumeRes.Response,
}
}
type VolumeCacheConfig struct {
queryrangebase.ResultsCacheConfig `yaml:",inline"`
}
// RegisterFlags registers flags.
func (cfg *VolumeCacheConfig) RegisterFlags(f *flag.FlagSet) {
cfg.RegisterFlagsWithPrefix(f, "frontend.volume-results-cache.")
}
func (cfg *VolumeCacheConfig) Validate() error {
return cfg.ResultsCacheConfig.Validate()
}
// volumeCacheMiddlewareNowTimeFunc is a function that returns the current time.
// It is used to allow tests to override the current time.
var volumeCacheMiddlewareNowTimeFunc = model.Now
// shouldCacheVolume returns true if the request should be cached.
// It returns false if:
// - The request end time falls within the max_stats_cache_freshness duration.
func shouldCacheVolume(ctx context.Context, req queryrangebase.Request, lim Limits) (bool, error) {
tenantIDs, err := tenant.TenantIDs(ctx)
if err != nil {
return false, err
}
cacheFreshnessCapture := func(id string) time.Duration { return lim.MaxStatsCacheFreshness(ctx, id) }
maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture)
now := volumeCacheMiddlewareNowTimeFunc()
return maxCacheFreshness == 0 || model.Time(req.GetEnd()).Before(now.Add(-maxCacheFreshness)), nil
}
func NewVolumeCacheMiddleware(
log log.Logger,
limits Limits,
merger queryrangebase.Merger,
c cache.Cache,
cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
shouldCache queryrangebase.ShouldCacheFn,
parallelismForReq func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int,
retentionEnabled bool,
transformer UserIDTransformer,
metrics *queryrangebase.ResultsCacheMetrics,
) (queryrangebase.Middleware, error) {
return queryrangebase.NewResultsCacheMiddleware(
log,
c,
VolumeSplitter{cacheKeyLimits{limits, transformer}},
limits,
merger,
VolumeExtractor{},
cacheGenNumberLoader,
func(ctx context.Context, r queryrangebase.Request) bool {
if shouldCache != nil && !shouldCache(ctx, r) {
return false
}
cacheStats, err := shouldCacheVolume(ctx, r, limits)
if err != nil {
level.Error(log).Log("msg", "failed to determine if volume should be cached. Won't cache", "err", err)
return false
}
return cacheStats
},
parallelismForReq,
retentionEnabled,
metrics,
)
}

@ -0,0 +1,334 @@
package queryrange
import (
"context"
"testing"
"time"
"github.com/go-kit/log"
"github.com/grafana/dskit/user"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
"github.com/grafana/loki/pkg/util"
)
func TestVolumeCache(t *testing.T) {
setup := func(volResp *VolumeResponse) (*int, queryrangebase.Handler) {
cfg := queryrangebase.ResultsCacheConfig{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache)
require.NoError(t, err)
cacheMiddleware, err := NewVolumeCacheMiddleware(
log.NewNopLogger(),
WithSplitByLimits(fakeLimits{}, 24*time.Hour),
DefaultCodec,
c,
nil,
nil,
func(_ context.Context, _ []string, _ queryrangebase.Request) int {
return 1
},
false,
nil,
nil,
)
require.NoError(t, err)
calls, volHandler := volumeResultHandler(volResp)
rc := cacheMiddleware.Wrap(volHandler)
return calls, rc
}
t.Run("caches the response for the same request", func(t *testing.T) {
volResp := &VolumeResponse{
Response: &logproto.VolumeResponse{
Volumes: []logproto.Volume{
{
Name: `{foo="bar"}`,
Volume: 42,
},
},
Limit: 10,
},
}
calls, handler := setup(volResp)
from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour))
volReq := &logproto.VolumeRequest{
From: from,
Through: through,
Matchers: `{foo="bar"}`,
Limit: 10,
}
*calls = 0
ctx := user.InjectOrgID(context.Background(), "fake")
resp, err := handler.Do(ctx, volReq)
require.NoError(t, err)
require.Equal(t, 1, *calls)
require.Equal(t, volResp, resp)
// Doing same request again shouldn't change anything.
*calls = 0
resp, err = handler.Do(ctx, volReq)
require.NoError(t, err)
require.Equal(t, 0, *calls)
require.Equal(t, volResp, resp)
})
t.Run("a new request with overlapping time range should reuse part of the previous request for the overlap", func(t *testing.T) {
volResp := &VolumeResponse{
Response: &logproto.VolumeResponse{
Volumes: []logproto.Volume{
{
Name: `{foo="bar"}`,
Volume: 42,
},
},
Limit: 10,
},
}
calls, handler := setup(volResp)
from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour))
volReq := &logproto.VolumeRequest{
From: from,
Through: through,
Matchers: `{foo="bar"}`,
Limit: 10,
}
ctx := user.InjectOrgID(context.Background(), "fake")
resp, err := handler.Do(ctx, volReq)
require.NoError(t, err)
require.Equal(t, 1, *calls)
require.Equal(t, volResp, resp)
// The new start time is 15m (i.e. 25%) in the future with regard to the previous request time span.
*calls = 0
req := volReq.WithStartEnd(volReq.GetStart()+(15*time.Minute).Milliseconds(), volReq.GetEnd()+(15*time.Minute).Milliseconds())
vol := float64(0.75)
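// 75% of the cached hour still overlaps the shifted range, so 0.75*42 of the
// cached volume is reused; the uncovered trailing 15m triggers one fresh
// handler call contributing another 42, for a merged total of 73.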
expectedVol := &VolumeResponse{
Response: &logproto.VolumeResponse{
Volumes: []logproto.Volume{
{
Name: `{foo="bar"}`,
Volume: uint64(vol*float64(42)) + 42,
},
},
Limit: 10,
},
}
resp, err = handler.Do(ctx, req)
require.NoError(t, err)
require.Equal(t, 1, *calls)
require.Equal(t, expectedVol, resp)
})
t.Run("caches are only valid for the same request parameters", func(t *testing.T) {
volResp := &VolumeResponse{
Response: &logproto.VolumeResponse{
Volumes: []logproto.Volume{
{
Name: `{foo="bar"}`,
Volume: 42,
},
},
Limit: 10,
},
}
calls, handler := setup(volResp)
// initial call to fill cache
from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour))
volReq := &logproto.VolumeRequest{
From: from,
Through: through,
Matchers: `{foo="bar"}`,
Limit: 10,
Step: 1,
}
ctx := user.InjectOrgID(context.Background(), "fake")
_, err := handler.Do(ctx, volReq)
require.NoError(t, err)
require.Equal(t, 1, *calls)
type testCase struct {
fn func(*logproto.VolumeRequest)
}
testCases := map[string]testCase{
"different step": {
fn: func(req *logproto.VolumeRequest) {
req.Step = 2
},
},
"new limit": {
fn: func(req *logproto.VolumeRequest) {
req.Limit = 11
},
},
"aggregate by labels": {
fn: func(req *logproto.VolumeRequest) {
req.AggregateBy = seriesvolume.Labels
},
},
"target labels": {
fn: func(req *logproto.VolumeRequest) {
req.TargetLabels = []string{"foo"}
},
},
}
for name, tc := range testCases {
*calls = 0
volReq := &logproto.VolumeRequest{
From: from,
Through: through,
Matchers: `{foo="bar"}`,
Limit: 10,
Step: 1,
}
tc.fn(volReq)
_, err = handler.Do(ctx, volReq)
require.NoError(t, err)
require.Equal(t, 1, *calls, name)
}
})
}
func TestVolumeCache_RecentData(t *testing.T) {
volumeCacheMiddlewareNowTimeFunc = func() model.Time { return model.Time(testTime.UnixMilli()) }
now := volumeCacheMiddlewareNowTimeFunc()
volResp := &VolumeResponse{
Response: &logproto.VolumeResponse{
Volumes: []logproto.Volume{
{
Name: `{foo="bar"}`,
Volume: 42,
},
},
Limit: 10,
},
}
for _, tc := range []struct {
name string
maxStatsCacheFreshness time.Duration
req *logproto.VolumeRequest
expectedCallsBeforeCache int
expectedCallsAfterCache int
expectedResp *VolumeResponse
}{
{
name: "MaxStatsCacheFreshness disabled",
maxStatsCacheFreshness: 0,
req: &logproto.VolumeRequest{
From: now.Add(-1 * time.Hour),
Through: now.Add(-5 * time.Minute), // So we don't hit the max_cache_freshness_per_query limit (1m)
Matchers: `{foo="bar"}`,
Limit: 10,
},
expectedCallsBeforeCache: 1,
expectedCallsAfterCache: 0,
expectedResp: volResp,
},
{
name: "MaxStatsCacheFreshness enabled",
maxStatsCacheFreshness: 30 * time.Minute,
req: &logproto.VolumeRequest{
From: now.Add(-1 * time.Hour),
Through: now.Add(-5 * time.Minute), // So we don't hit the max_cache_freshness_per_query limit (1m)
Matchers: `{foo="bar"}`,
Limit: 10,
},
expectedCallsBeforeCache: 1,
expectedCallsAfterCache: 1, // The whole request is done since it wasn't cached.
expectedResp: volResp,
},
{
name: "MaxStatsCacheFreshness enabled, but request before the max freshness",
maxStatsCacheFreshness: 30 * time.Minute,
req: &logproto.VolumeRequest{
From: now.Add(-1 * time.Hour),
Through: now.Add(-45 * time.Minute),
Matchers: `{foo="bar"}`,
Limit: 10,
},
expectedCallsBeforeCache: 1,
expectedCallsAfterCache: 0,
expectedResp: volResp,
},
} {
t.Run(tc.name, func(t *testing.T) {
cfg := queryrangebase.ResultsCacheConfig{
CacheConfig: cache.Config{
Cache: cache.NewMockCache(),
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache)
defer c.Stop()
require.NoError(t, err)
lim := fakeLimits{maxStatsCacheFreshness: tc.maxStatsCacheFreshness}
cacheMiddleware, err := NewVolumeCacheMiddleware(
log.NewNopLogger(),
WithSplitByLimits(lim, 24*time.Hour),
DefaultCodec,
c,
nil,
nil,
func(_ context.Context, _ []string, _ queryrangebase.Request) int {
return 1
},
false,
nil,
nil,
)
require.NoError(t, err)
calls, statsHandler := volumeResultHandler(volResp)
rc := cacheMiddleware.Wrap(statsHandler)
ctx := user.InjectOrgID(context.Background(), "fake")
resp, err := rc.Do(ctx, tc.req)
require.NoError(t, err)
require.Equal(t, tc.expectedCallsBeforeCache, *calls)
require.Equal(t, tc.expectedResp, resp)
// Doing same request again
*calls = 0
resp, err = rc.Do(ctx, tc.req)
require.NoError(t, err)
require.Equal(t, tc.expectedCallsAfterCache, *calls)
require.Equal(t, tc.expectedResp, resp)
})
}
}
func volumeResultHandler(v *VolumeResponse) (*int, queryrangebase.Handler) {
calls := 0
return &calls, queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) {
calls++
return v, nil
})
}

@ -130,6 +130,15 @@ var queryTests = []struct {
"requests": 0,
"downloadTime": 0
},
"volumeResult": {
"entriesFound": 0,
"entriesRequested": 0,
"entriesStored": 0,
"bytesReceived": 0,
"bytesSent": 0,
"requests": 0,
"downloadTime": 0
},
"result": {
"entriesFound": 0,
"entriesRequested": 0,

@ -132,6 +132,15 @@ var queryTests = []struct {
"requests": 0,
"downloadTime": 0
},
"volumeResult": {
"entriesFound": 0,
"entriesRequested": 0,
"entriesStored": 0,
"bytesReceived": 0,
"bytesSent": 0,
"requests": 0,
"downloadTime": 0
},
"result": {
"entriesFound": 0,
"entriesRequested": 0,
@ -288,6 +297,15 @@ var queryTests = []struct {
"requests": 0,
"downloadTime": 0
},
"volumeResult": {
"entriesFound": 0,
"entriesRequested": 0,
"entriesStored": 0,
"bytesReceived": 0,
"bytesSent": 0,
"requests": 0,
"downloadTime": 0
},
"result": {
"entriesFound": 0,
"entriesRequested": 0,
@ -465,6 +483,15 @@ var queryTests = []struct {
"requests": 0,
"downloadTime": 0
},
"volumeResult": {
"entriesFound": 0,
"entriesRequested": 0,
"entriesStored": 0,
"bytesReceived": 0,
"bytesSent": 0,
"requests": 0,
"downloadTime": 0
},
"result": {
"entriesFound": 0,
"entriesRequested": 0,
