mirror of https://github.com/grafana/loki
Bloom-Gateway cache (#11380)
**What this PR does / why we need it**: This PR adds caching to the bloom-gateway client. It uses the result cache from https://github.com/grafana/loki/pull/11343. Here's how we do it: - Merge responses: group all chunks by FP and remove duplicated chunk checksums. - Extract responses based on time span: For all chunks in each FP, add to the extracted response only the chunks that overlap with the desired start and end time.
parent
79693d79ef
commit
a0b462d366
@ -0,0 +1,217 @@ |
||||
package bloomgateway |
||||
|
||||
import ( |
||||
"context" |
||||
"flag" |
||||
"sort" |
||||
"time" |
||||
|
||||
"github.com/go-kit/log" |
||||
"github.com/prometheus/common/model" |
||||
"golang.org/x/exp/slices" |
||||
"google.golang.org/grpc" |
||||
|
||||
"github.com/grafana/loki/pkg/logproto" |
||||
"github.com/grafana/loki/pkg/storage/chunk/cache" |
||||
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" |
||||
) |
||||
|
||||
const (
	// cacheParalellism limits how many partial requests the results cache
	// issues concurrently; bloom-gateway requests are processed serially.
	// NOTE(review): name has a typo ("Paralellism"); left as-is because it
	// is referenced elsewhere in this file.
	cacheParalellism = 1
)
||||
|
||||
// CacheConfig wraps the generic results-cache configuration so the
// bloom-gateway client can expose it under its own flag prefix.
type CacheConfig struct {
	resultscache.Config `yaml:",inline"`
}

// RegisterFlags registers flags.
func (cfg *CacheConfig) RegisterFlags(f *flag.FlagSet) {
	cfg.RegisterFlagsWithPrefix("bloom-gateway-client.cache.", f)
}

// RegisterFlagsWithPrefix registers the embedded results-cache flags under
// the given prefix.
func (cfg *CacheConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
	cfg.Config.RegisterFlagsWithPrefix(f, prefix)
}
||||
|
||||
// CacheLimits is the subset of per-tenant limits needed by the bloom-gateway
// client cache. It extends the generic results-cache limits with the
// tenant-specific cache key interval.
type CacheLimits interface {
	resultscache.Limits
	// BloomGatewayCacheKeyInterval returns the time interval used to split
	// cache keys for the given tenant.
	BloomGatewayCacheKeyInterval(tenantID string) time.Duration
}
||||
|
||||
// keyGen generates cache keys for bloom-gateway requests, splitting them by
// the tenant-specific key interval from the embedded limits.
type keyGen struct {
	CacheLimits
}

// newCacheKeyGen returns a keyGen backed by the given limits.
func newCacheKeyGen(limits CacheLimits) keyGen {
	return keyGen{limits}
}

// GenerateCacheKey generates a cache key for the tenant and request by
// delegating to a ConstSplitter configured with the tenant's key interval.
func (k keyGen) GenerateCacheKey(ctx context.Context, tenant string, r resultscache.Request) string {
	return resultscache.ConstSplitter(k.BloomGatewayCacheKeyInterval(tenant)).GenerateCacheKey(ctx, tenant, r)
}
||||
|
||||
// extractor trims cached responses down to a requested time span.
type extractor struct{}

// newExtractor returns a stateless extractor.
func newExtractor() extractor {
	return extractor{}
}
||||
|
||||
// Extract extracts a subset of a response from the `start` and `end` timestamps in milliseconds.
|
||||
// We remove chunks that are not within the given time range.
|
||||
func (e extractor) Extract(start, end int64, r resultscache.Response, _, _ int64) resultscache.Response { |
||||
res := r.(*logproto.FilterChunkRefResponse) |
||||
|
||||
chunkRefs := make([]*logproto.GroupedChunkRefs, 0, len(res.ChunkRefs)) |
||||
for _, chunkRef := range res.ChunkRefs { |
||||
refs := make([]*logproto.ShortRef, 0, len(chunkRef.Refs)) |
||||
for _, ref := range chunkRef.Refs { |
||||
if model.Time(end) < ref.From || ref.Through <= model.Time(start) { |
||||
continue |
||||
} |
||||
refs = append(refs, ref) |
||||
} |
||||
if len(refs) > 0 { |
||||
chunkRefs = append(chunkRefs, &logproto.GroupedChunkRefs{ |
||||
Fingerprint: chunkRef.Fingerprint, |
||||
Tenant: chunkRef.Tenant, |
||||
Refs: refs, |
||||
}) |
||||
} |
||||
} |
||||
|
||||
return &logproto.FilterChunkRefResponse{ |
||||
ChunkRefs: chunkRefs, |
||||
} |
||||
} |
||||
|
||||
// merger combines multiple cached/fetched responses into a single response.
type merger struct{}

// newMerger returns a stateless merger.
func newMerger() merger {
	return merger{}
}
||||
|
||||
// MergeResponse merges responses from multiple requests into a single Response
|
||||
// We merge all chunks grouped by their fingerprint.
|
||||
func (m merger) MergeResponse(responses ...resultscache.Response) (resultscache.Response, error) { |
||||
var size int |
||||
for _, r := range responses { |
||||
res := r.(*logproto.FilterChunkRefResponse) |
||||
size += len(res.ChunkRefs) |
||||
} |
||||
|
||||
chunkRefs := make([]*logproto.GroupedChunkRefs, 0, size) |
||||
for _, r := range responses { |
||||
res := r.(*logproto.FilterChunkRefResponse) |
||||
chunkRefs = append(chunkRefs, res.ChunkRefs...) |
||||
} |
||||
|
||||
return &logproto.FilterChunkRefResponse{ |
||||
ChunkRefs: mergeGroupedChunkRefs(chunkRefs), |
||||
}, nil |
||||
} |
||||
|
||||
// Merge duplicated fingerprints by:
|
||||
// 1. Sort the chunkRefs by their stream fingerprint
|
||||
// 2. Remove duplicated FPs appending all chunks into the first fingerprint's chunk list.
|
||||
func mergeGroupedChunkRefs(chunkRefs []*logproto.GroupedChunkRefs) []*logproto.GroupedChunkRefs { |
||||
if len(chunkRefs) <= 1 { |
||||
return chunkRefs |
||||
} |
||||
|
||||
sort.Slice(chunkRefs, func(i, j int) bool { |
||||
return chunkRefs[i].Fingerprint < chunkRefs[j].Fingerprint |
||||
}) |
||||
|
||||
var lastDiffFP int |
||||
for i := 1; i < len(chunkRefs); i++ { |
||||
if chunkRefs[lastDiffFP].Fingerprint == chunkRefs[i].Fingerprint { |
||||
chunkRefs[lastDiffFP].Refs = mergeShortRefs(append(chunkRefs[lastDiffFP].Refs, chunkRefs[i].Refs...)) |
||||
} else { |
||||
lastDiffFP++ |
||||
chunkRefs[lastDiffFP] = chunkRefs[i] |
||||
} |
||||
} |
||||
return chunkRefs[:lastDiffFP+1] |
||||
} |
||||
|
||||
// mergeShortRefs merges short-refs by removing duplicated checksums.
|
||||
func mergeShortRefs(refs []*logproto.ShortRef) []*logproto.ShortRef { |
||||
if len(refs) <= 1 { |
||||
return refs |
||||
} |
||||
|
||||
sort.Slice(refs, func(i, j int) bool { |
||||
return refs[i].Checksum < refs[j].Checksum |
||||
}) |
||||
return slices.CompactFunc(refs, func(a, b *logproto.ShortRef) bool { |
||||
return a.Checksum == b.Checksum |
||||
}) |
||||
} |
||||
|
||||
// ClientCache wraps a bloom-gateway client with a results cache so repeated
// FilterChunkRefs calls for overlapping time spans are served from cache.
type ClientCache struct {
	cache  *resultscache.ResultsCache
	limits CacheLimits
	logger log.Logger
}
||||
|
||||
// NewBloomGatewayClientCacheMiddleware wraps the given bloom-gateway client
// with a results cache: responses are cached per tenant and key interval, and
// only the uncached portions of a request are forwarded to `next`.
func NewBloomGatewayClientCacheMiddleware(
	logger log.Logger,
	next logproto.BloomGatewayClient,
	c cache.Cache,
	limits CacheLimits,
	cacheGen resultscache.CacheGenNumberLoader,
	retentionEnabled bool,
) *ClientCache {
	// Adapt the downstream gRPC client to the resultscache.Handler interface,
	// restoring the gRPC call options carried alongside the request.
	nextAsHandler := resultscache.HandlerFunc(func(ctx context.Context, cacheReq resultscache.Request) (resultscache.Response, error) {
		req := cacheReq.(requestWithGrpcCallOptions)
		return next.FilterChunkRefs(ctx, req.FilterChunkRefRequest, req.grpcCallOptions...)
	})

	resultsCache := resultscache.NewResultsCache(
		logger,
		c,
		nextAsHandler,
		newCacheKeyGen(limits),
		limits,
		newMerger(),
		newExtractor(),
		nil, // NOTE(review): optional hook left unset — presumably a should-cache predicate; confirm against resultscache.NewResultsCache
		nil, // NOTE(review): optional hook left unset — see above
		func(_ context.Context, _ []string, _ resultscache.Request) int {
			return cacheParalellism
		},
		cacheGen,
		retentionEnabled,
	)

	return &ClientCache{
		cache:  resultsCache,
		limits: limits,
		logger: logger,
	}
}
||||
|
||||
func (c *ClientCache) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunkRefRequest, opts ...grpc.CallOption) (*logproto.FilterChunkRefResponse, error) { |
||||
cacheReq := requestWithGrpcCallOptions{ |
||||
FilterChunkRefRequest: req, |
||||
grpcCallOptions: opts, |
||||
} |
||||
res, err := c.cache.Do(ctx, cacheReq) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return res.(*logproto.FilterChunkRefResponse), nil |
||||
} |
||||
|
||||
// requestWithGrpcCallOptions bundles a FilterChunkRefRequest with the gRPC
// call options it was issued with, so the options survive the round trip
// through the results cache (which only sees resultscache.Request values).
type requestWithGrpcCallOptions struct {
	*logproto.FilterChunkRefRequest
	grpcCallOptions []grpc.CallOption
}

// WithStartEndForCache returns a copy of the request trimmed to the given
// time span, preserving the original gRPC call options.
func (r requestWithGrpcCallOptions) WithStartEndForCache(start time.Time, end time.Time) resultscache.Request {
	return requestWithGrpcCallOptions{
		FilterChunkRefRequest: r.FilterChunkRefRequest.WithStartEndForCache(start, end).(*logproto.FilterChunkRefRequest),
		grpcCallOptions:       r.grpcCallOptions,
	}
}
||||
@ -0,0 +1,494 @@ |
||||
package bloomgateway |
||||
|
||||
import ( |
||||
"context" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/go-kit/log" |
||||
"github.com/grafana/dskit/user" |
||||
"github.com/prometheus/common/model" |
||||
"github.com/stretchr/testify/require" |
||||
"google.golang.org/grpc" |
||||
|
||||
"github.com/grafana/loki/pkg/logproto" |
||||
"github.com/grafana/loki/pkg/logqlmodel/stats" |
||||
"github.com/grafana/loki/pkg/storage/chunk/cache" |
||||
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" |
||||
"github.com/grafana/loki/pkg/util/constants" |
||||
) |
||||
|
||||
// templateResponse is the shared fixture for the tests below.
// Its chunks span the time range 1000-4000.
// Range is 1000-4000
var templateResponse = &logproto.FilterChunkRefResponse{
	ChunkRefs: []*logproto.GroupedChunkRefs{
		{
			Fingerprint: 1,
			Tenant:      "fake",
			Refs: []*logproto.ShortRef{
				{
					From:     1000,
					Through:  1500,
					Checksum: 10,
				},
				{
					From:     1500,
					Through:  2500,
					Checksum: 20,
				},
			},
		},
		{
			Fingerprint: 2,
			Tenant:      "fake",
			Refs: []*logproto.ShortRef{
				{
					From:     3000,
					Through:  4000,
					Checksum: 30,
				},
				{
					From:     1000,
					Through:  3000,
					Checksum: 40,
				},
			},
		},
	},
}
||||
|
||||
// TestExtract verifies that extractor.Extract keeps only the chunks
// overlapping the requested [start, end] window and drops fingerprint
// groups left empty.
func TestExtract(t *testing.T) {
	for _, tc := range []struct {
		name     string
		start    int64
		end      int64
		input    *logproto.FilterChunkRefResponse
		expected *logproto.FilterChunkRefResponse
	}{
		{
			// Window entirely before the fixture's 1000-4000 range.
			name:  "start and end out of range",
			start: 100,
			end:   200,
			input: templateResponse,
			expected: &logproto.FilterChunkRefResponse{
				ChunkRefs: []*logproto.GroupedChunkRefs{},
			},
		},
		{
			name:     "start spans exact range",
			start:    1000,
			end:      4000,
			input:    templateResponse,
			expected: templateResponse,
		},
		{
			name:     "start spans more than range",
			start:    100,
			end:      5000,
			input:    templateResponse,
			expected: templateResponse,
		},
		{
			// Window covers only part of each fingerprint's chunks.
			name:  "start and end within range",
			start: 1700,
			end:   2700,
			input: templateResponse,
			expected: &logproto.FilterChunkRefResponse{
				ChunkRefs: []*logproto.GroupedChunkRefs{
					{
						Fingerprint: 1,
						Tenant:      "fake",
						Refs: []*logproto.ShortRef{
							{
								From:     1500,
								Through:  2500,
								Checksum: 20,
							},
						},
					},
					{
						Fingerprint: 2,
						Tenant:      "fake",
						Refs: []*logproto.ShortRef{
							{
								From:     1000,
								Through:  3000,
								Checksum: 40,
							},
						},
					},
				},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			e := newExtractor()
			actual := e.Extract(tc.start, tc.end, tc.input, 0, 0)
			require.Equal(t, tc.expected, actual)
		})
	}
}
||||
|
||||
func TestMerge(t *testing.T) { |
||||
for _, tc := range []struct { |
||||
name string |
||||
input []*logproto.FilterChunkRefResponse |
||||
expected *logproto.FilterChunkRefResponse |
||||
}{ |
||||
{ |
||||
name: "empy input", |
||||
input: []*logproto.FilterChunkRefResponse{}, |
||||
expected: &logproto.FilterChunkRefResponse{ |
||||
ChunkRefs: []*logproto.GroupedChunkRefs{}, |
||||
}, |
||||
}, |
||||
{ |
||||
name: "single input", |
||||
input: []*logproto.FilterChunkRefResponse{templateResponse}, |
||||
expected: templateResponse, |
||||
}, |
||||
{ |
||||
name: "repeating and non-repeating fingerprint with repeating and non-repeating chunks", |
||||
input: []*logproto.FilterChunkRefResponse{ |
||||
{ |
||||
ChunkRefs: []*logproto.GroupedChunkRefs{ |
||||
{ |
||||
Fingerprint: 1, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
Fingerprint: 2, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
ChunkRefs: []*logproto.GroupedChunkRefs{ |
||||
// Same FP as in previous input and same chunks
|
||||
{ |
||||
Fingerprint: 1, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
// Same FP as in previous input, but different chunks
|
||||
{ |
||||
Fingerprint: 2, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
// Same chunk as in previous input
|
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
// New chunk
|
||||
{ |
||||
From: 2000, |
||||
Through: 2500, |
||||
Checksum: 30, |
||||
}, |
||||
}, |
||||
}, |
||||
// New FP
|
||||
{ |
||||
Fingerprint: 3, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
ChunkRefs: []*logproto.GroupedChunkRefs{ |
||||
// Same FP as in previous input and diff chunks
|
||||
{ |
||||
Fingerprint: 2, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 700, |
||||
Through: 1000, |
||||
Checksum: 40, |
||||
}, |
||||
{ |
||||
From: 2000, |
||||
Through: 2700, |
||||
Checksum: 50, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
expected: &logproto.FilterChunkRefResponse{ |
||||
ChunkRefs: []*logproto.GroupedChunkRefs{ |
||||
{ |
||||
Fingerprint: 1, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
Fingerprint: 2, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
{ |
||||
From: 2000, |
||||
Through: 2500, |
||||
Checksum: 30, |
||||
}, |
||||
{ |
||||
From: 700, |
||||
Through: 1000, |
||||
Checksum: 40, |
||||
}, |
||||
{ |
||||
From: 2000, |
||||
Through: 2700, |
||||
Checksum: 50, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
Fingerprint: 3, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
} { |
||||
t.Run(tc.name, func(t *testing.T) { |
||||
input := make([]resultscache.Response, 0, len(tc.input)) |
||||
for _, i := range tc.input { |
||||
input = append(input, i) |
||||
} |
||||
|
||||
m := newMerger() |
||||
actual, err := m.MergeResponse(input...) |
||||
require.NoError(t, err) |
||||
require.Equal(t, tc.expected, actual) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
// TestCache exercises the full cache middleware end-to-end with a mock
// backend: first request misses, repeat request hits, a widened time range
// triggers partial hits (two backend calls for the leading/trailing spans),
// and the widened result is then fully cached.
func TestCache(t *testing.T) {
	ctx := user.InjectOrgID(context.Background(), "fake")

	limits := mockLimits{
		cacheInterval: 15 * time.Minute,
	}

	cfg := CacheConfig{
		Config: resultscache.Config{
			CacheConfig: cache.Config{
				Cache: cache.NewMockCache(),
			},
		},
	}
	c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.BloomFilterCache, constants.Loki)
	require.NoError(t, err)
	defer c.Stop()

	chunkRefs := []*logproto.ChunkRef{
		{
			Fingerprint: 2,
			UserID:      "fake",
			From:        1500,
			Through:     2500,
			Checksum:    30,
		},
		{
			Fingerprint: 3,
			UserID:      "fake",
			From:        2500,
			Through:     3500,
		},
	}
	req := &logproto.FilterChunkRefRequest{
		From:    model.Time(2000),
		Through: model.Time(3000),
		Refs:    groupRefs(t, chunkRefs),
		Filters: []*logproto.LineFilterExpression{
			{Operator: 1, Match: "foo"},
		},
	}
	expectedRes := &logproto.FilterChunkRefResponse{
		ChunkRefs: groupRefs(t, chunkRefs),
	}

	server, calls := newMockServer(expectedRes)

	cacheMiddleware := NewBloomGatewayClientCacheMiddleware(
		log.NewNopLogger(),
		server,
		c,
		limits,
		nil,
		false,
	)

	// First call should go to the server
	*calls = 0
	res, err := cacheMiddleware.FilterChunkRefs(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 1, *calls)
	require.Equal(t, expectedRes, res)

	// Second call should go to the cache
	*calls = 0
	res, err = cacheMiddleware.FilterChunkRefs(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 0, *calls)
	require.Equal(t, expectedRes, res)

	// Doing a request with new start and end should:
	// 1. hit the server the leading time
	// 2. hit the cache the cached span
	// 3. hit the server for the trailing time
	newChunkRefs := []*logproto.ChunkRef{
		{
			Fingerprint: 1,
			UserID:      "fake",
			From:        1000,
			Through:     1500,
			Checksum:    10,
		},
		{
			Fingerprint: 4,
			UserID:      "fake",
			From:        3500,
			Through:     4500,
		},
	}
	server.SetResponse(&logproto.FilterChunkRefResponse{
		ChunkRefs: groupRefs(t, newChunkRefs),
	})
	expectedRes = &logproto.FilterChunkRefResponse{
		ChunkRefs: groupRefs(t, append(chunkRefs, newChunkRefs...)),
	}
	req.From = model.Time(100)
	req.Through = model.Time(5000)
	*calls = 0
	res, err = cacheMiddleware.FilterChunkRefs(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 2, *calls)
	require.Equal(t, expectedRes, res)

	// Doing a request again should only hit the cache
	*calls = 0
	res, err = cacheMiddleware.FilterChunkRefs(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 0, *calls)
	require.Equal(t, expectedRes, res)
}
||||
|
||||
type mockServer struct { |
||||
calls *int |
||||
res *logproto.FilterChunkRefResponse |
||||
} |
||||
|
||||
func newMockServer(res *logproto.FilterChunkRefResponse) (*mockServer, *int) { |
||||
var calls int |
||||
return &mockServer{ |
||||
calls: &calls, |
||||
res: res, |
||||
}, &calls |
||||
} |
||||
|
||||
func (s *mockServer) SetResponse(res *logproto.FilterChunkRefResponse) { |
||||
s.res = res |
||||
} |
||||
|
||||
func (s *mockServer) FilterChunkRefs(_ context.Context, _ *logproto.FilterChunkRefRequest, _ ...grpc.CallOption) (*logproto.FilterChunkRefResponse, error) { |
||||
*s.calls++ |
||||
return s.res, nil |
||||
} |
||||
|
||||
// mockLimits is a fixed-value CacheLimits implementation for tests.
type mockLimits struct {
	cacheFreshness time.Duration
	cacheInterval  time.Duration
}

// MaxCacheFreshness returns the configured freshness regardless of tenant.
func (m mockLimits) MaxCacheFreshness(_ context.Context, _ string) time.Duration {
	return m.cacheFreshness
}

// BloomGatewayCacheKeyInterval returns the configured key interval
// regardless of tenant.
func (m mockLimits) BloomGatewayCacheKeyInterval(_ string) time.Duration {
	return m.cacheInterval
}
||||
Loading…
Reference in new issue