mirror of https://github.com/grafana/loki
chore: Remove experimental bloom settings that have never been used or are not used any more (#15787)
Remove experimental results cache for bloom gateway client
This feature never made it into production. Since it is only an
experimental feature, it can be removed without prior deprecation.
Remove unused `bloom_gateway_shard_size` runtime setting
The setting was once used when bloom gateways still used a ring for
sharding.
Signed-off-by: Christian Haudum <christian.haudum@gmail.com>
pull/15774/head^2
parent
45bae6dd1c
commit
1a9f382e6f
@ -1,189 +0,0 @@ |
||||
package bloomgateway |
||||
|
||||
import ( |
||||
"context" |
||||
"flag" |
||||
"time" |
||||
|
||||
"github.com/go-kit/log" |
||||
"github.com/prometheus/common/model" |
||||
"google.golang.org/grpc" |
||||
|
||||
"github.com/grafana/loki/v3/pkg/logproto" |
||||
"github.com/grafana/loki/v3/pkg/storage/chunk/cache" |
||||
"github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache" |
||||
) |
||||
|
||||
const (
	// cacheParalellism caps the number of parallel downstream requests the
	// results cache issues for a single lookup (passed to NewResultsCache
	// via the parallelism callback below).
	// NOTE(review): identifier misspells "parallelism"; kept as-is because
	// other code in this package references it by this name.
	cacheParalellism = 1
)
||||
|
||||
// CacheConfig holds the configuration for the bloom gateway client results
// cache. It embeds resultscache.Config, whose fields are inlined in YAML.
type CacheConfig struct {
	resultscache.Config `yaml:",inline"`
}
||||
|
||||
// RegisterFlags registers flags.
|
||||
func (cfg *CacheConfig) RegisterFlags(f *flag.FlagSet) { |
||||
cfg.RegisterFlagsWithPrefix("bloom-gateway-client.cache.", f) |
||||
} |
||||
|
||||
func (cfg *CacheConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { |
||||
cfg.Config.RegisterFlagsWithPrefix(f, prefix) |
||||
} |
||||
|
||||
// CacheLimits is the per-tenant limits interface required by the bloom
// gateway client cache. It extends the generic results-cache limits with
// the bloom-gateway-specific cache key interval.
type CacheLimits interface {
	resultscache.Limits
	// BloomGatewayCacheKeyInterval returns the time interval used to split
	// cache keys for the given tenant.
	BloomGatewayCacheKeyInterval(tenantID string) time.Duration
}
||||
|
||||
type keyGen struct { |
||||
CacheLimits |
||||
} |
||||
|
||||
func newCacheKeyGen(limits CacheLimits) keyGen { |
||||
return keyGen{limits} |
||||
} |
||||
|
||||
// TODO(owen-d): need to implement our own key-generation which accounts for fingerprint ranges requested.
|
||||
func (k keyGen) GenerateCacheKey(ctx context.Context, tenant string, r resultscache.Request) string { |
||||
return resultscache.ConstSplitter(k.BloomGatewayCacheKeyInterval(tenant)).GenerateCacheKey(ctx, tenant, r) |
||||
} |
||||
|
||||
// extractor trims cached responses down to a requested time range.
type extractor struct{}

// newExtractor returns a new, stateless extractor.
func newExtractor() extractor {
	return extractor{}
}
||||
|
||||
// Extract extracts a subset of a response from the `start` and `end` timestamps in milliseconds.
|
||||
// We remove chunks that are not within the given time range.
|
||||
func (e extractor) Extract(start, end int64, r resultscache.Response, _, _ int64) resultscache.Response { |
||||
res := r.(*logproto.FilterChunkRefResponse) |
||||
|
||||
chunkRefs := make([]*logproto.GroupedChunkRefs, 0, len(res.ChunkRefs)) |
||||
for _, chunkRef := range res.ChunkRefs { |
||||
refs := make([]*logproto.ShortRef, 0, len(chunkRef.Refs)) |
||||
for _, ref := range chunkRef.Refs { |
||||
if model.Time(end) < ref.From || ref.Through <= model.Time(start) { |
||||
continue |
||||
} |
||||
refs = append(refs, ref) |
||||
} |
||||
if len(refs) > 0 { |
||||
chunkRefs = append(chunkRefs, &logproto.GroupedChunkRefs{ |
||||
Fingerprint: chunkRef.Fingerprint, |
||||
Labels: chunkRef.Labels, |
||||
Tenant: chunkRef.Tenant, |
||||
Refs: refs, |
||||
}) |
||||
} |
||||
} |
||||
|
||||
return &logproto.FilterChunkRefResponse{ |
||||
ChunkRefs: chunkRefs, |
||||
} |
||||
} |
||||
|
||||
type merger struct{} |
||||
|
||||
func newMerger() merger { |
||||
return merger{} |
||||
} |
||||
|
||||
// MergeResponse merges responses from multiple requests into a single Response
|
||||
// We merge all chunks grouped by their fingerprint.
|
||||
func (m merger) MergeResponse(responses ...resultscache.Response) (resultscache.Response, error) { |
||||
var size int |
||||
|
||||
unmerged := make([][]*logproto.GroupedChunkRefs, 0, len(responses)) |
||||
for _, r := range responses { |
||||
res := r.(*logproto.FilterChunkRefResponse) |
||||
unmerged = append(unmerged, res.ChunkRefs) |
||||
size += len(res.ChunkRefs) |
||||
} |
||||
|
||||
buf := make([]*logproto.GroupedChunkRefs, 0, size) |
||||
deduped, err := mergeSeries(unmerged, buf) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return &logproto.FilterChunkRefResponse{ChunkRefs: deduped}, nil |
||||
} |
||||
|
||||
// ClientCache wraps a BloomGatewayClient and serves FilterChunkRefs
// responses from a results cache when possible; cache misses are forwarded
// to the wrapped client.
type ClientCache struct {
	cache  *resultscache.ResultsCache  // results cache in front of `next`
	next   logproto.BloomGatewayClient // downstream bloom gateway client
	limits CacheLimits
	logger log.Logger
}
||||
|
||||
// NewBloomGatewayClientCacheMiddleware wraps the given BloomGatewayClient
// with a results cache for FilterChunkRefs calls. Cache keys are generated
// from the per-tenant cache key interval, merged with the package merger,
// and trimmed with the package extractor.
func NewBloomGatewayClientCacheMiddleware(
	logger log.Logger,
	next logproto.BloomGatewayClient,
	c cache.Cache,
	limits CacheLimits,
	cacheGen resultscache.CacheGenNumberLoader,
	retentionEnabled bool,
) *ClientCache {
	// Adapt the downstream gRPC client to the resultscache handler
	// interface; requests arriving here are cache misses.
	nextAsHandler := resultscache.HandlerFunc(func(ctx context.Context, cacheReq resultscache.Request) (resultscache.Response, error) {
		req := cacheReq.(requestWithGrpcCallOptions)
		return next.FilterChunkRefs(ctx, req.FilterChunkRefRequest, req.grpcCallOptions...)
	})

	resultsCache := resultscache.NewResultsCache(
		logger,
		c,
		nextAsHandler,
		newCacheKeyGen(limits),
		limits,
		newMerger(),
		newExtractor(),
		nil, // NOTE(review): optional hook left unset — confirm parameter meaning against resultscache.NewResultsCache
		nil, // NOTE(review): optional hook left unset — confirm parameter meaning against resultscache.NewResultsCache
		func(_ context.Context, _ []string, _ resultscache.Request) int {
			// Fixed parallelism for downstream requests per lookup.
			return cacheParalellism
		},
		cacheGen,
		retentionEnabled,
		false,
	)

	return &ClientCache{
		next:   next,
		cache:  resultsCache,
		limits: limits,
		logger: logger,
	}
}
||||
|
||||
// PrefetchBloomBlocks implements logproto.BloomGatewayClient.
// Prefetch calls are not cached; they pass straight through to the wrapped
// client.
func (c *ClientCache) PrefetchBloomBlocks(ctx context.Context, in *logproto.PrefetchBloomBlocksRequest, opts ...grpc.CallOption) (*logproto.PrefetchBloomBlocksResponse, error) {
	return c.next.PrefetchBloomBlocks(ctx, in, opts...)
}
||||
|
||||
// FilterChunkRefs implements logproto.BloomGatewayClient.
|
||||
func (c *ClientCache) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunkRefRequest, opts ...grpc.CallOption) (*logproto.FilterChunkRefResponse, error) { |
||||
cacheReq := requestWithGrpcCallOptions{ |
||||
FilterChunkRefRequest: req, |
||||
grpcCallOptions: opts, |
||||
} |
||||
res, err := c.cache.Do(ctx, cacheReq) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return res.(*logproto.FilterChunkRefResponse), nil |
||||
} |
||||
|
||||
type requestWithGrpcCallOptions struct { |
||||
*logproto.FilterChunkRefRequest |
||||
grpcCallOptions []grpc.CallOption |
||||
} |
||||
|
||||
func (r requestWithGrpcCallOptions) WithStartEndForCache(start time.Time, end time.Time) resultscache.Request { |
||||
return requestWithGrpcCallOptions{ |
||||
FilterChunkRefRequest: r.FilterChunkRefRequest.WithStartEndForCache(start, end).(*logproto.FilterChunkRefRequest), |
||||
grpcCallOptions: r.grpcCallOptions, |
||||
} |
||||
} |
||||
@ -1,507 +0,0 @@ |
||||
package bloomgateway |
||||
|
||||
import ( |
||||
"context" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/go-kit/log" |
||||
"github.com/grafana/dskit/user" |
||||
"github.com/prometheus/common/model" |
||||
"github.com/stretchr/testify/require" |
||||
"google.golang.org/grpc" |
||||
|
||||
"github.com/grafana/loki/v3/pkg/logproto" |
||||
"github.com/grafana/loki/v3/pkg/logql/syntax" |
||||
"github.com/grafana/loki/v3/pkg/logqlmodel/stats" |
||||
"github.com/grafana/loki/v3/pkg/querier/plan" |
||||
"github.com/grafana/loki/v3/pkg/storage/chunk/cache" |
||||
"github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache" |
||||
"github.com/grafana/loki/v3/pkg/util/constants" |
||||
) |
||||
|
||||
// templateResponse is the shared test fixture: two series whose chunks
// together span the time range 1000-4000.
// Range is 1000-4000
var templateResponse = &logproto.FilterChunkRefResponse{
	ChunkRefs: []*logproto.GroupedChunkRefs{
		{
			Fingerprint: 1,
			Tenant:      "fake",
			Refs: []*logproto.ShortRef{
				{
					From:     1000,
					Through:  1500,
					Checksum: 10,
				},
				{
					From:     1500,
					Through:  2500,
					Checksum: 20,
				},
			},
		},
		{
			Fingerprint: 2,
			Tenant:      "fake",
			Refs: []*logproto.ShortRef{
				{
					From:     3000,
					Through:  4000,
					Checksum: 30,
				},
				{
					From:     1000,
					Through:  3000,
					Checksum: 40,
				},
			},
		},
	},
}
||||
|
||||
// TestExtract verifies that extractor.Extract keeps only the chunks that
// overlap the requested [start, end] range and drops groups left empty.
func TestExtract(t *testing.T) {
	for _, tc := range []struct {
		name     string
		start    int64
		end      int64
		input    *logproto.FilterChunkRefResponse
		expected *logproto.FilterChunkRefResponse
	}{
		{
			// Requested range is entirely before the fixture's chunks.
			name:  "start and end out of range",
			start: 100,
			end:   200,
			input: templateResponse,
			expected: &logproto.FilterChunkRefResponse{
				ChunkRefs: []*logproto.GroupedChunkRefs{},
			},
		},
		{
			// Requested range matches the fixture exactly: nothing dropped.
			name:     "start spans exact range",
			start:    1000,
			end:      4000,
			input:    templateResponse,
			expected: templateResponse,
		},
		{
			// Requested range is a superset of the fixture: nothing dropped.
			name:     "start spans more than range",
			start:    100,
			end:      5000,
			input:    templateResponse,
			expected: templateResponse,
		},
		{
			// Requested range intersects a single chunk per series.
			name:  "start and end within range",
			start: 1700,
			end:   2700,
			input: templateResponse,
			expected: &logproto.FilterChunkRefResponse{
				ChunkRefs: []*logproto.GroupedChunkRefs{
					{
						Fingerprint: 1,
						Tenant:      "fake",
						Refs: []*logproto.ShortRef{
							{
								From:     1500,
								Through:  2500,
								Checksum: 20,
							},
						},
					},
					{
						Fingerprint: 2,
						Tenant:      "fake",
						Refs: []*logproto.ShortRef{
							{
								From:     1000,
								Through:  3000,
								Checksum: 40,
							},
						},
					},
				},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			e := newExtractor()
			actual := e.Extract(tc.start, tc.end, tc.input, 0, 0)
			require.Equal(t, tc.expected, actual)
		})
	}
}
||||
|
||||
func TestMerge(t *testing.T) { |
||||
for _, tc := range []struct { |
||||
name string |
||||
input []*logproto.FilterChunkRefResponse |
||||
expected *logproto.FilterChunkRefResponse |
||||
}{ |
||||
{ |
||||
name: "empy input", |
||||
input: []*logproto.FilterChunkRefResponse{}, |
||||
expected: &logproto.FilterChunkRefResponse{ |
||||
ChunkRefs: []*logproto.GroupedChunkRefs{}, |
||||
}, |
||||
}, |
||||
{ |
||||
name: "single input", |
||||
input: []*logproto.FilterChunkRefResponse{templateResponse}, |
||||
expected: templateResponse, |
||||
}, |
||||
{ |
||||
name: "repeating and non-repeating fingerprint with repeating and non-repeating chunks", |
||||
input: []*logproto.FilterChunkRefResponse{ |
||||
{ |
||||
ChunkRefs: []*logproto.GroupedChunkRefs{ |
||||
{ |
||||
Fingerprint: 1, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
Fingerprint: 2, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
ChunkRefs: []*logproto.GroupedChunkRefs{ |
||||
// Same FP as in previous input and same chunks
|
||||
{ |
||||
Fingerprint: 1, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
// Same FP as in previous input, but different chunks
|
||||
{ |
||||
Fingerprint: 2, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
// Same chunk as in previous input
|
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
// New chunk
|
||||
{ |
||||
From: 2000, |
||||
Through: 2500, |
||||
Checksum: 30, |
||||
}, |
||||
}, |
||||
}, |
||||
// New FP
|
||||
{ |
||||
Fingerprint: 3, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
ChunkRefs: []*logproto.GroupedChunkRefs{ |
||||
// Same FP as in previous input and diff chunks
|
||||
{ |
||||
Fingerprint: 2, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 700, |
||||
Through: 1000, |
||||
Checksum: 40, |
||||
}, |
||||
{ |
||||
From: 2000, |
||||
Through: 2700, |
||||
Checksum: 50, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
expected: &logproto.FilterChunkRefResponse{ |
||||
ChunkRefs: []*logproto.GroupedChunkRefs{ |
||||
{ |
||||
Fingerprint: 1, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
Fingerprint: 2, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 700, |
||||
Through: 1000, |
||||
Checksum: 40, |
||||
}, |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
{ |
||||
From: 2000, |
||||
Through: 2500, |
||||
Checksum: 30, |
||||
}, |
||||
{ |
||||
From: 2000, |
||||
Through: 2700, |
||||
Checksum: 50, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
Fingerprint: 3, |
||||
Tenant: "fake", |
||||
Refs: []*logproto.ShortRef{ |
||||
{ |
||||
From: 1000, |
||||
Through: 1500, |
||||
Checksum: 10, |
||||
}, |
||||
{ |
||||
From: 1500, |
||||
Through: 2500, |
||||
Checksum: 20, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
} { |
||||
t.Run(tc.name, func(t *testing.T) { |
||||
input := make([]resultscache.Response, 0, len(tc.input)) |
||||
for _, i := range tc.input { |
||||
input = append(input, i) |
||||
} |
||||
|
||||
m := newMerger() |
||||
actual, err := m.MergeResponse(input...) |
||||
require.NoError(t, err) |
||||
|
||||
resp, ok := actual.(*logproto.FilterChunkRefResponse) |
||||
require.True(t, ok) |
||||
require.Equal(t, tc.expected, resp) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
// TestCache exercises the client cache middleware end to end with a mock
// cache and a mock bloom gateway: a cold request hits the server, a repeat
// request is served from cache, and a widened time range triggers partial
// cache hits with server calls only for the uncovered leading/trailing spans.
func TestCache(t *testing.T) {
	ctx := user.InjectOrgID(context.Background(), "fake")

	limits := mockLimits{
		cacheInterval: 15 * time.Minute,
	}

	// In-memory mock cache so no external cache backend is needed.
	cfg := CacheConfig{
		Config: resultscache.Config{
			CacheConfig: cache.Config{
				Cache: cache.NewMockCache(),
			},
		},
	}
	c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.BloomFilterCache, constants.Loki)
	require.NoError(t, err)
	defer c.Stop()

	chunkRefs := []*logproto.ChunkRef{
		{
			Fingerprint: 2,
			UserID:      "fake",
			From:        1500,
			Through:     2500,
			Checksum:    30,
		},
		{
			Fingerprint: 3,
			UserID:      "fake",
			From:        2500,
			Through:     3500,
		},
	}
	expr, err := syntax.ParseExpr(`{foo="bar"} |= "does not match"`)
	require.NoError(t, err)
	req := &logproto.FilterChunkRefRequest{
		From:    model.Time(2000),
		Through: model.Time(3000),
		Refs:    groupRefs(t, chunkRefs),
		Plan:    plan.QueryPlan{AST: expr},
	}
	// The mock server echoes back all requested refs unchanged.
	expectedRes := &logproto.FilterChunkRefResponse{
		ChunkRefs: groupRefs(t, chunkRefs),
	}

	server, calls := newMockServer(expectedRes)

	cacheMiddleware := NewBloomGatewayClientCacheMiddleware(
		log.NewNopLogger(),
		server,
		c,
		limits,
		nil,
		false,
	)

	// First call should go to the server
	*calls = 0
	res, err := cacheMiddleware.FilterChunkRefs(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 1, *calls)
	require.Equal(t, expectedRes, res)

	// Second call should go to the cache
	*calls = 0
	res, err = cacheMiddleware.FilterChunkRefs(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 0, *calls)
	require.Equal(t, expectedRes, res)

	// Doing a request with new start and end should:
	// 1. hit the server the leading time
	// 2. hit the cache the cached span
	// 3. hit the server for the trailing time
	newChunkRefs := []*logproto.ChunkRef{
		{
			Fingerprint: 1,
			UserID:      "fake",
			From:        1000,
			Through:     1500,
			Checksum:    10,
		},
		{
			Fingerprint: 4,
			UserID:      "fake",
			From:        3500,
			Through:     4500,
		},
	}
	server.SetResponse(&logproto.FilterChunkRefResponse{
		ChunkRefs: groupRefs(t, newChunkRefs),
	})
	// Merged result: the cached refs plus the newly-served ones.
	expectedRes = &logproto.FilterChunkRefResponse{
		ChunkRefs: groupRefs(t, append(chunkRefs, newChunkRefs...)),
	}
	req.From = model.Time(100)
	req.Through = model.Time(5000)
	*calls = 0
	res, err = cacheMiddleware.FilterChunkRefs(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 2, *calls)
	require.ElementsMatch(t, expectedRes.ChunkRefs, res.ChunkRefs)

	// Doing a request again should only hit the cache
	*calls = 0
	res, err = cacheMiddleware.FilterChunkRefs(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 0, *calls)
	require.ElementsMatch(t, expectedRes.ChunkRefs, res.ChunkRefs)
}
||||
|
||||
// mockServer is a BloomGatewayClient stub that returns a canned response
// and counts FilterChunkRefs invocations.
type mockServer struct {
	calls *int // number of FilterChunkRefs calls observed
	res   *logproto.FilterChunkRefResponse
}

// Compile-time check that mockServer satisfies the client interface.
var _ logproto.BloomGatewayClient = &mockServer{}
||||
|
||||
func newMockServer(res *logproto.FilterChunkRefResponse) (*mockServer, *int) { |
||||
var calls int |
||||
return &mockServer{ |
||||
calls: &calls, |
||||
res: res, |
||||
}, &calls |
||||
} |
||||
|
||||
func (s *mockServer) SetResponse(res *logproto.FilterChunkRefResponse) { |
||||
s.res = res |
||||
} |
||||
|
||||
// FilterChunkRefs implements logproto.BloomGatewayClient.
|
||||
func (s *mockServer) FilterChunkRefs(_ context.Context, _ *logproto.FilterChunkRefRequest, _ ...grpc.CallOption) (*logproto.FilterChunkRefResponse, error) { |
||||
*s.calls++ |
||||
return s.res, nil |
||||
} |
||||
|
||||
// PrefetchBloomBlocks implements logproto.BloomGatewayClient.
|
||||
func (s *mockServer) PrefetchBloomBlocks(_ context.Context, _ *logproto.PrefetchBloomBlocksRequest, _ ...grpc.CallOption) (*logproto.PrefetchBloomBlocksResponse, error) { |
||||
panic("unimplemented") |
||||
} |
||||
|
||||
// mockLimits provides fixed cache limits for tests.
type mockLimits struct {
	cacheFreshness time.Duration
	cacheInterval  time.Duration
}

// MaxCacheFreshness returns the configured freshness limit regardless of
// tenant.
func (l mockLimits) MaxCacheFreshness(_ context.Context, _ string) time.Duration {
	return l.cacheFreshness
}

// BloomGatewayCacheKeyInterval returns the configured cache key interval
// regardless of tenant.
func (l mockLimits) BloomGatewayCacheKeyInterval(_ string) time.Duration {
	return l.cacheInterval
}
||||
Loading…
Reference in new issue