@@ -6,6 +6,7 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
@@ -99,9 +100,7 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
preFilterChunks := len ( chunkRefs )
preFilterSeries := len ( grouped )
result := make ( [ ] * logproto . ChunkRef , 0 , len ( chunkRefs ) )
seriesSeen := make ( map [ uint64 ] struct { } , len ( grouped ) )
responses := make ( [ ] [ ] * logproto . GroupedChunkRefs , 0 , 2 )
// We can perform requests sequentially, because most of the time the request
// only covers a single day, and if not, it's at most two days.
for _ , s := range partitionSeriesByDay ( from , through , grouped ) {
@@ -110,19 +109,6 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
if err != nil {
return nil , err
}
var chunks int
for i := range s . series {
chunks += len ( s . series [ i ] . Refs )
}
sp . LogKV (
"day" , s . day . Time . Time ( ) ,
"from" , s . interval . Start . Time ( ) ,
"through" , s . interval . End . Time ( ) ,
"series" , len ( s . series ) ,
"chunks" , chunks ,
"blocks" , len ( blocks ) ,
"skipped" , len ( skipped ) ,
)
refs , err := bq . c . FilterChunks ( ctx , tenant , s . interval , blocks , queryPlan )
if err != nil {
@@ -130,33 +116,45 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
}
// add chunk refs from series that were not mapped to any blocks
refs = append ( refs , skipped ... )
responses = append ( responses , refs , skipped )
bq . metrics . seriesSkipped . Add ( float64 ( len ( skipped ) ) )
}
deduped , err := mergeSeries ( responses , nil )
if err != nil {
return nil , errors . Wrap ( err , "failed to dedupe results" )
}
for i := range refs {
seriesSeen [ refs [ i ] . Fingerprint ] = struct { } { }
for _ , ref := range refs [ i ] . Refs {
result = append ( result , & logproto . ChunkRef {
Fingerprint : refs [ i ] . Fingerprint ,
UserID : tenant ,
From : ref . From ,
Through : ref . Through ,
Checksum : ref . Checksum ,
} )
}
result := make ( [ ] * logproto . ChunkRef , 0 , len ( chunkRefs ) )
for i := range deduped {
for _ , ref := range deduped [ i ] . Refs {
result = append ( result , & logproto . ChunkRef {
Fingerprint : deduped [ i ] . Fingerprint ,
UserID : tenant ,
From : ref . From ,
Through : ref . Through ,
Checksum : ref . Checksum ,
} )
}
}
postFilterChunks := len ( result )
postFilterSeries := len ( deduped )
level . Debug ( bq . logger ) . Log (
"operation" , "bloomquerier.FilterChunkRefs" ,
"tenant" , tenant ,
"from" , from . Time ( ) ,
"through" , through . Time ( ) ,
"responses" , len ( responses ) ,
"preFilterChunks" , preFilterChunks ,
"postFilterChunks" , len ( result ) ,
"postFilterChunks" , postFilterChunks ,
"filteredChunks" , preFilterChunks - postFilterChunks ,
"preFilterSeries" , preFilterSeries ,
"postFilterSeries" , len ( seriesSeen ) ,
"postFilterSeries" , postFilterSeries ,
"filteredSeries" , preFilterSeries - postFilterSeries ,
)
postFilterChunks := len ( result )
postFilterSeries := len ( seriesSeen )
bq . metrics . chunksTotal . Add ( float64 ( preFilterChunks ) )
bq . metrics . chunksFiltered . Add ( float64 ( preFilterChunks - postFilterChunks ) )
bq . metrics . seriesTotal . Add ( float64 ( preFilterSeries ) )