mirror of https://github.com/grafana/loki
Feature/querysharding ii (#1927)
* [wip] sharding evaluator/ast
* [wip] continues experimenting with ast mapping
* refactoring in preparation for binops
* evaluators can pass state to other evaluators
* compiler alignment
* Evaluator method renamed to StepEvaluator
* chained evaluator impl
* tidying up sharding code
* handling for ConcatSampleExpr
* downstream iterator
* structure for downstreaming asts
* outlines sharding optimizations
* work on sharding mapper
* ast sharding optimizations
* test for different logrange positions
* shard mapper tests
* stronger ast sharding & tests
* shardmapper tests for string->string
* removes sharding evaluator code
* removes unused ctx arg
* Revert "removes sharding evaluator code"
This reverts commit 55d41b9519.
* interfaces for downstreaming, type conversions
* sharding plumbing on frontend
* type alignment in queryrange to downstream sharded queriers
* downstreaming support for sharding incl storage code
* removes chainedevaluator
* comment alignment
* storage shard injection
* speccing out testware for sharding equivalence
* [wip] shared engine refactor
* sorting streams, sharding eval fixes
* downstream evaluator embeds defaultevaluator
* other pkgs adopt logql changes
* metrics & logs use same middleware instantiation process
* wires up shardingware
* middleware per metrics/logfilter
* empty step populating StepEvaluator promql.Matrix adapter
* sharding metrics
* log/span injection into sharded engine
* sharding metrics avoids multiple instantiation
* downstreamhandler tracing
* sharding parameterized libsonnet
* removes querier replicas
* default 32 concurrency for workers
* jsonnet correct level override
* unquote true in yaml
* lowercase error + downstreamEvaluator defaults to embedded defaultEvaluator
* makes shardRecorder private
* logs query on failed parse
* refactors engine to be multi-use, minimizes logger injection, generalizes Query methods, removes Engine interface
* basic tests for querysharding mware
* [wip] concurrent evaluator
* integrates stat propagation into sharding evaluator
* splitby histogram
* extends le bounds for bytes processed
* byte throughput histogram buckets to 40gb
* chunk duration mixin
* fixes merge w/ field rename
* derives logger in sharded engine via ctx & logs some downstream evaluators
* moves sharded engine to top, adds comments
* logs failed merge results in stats ctx
* snapshotting stats merge logic is done more effectively
* per query concurrency controlled via downstreamer
* unexports decodereq
* queryrange testware
* downstreamer tests
* pr requests
pull/2138/head
parent 156023ae9a · commit 89d80a6fc4
@@ -1,26 +0,0 @@
package logql

import (
	"fmt"

	"github.com/pkg/errors"
)

// ASTMapper is the exported interface for mapping between multiple AST representations
type ASTMapper interface {
	Map(Expr) (Expr, error)
}

// CloneExpr is a helper function to clone a node.
func CloneExpr(expr Expr) (Expr, error) {
	return ParseExpr(expr.String())
}

func badASTMapping(expected string, got Expr) error {
	return fmt.Errorf("Bad AST mapping: expected one type (%s), but got (%T)", expected, got)
}

// MapperUnsupportedType is a helper for signaling that an evaluator does not support an Expr type
func MapperUnsupportedType(expr Expr, m ASTMapper) error {
	return errors.Errorf("unexpected expr type (%T) for ASTMapper type (%T) ", expr, m)
}
@@ -0,0 +1,67 @@
package logql

import (
	"time"

	"github.com/prometheus/prometheus/promql"
)

// MatrixStepper exposes a promql.Matrix as a StepEvaluator.
// Ensure that the resulting StepEvaluator maintains
// the same shape that the parameters expect. For example,
// it's possible that a downstream query matches no
// log streams and thus returns an empty matrix.
// However, we still need to ensure that it can be merged effectively
// with another leg that may match series.
// Therefore, we determine our steps from the parameters
// and not the underlying Matrix.
type MatrixStepper struct {
	start, end, ts time.Time
	step           time.Duration
	m              promql.Matrix
}

func NewMatrixStepper(start, end time.Time, step time.Duration, m promql.Matrix) *MatrixStepper {
	return &MatrixStepper{
		start: start,
		end:   end,
		ts:    start.Add(-step), // will be corrected on first Next() call
		step:  step,
		m:     m,
	}
}

func (m *MatrixStepper) Next() (bool, int64, promql.Vector) {
	m.ts = m.ts.Add(m.step)
	if !m.ts.Before(m.end) {
		return false, 0, nil
	}

	ts := m.ts.UnixNano() / int64(time.Millisecond)
	vec := make(promql.Vector, 0, len(m.m))

	for i, series := range m.m {
		ln := len(series.Points)

		if ln == 0 || series.Points[0].T != ts {
			vec = append(vec, promql.Sample{
				Point: promql.Point{
					T: ts,
					V: 0,
				},
				Metric: series.Metric,
			})
			continue
		}

		vec = append(vec, promql.Sample{
			Point:  series.Points[0],
			Metric: series.Metric,
		})
		m.m[i].Points = m.m[i].Points[1:]
	}

	return true, ts, vec
}

func (m *MatrixStepper) Close() error { return nil }
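A minimal usage sketch of the adapter above (illustrative only, not part of the diff; the import path is assumed from the package shown): every step in [start, end) produces a vector, and a series without a point at a given step is filled with a zero-valued sample, which is what lets a sparse or empty shard leg merge cleanly with the other legs.

    package main

    import (
        "fmt"
        "time"

        "github.com/prometheus/prometheus/pkg/labels"
        "github.com/prometheus/prometheus/promql"

        "github.com/grafana/loki/pkg/logql"
    )

    func main() {
        start, end, step := time.Unix(0, 0), time.Unix(3, 0), time.Second

        // One series with a single point at t=1s; the 0s and 2s steps are zero-filled.
        m := promql.Matrix{promql.Series{
            Metric: labels.Labels{{Name: "foo", Value: "bar"}},
            Points: []promql.Point{{T: 1000, V: 1}},
        }}

        s := logql.NewMatrixStepper(start, end, step, m)
        for ok, ts, vec := s.Next(); ok; ok, ts, vec = s.Next() {
            fmt.Println(ts, vec) // steps at 0, 1000, 2000 ms; only the 1000 ms step carries V=1
        }
    }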
@@ -0,0 +1,115 @@
package logql

import (
	"testing"
	"time"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/promql"
	"github.com/stretchr/testify/require"
)

func TestMatrixStepper(t *testing.T) {
	var (
		start = time.Unix(0, 0)
		end   = time.Unix(6, 0)
		step  = time.Second
	)

	m := promql.Matrix{
		promql.Series{
			Metric: labels.Labels{{Name: "foo", Value: "bar"}},
			Points: []promql.Point{
				{T: start.UnixNano() / int64(step), V: 0},
				{T: start.Add(step).UnixNano() / int64(time.Millisecond), V: 1},
				{T: start.Add(2*step).UnixNano() / int64(time.Millisecond), V: 2},
				{T: start.Add(3*step).UnixNano() / int64(time.Millisecond), V: 3},
				{T: start.Add(4*step).UnixNano() / int64(time.Millisecond), V: 4},
				{T: start.Add(5*step).UnixNano() / int64(time.Millisecond), V: 5},
			},
		},
		promql.Series{
			Metric: labels.Labels{{Name: "bazz", Value: "buzz"}},
			Points: []promql.Point{
				{T: start.Add(2*step).UnixNano() / int64(time.Millisecond), V: 2},
				{T: start.Add(4*step).UnixNano() / int64(time.Millisecond), V: 4},
			},
		},
	}

	s := NewMatrixStepper(start, end, step, m)

	expected := []promql.Vector{
		{
			promql.Sample{
				Point:  promql.Point{T: start.UnixNano() / int64(step), V: 0},
				Metric: labels.Labels{{Name: "foo", Value: "bar"}},
			},
			promql.Sample{
				Point:  promql.Point{T: start.UnixNano() / int64(step), V: 0},
				Metric: labels.Labels{{Name: "bazz", Value: "buzz"}},
			},
		},
		{
			promql.Sample{
				Point:  promql.Point{T: start.Add(step).UnixNano() / int64(time.Millisecond), V: 1},
				Metric: labels.Labels{{Name: "foo", Value: "bar"}},
			},
			promql.Sample{
				Point:  promql.Point{T: start.Add(step).UnixNano() / int64(time.Millisecond), V: 0},
				Metric: labels.Labels{{Name: "bazz", Value: "buzz"}},
			},
		},
		{
			promql.Sample{
				Point:  promql.Point{T: start.Add(2*step).UnixNano() / int64(time.Millisecond), V: 2},
				Metric: labels.Labels{{Name: "foo", Value: "bar"}},
			},
			promql.Sample{
				Point:  promql.Point{T: start.Add(2*step).UnixNano() / int64(time.Millisecond), V: 2},
				Metric: labels.Labels{{Name: "bazz", Value: "buzz"}},
			},
		},
		{
			promql.Sample{
				Point:  promql.Point{T: start.Add(3*step).UnixNano() / int64(time.Millisecond), V: 3},
				Metric: labels.Labels{{Name: "foo", Value: "bar"}},
			},
			promql.Sample{
				Point:  promql.Point{T: start.Add(3*step).UnixNano() / int64(time.Millisecond), V: 0},
				Metric: labels.Labels{{Name: "bazz", Value: "buzz"}},
			},
		},
		{
			promql.Sample{
				Point:  promql.Point{T: start.Add(4*step).UnixNano() / int64(time.Millisecond), V: 4},
				Metric: labels.Labels{{Name: "foo", Value: "bar"}},
			},
			promql.Sample{
				Point:  promql.Point{T: start.Add(4*step).UnixNano() / int64(time.Millisecond), V: 4},
				Metric: labels.Labels{{Name: "bazz", Value: "buzz"}},
			},
		},
		{
			promql.Sample{
				Point:  promql.Point{T: start.Add(5*step).UnixNano() / int64(time.Millisecond), V: 5},
				Metric: labels.Labels{{Name: "foo", Value: "bar"}},
			},
			promql.Sample{
				Point:  promql.Point{T: start.Add(5*step).UnixNano() / int64(time.Millisecond), V: 0},
				Metric: labels.Labels{{Name: "bazz", Value: "buzz"}},
			},
		},
	}

	for i := 0; i < int(end.Sub(start)/step); i++ {
		ok, ts, vec := s.Next()
		require.Equal(t, ok, true)
		require.Equal(t, start.Add(step*time.Duration(i)).UnixNano()/int64(time.Millisecond), ts)
		require.Equal(t, expected[i], vec)
	}

	ok, _, _ := s.Next()

	require.Equal(t, ok, false)
}
@@ -0,0 +1,109 @@
package logql

import (
	"context"
	"math"
	"testing"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/stretchr/testify/require"

	"github.com/grafana/loki/pkg/logproto"
)

var nilMetrics = NewShardingMetrics(nil)

func TestMappingEquivalence(t *testing.T) {
	var (
		shards   = 3
		nStreams = 60
		rounds   = 20
		streams  = randomStreams(nStreams, rounds, shards, []string{"a", "b", "c", "d"})
		start    = time.Unix(0, 0)
		end      = time.Unix(0, int64(time.Second*time.Duration(rounds)))
		step     = time.Second
		interval = time.Duration(0)
		limit    = 100
	)

	for _, tc := range []struct {
		query       string
		approximate bool
	}{
		{`1`, false},
		{`1 + 1`, false},
		{`{a="1"}`, false},
		{`{a="1"} |= "number: 10"`, false},
		{`rate({a=~".*"}[1s])`, false},
		{`sum by (a) (rate({a=~".*"}[1s]))`, false},
		{`max without (a) (rate({a=~".*"}[1s]))`, false},
		{`count(rate({a=~".*"}[1s]))`, false},
		{`avg(rate({a=~".*"}[1s]))`, true},
		{`1 + sum by (cluster) (rate({a=~".*"}[1s]))`, false},
		{`sum(max(rate({a=~".*"}[1s])))`, false},
		{`max(count(rate({a=~".*"}[1s])))`, false},
		{`max(sum by (cluster) (rate({a=~".*"}[1s]))) / count(rate({a=~".*"}[1s]))`, false},
		// topk prefers already-seen values in tiebreakers. Since the test data generates
		// the same log lines for each series & the resulting promql.Vectors aren't deterministically
		// sorted by labels, we don't expect this to pass.
		// We could sort them as stated, but it doesn't seem worth the performance hit.
		// {`topk(3, rate({a=~".*"}[1s]))`, false},
	} {
		q := NewMockQuerier(
			shards,
			streams,
		)

		opts := EngineOpts{}
		regular := NewEngine(opts, q)
		sharded := NewShardedEngine(opts, MockDownstreamer{regular}, nilMetrics)

		t.Run(tc.query, func(t *testing.T) {
			params := NewLiteralParams(
				tc.query,
				start,
				end,
				step,
				interval,
				logproto.FORWARD,
				uint32(limit),
				nil,
			)
			qry := regular.Query(params)
			shardedQry := sharded.Query(params, shards)

			res, err := qry.Exec(context.Background())
			require.Nil(t, err)

			shardedRes, err := shardedQry.Exec(context.Background())
			require.Nil(t, err)

			if tc.approximate {
				approximatelyEquals(t, res.Data.(promql.Matrix), shardedRes.Data.(promql.Matrix))
			} else {
				require.Equal(t, res.Data, shardedRes.Data)
			}
		})
	}
}

// approximatelyEquals ensures two responses are approximately equal, up to 6 decimals precision per sample
func approximatelyEquals(t *testing.T, as, bs promql.Matrix) {
	require.Equal(t, len(as), len(bs))

	for i := 0; i < len(as); i++ {
		a := as[i]
		b := bs[i]
		require.Equal(t, a.Metric, b.Metric)
		require.Equal(t, len(a.Points), len(b.Points))

		for j := 0; j < len(a.Points); j++ {
			aSample := &a.Points[j]
			aSample.V = math.Round(aSample.V*1e6) / 1e6
			bSample := &b.Points[j]
			bSample.V = math.Round(bSample.V*1e6) / 1e6
		}
		require.Equal(t, a, b)
	}
}
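To make the intent of TestMappingEquivalence concrete: the sharded engine rewrites an aggregation into per-shard legs and recombines them, and the test asserts that the recombined result matches the unsharded engine. The decomposition below is only an illustration of the idea, not the exact shardmapper output syntax; with 3 shards:

    sum by (a) (rate({a=~".*"}[1s]))
        = sum by (a) (
              <leg evaluated against shard 0_of_3>
            + <leg evaluated against shard 1_of_3>
            + <leg evaluated against shard 2_of_3>
          )

avg cannot be pushed down as-is and is recomposed from sum and count legs, so floating-point rounding can differ slightly between the two engines. That is why `avg(rate({a=~".*"}[1s]))` is marked approximate and compared with approximatelyEquals (6 decimal places) rather than exact equality.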
@@ -0,0 +1,172 @@
package logql

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/cortexproject/cortex/pkg/querier/astmapper"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/promql"

	"github.com/grafana/loki/pkg/iter"
	"github.com/grafana/loki/pkg/logproto"
)

func NewMockQuerier(shards int, streams []logproto.Stream) MockQuerier {
	return MockQuerier{
		shards:  shards,
		streams: streams,
	}
}

// MockQuerier is a shard-aware mock querier
type MockQuerier struct {
	shards  int
	streams []logproto.Stream
}

func (q MockQuerier) Select(_ context.Context, req SelectParams) (iter.EntryIterator, error) {
	expr, err := req.LogSelector()
	if err != nil {
		return nil, err
	}
	filter, err := expr.Filter()
	if err != nil {
		return nil, err
	}

	matchers := expr.Matchers()

	var shard *astmapper.ShardAnnotation
	if len(req.Shards) > 0 {
		shards, err := ParseShards(req.Shards)
		if err != nil {
			return nil, err
		}
		shard = &shards[0]
	}

	var matched []logproto.Stream

outer:
	for _, stream := range q.streams {
		ls := mustParseLabels(stream.Labels)

		// filter by shard if requested
		if shard != nil && ls.Hash()%uint64(shard.Of) != uint64(shard.Shard) {
			continue
		}

		for _, matcher := range matchers {
			if !matcher.Matches(ls.Get(matcher.Name)) {
				continue outer
			}
		}
		matched = append(matched, stream)
	}

	// apply the LineFilter
	filtered := make([]logproto.Stream, 0, len(matched))
	if filter == nil || filter == TrueFilter {
		filtered = matched
	} else {
		for _, s := range matched {
			var entries []logproto.Entry
			for _, entry := range s.Entries {
				if filter.Filter([]byte(entry.Line)) {
					entries = append(entries, entry)
				}
			}

			if len(entries) > 0 {
				filtered = append(filtered, logproto.Stream{
					Labels:  s.Labels,
					Entries: entries,
				})
			}
		}
	}

	return iter.NewTimeRangedIterator(
		iter.NewStreamsIterator(context.Background(), filtered, req.Direction),
		req.Start,
		req.End,
	), nil
}

type MockDownstreamer struct {
	*Engine
}

func (m MockDownstreamer) Downstreamer() Downstreamer { return m }

func (d MockDownstreamer) Downstream(ctx context.Context, queries []DownstreamQuery) ([]Result, error) {
	results := make([]Result, 0, len(queries))
	for _, query := range queries {
		params := NewLiteralParams(
			query.Expr.String(),
			query.Params.Start(),
			query.Params.End(),
			query.Params.Step(),
			query.Params.Interval(),
			query.Params.Direction(),
			query.Params.Limit(),
			query.Shards.Encode(),
		)
		res, err := d.Query(params).Exec(ctx)
		if err != nil {
			return nil, err
		}

		results = append(results, res)
	}
	return results, nil
}

// randomStreams creates nStreams of nEntries with labelNames each, where every label value
// except the "index" label is modulo'd into a shard
func randomStreams(nStreams, nEntries, nShards int, labelNames []string) (streams []logproto.Stream) {
	for i := 0; i < nStreams; i++ {
		// labels
		stream := logproto.Stream{}
		ls := labels.Labels{{Name: "index", Value: fmt.Sprintf("%d", i)}}

		for _, lName := range labelNames {
			// I needed a way to hash something to uint64
			// in order to get some form of random label distribution
			shard := append(ls, labels.Label{
				Name:  lName,
				Value: fmt.Sprintf("%d", i),
			}).Hash() % uint64(nShards)

			ls = append(ls, labels.Label{
				Name:  lName,
				Value: fmt.Sprintf("%d", shard),
			})
		}
		for j := 0; j < nEntries; j++ {
			stream.Entries = append(stream.Entries, logproto.Entry{
				Timestamp: time.Unix(0, int64(j*int(time.Second))),
				Line:      fmt.Sprintf("line number: %d", j),
			})
		}

		stream.Labels = ls.String()
		streams = append(streams, stream)
	}
	return streams
}

func mustParseLabels(s string) labels.Labels {
	labels, err := promql.ParseMetric(s)
	if err != nil {
		log.Fatalf("Failed to parse %s", s)
	}

	return labels
}
@@ -0,0 +1,185 @@
package queryrange

import (
	"context"
	"fmt"
	"time"

	"github.com/cortexproject/cortex/pkg/querier/queryrange"
	"github.com/cortexproject/cortex/pkg/util/spanlogger"
	"github.com/go-kit/kit/log/level"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/promql"

	"github.com/grafana/loki/pkg/logql"
)

const (
	DefaultDownstreamConcurrency = 32
)

type DownstreamHandler struct {
	next queryrange.Handler
}

func ParamsToLokiRequest(params logql.Params) *LokiRequest {
	return &LokiRequest{
		Query:     params.Query(),
		Limit:     params.Limit(),
		Step:      int64(params.Step() / time.Millisecond),
		StartTs:   params.Start(),
		EndTs:     params.End(),
		Direction: params.Direction(),
		Path:      "/loki/api/v1/query_range", // TODO(owen-d): make this derivable
	}
}

func (h DownstreamHandler) Downstreamer() logql.Downstreamer {
	p := DefaultDownstreamConcurrency
	locks := make(chan struct{}, p)
	for i := 0; i < p; i++ {
		locks <- struct{}{}
	}
	return &instance{
		parallelism: p,
		locks:       locks,
		handler:     h.next,
	}
}

// instance is an intermediate struct for controlling concurrency across a single query
type instance struct {
	parallelism int
	locks       chan struct{}
	handler     queryrange.Handler
}

func (i instance) Downstream(ctx context.Context, queries []logql.DownstreamQuery) ([]logql.Result, error) {
	return i.For(queries, func(qry logql.DownstreamQuery) (logql.Result, error) {
		req := ParamsToLokiRequest(qry.Params).WithShards(qry.Shards).WithQuery(qry.Expr.String()).(*LokiRequest)
		logger, ctx := spanlogger.New(ctx, "DownstreamHandler.instance")
		defer logger.Finish()
		level.Debug(logger).Log("shards", req.Shards, "query", req.Query)

		res, err := i.handler.Do(ctx, req)
		if err != nil {
			return logql.Result{}, err
		}
		return ResponseToResult(res)
	})
}

// For runs a function against a list of queries, collecting the results or returning an error.
// The indices are preserved such that input[i] maps to output[i].
func (in instance) For(
	queries []logql.DownstreamQuery,
	fn func(logql.DownstreamQuery) (logql.Result, error),
) ([]logql.Result, error) {
	type resp struct {
		i   int
		res logql.Result
		err error
	}

	done := make(chan struct{})
	defer close(done)

	ch := make(chan resp)

	// Make one goroutine to dispatch the other goroutines, bounded by instance parallelism
	go func() {
		for i := 0; i < len(queries); i++ {
			select {
			case <-done:
				break
			case <-in.locks:
				go func(i int) {
					// release lock back into pool
					defer func() {
						in.locks <- struct{}{}
					}()

					res, err := fn(queries[i])
					response := resp{
						i:   i,
						res: res,
						err: err,
					}

					// Feed the result into the channel unless the work has completed.
					select {
					case <-done:
					case ch <- response:
					}
				}(i)
			}
		}
	}()

	results := make([]logql.Result, len(queries))
	for i := 0; i < len(queries); i++ {
		resp := <-ch
		if resp.err != nil {
			return nil, resp.err
		}
		results[resp.i] = resp.res
	}
	return results, nil
}

// sampleStreamToMatrix converts sample streams to a promql.Matrix
func sampleStreamToMatrix(streams []queryrange.SampleStream) promql.Value {
	xs := make(promql.Matrix, 0, len(streams))
	for _, stream := range streams {
		x := promql.Series{}
		x.Metric = make(labels.Labels, 0, len(stream.Labels))
		for _, l := range stream.Labels {
			x.Metric = append(x.Metric, labels.Label(l))
		}

		x.Points = make([]promql.Point, 0, len(stream.Samples))
		for _, sample := range stream.Samples {
			x.Points = append(x.Points, promql.Point{
				T: sample.TimestampMs,
				V: sample.Value,
			})
		}

		xs = append(xs, x)
	}
	return xs
}

func ResponseToResult(resp queryrange.Response) (logql.Result, error) {
	switch r := resp.(type) {
	case *LokiResponse:
		if r.Error != "" {
			return logql.Result{}, fmt.Errorf("%s: %s", r.ErrorType, r.Error)
		}

		streams := make(logql.Streams, 0, len(r.Data.Result))

		for _, stream := range r.Data.Result {
			streams = append(streams, stream)
		}

		return logql.Result{
			Statistics: r.Statistics,
			Data:       streams,
		}, nil

	case *LokiPromResponse:
		if r.Response.Error != "" {
			return logql.Result{}, fmt.Errorf("%s: %s", r.Response.ErrorType, r.Response.Error)
		}

		return logql.Result{
			Statistics: r.Statistics,
			Data:       sampleStreamToMatrix(r.Response.Data.Result),
		}, nil

	default:
		return logql.Result{}, fmt.Errorf("cannot decode (%T)", resp)
	}
}
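The instance.For method above bounds per-query fan-out with a buffered channel of locks. A self-contained sketch of that pattern (illustrative only; the names are made up and none of the Loki types are used) may make the mechanics easier to follow: the channel acts as a pool of work slots, and results carry their index so output order matches input order.

    package main

    import "fmt"

    func boundedFor(inputs []int, parallelism int, fn func(int) int) []int {
        type resp struct {
            i   int
            out int
        }

        // A buffered channel of "locks": one token per allowed concurrent worker.
        locks := make(chan struct{}, parallelism)
        for i := 0; i < parallelism; i++ {
            locks <- struct{}{}
        }

        ch := make(chan resp)
        go func() {
            for i := 0; i < len(inputs); i++ {
                <-locks // wait for a free slot before dispatching the next worker
                go func(i int) {
                    defer func() { locks <- struct{}{} }() // release the slot
                    ch <- resp{i: i, out: fn(inputs[i])}
                }(i)
            }
        }()

        // Collect results, preserving input order via the carried index.
        results := make([]int, len(inputs))
        for range inputs {
            r := <-ch
            results[r.i] = r.out
        }
        return results
    }

    func main() {
        fmt.Println(boundedFor([]int{1, 2, 3, 4, 5}, 2, func(x int) int { return x * x }))
        // [1 4 9 16 25]
    }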
@@ -0,0 +1,344 @@
package queryrange

import (
	"context"
	"errors"
	"sync"
	"testing"
	"time"

	"github.com/cortexproject/cortex/pkg/ingester/client"
	"github.com/cortexproject/cortex/pkg/querier/queryrange"
	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/logql/stats"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/promql"
	"github.com/stretchr/testify/require"
)

func testSampleStreams() []queryrange.SampleStream {
	return []queryrange.SampleStream{
		{
			Labels: []client.LabelAdapter{{Name: "foo", Value: "bar"}},
			Samples: []client.Sample{
				{
					Value:       0,
					TimestampMs: 0,
				},
				{
					Value:       1,
					TimestampMs: 1,
				},
				{
					Value:       2,
					TimestampMs: 2,
				},
			},
		},
		{
			Labels: []client.LabelAdapter{{Name: "bazz", Value: "buzz"}},
			Samples: []client.Sample{
				{
					Value:       4,
					TimestampMs: 4,
				},
				{
					Value:       5,
					TimestampMs: 5,
				},
				{
					Value:       6,
					TimestampMs: 6,
				},
			},
		},
	}
}

func TestSampleStreamToMatrix(t *testing.T) {
	input := testSampleStreams()
	expected := promql.Matrix{
		{
			Metric: labels.FromMap(map[string]string{
				"foo": "bar",
			}),
			Points: []promql.Point{
				{
					V: 0,
					T: 0,
				},
				{
					V: 1,
					T: 1,
				},
				{
					V: 2,
					T: 2,
				},
			},
		},
		{
			Metric: labels.FromMap(map[string]string{
				"bazz": "buzz",
			}),
			Points: []promql.Point{
				{
					V: 4,
					T: 4,
				},
				{
					V: 5,
					T: 5,
				},
				{
					V: 6,
					T: 6,
				},
			},
		},
	}
	require.Equal(t, expected, sampleStreamToMatrix(input))
}

func TestResponseToResult(t *testing.T) {
	for _, tc := range []struct {
		desc     string
		input    queryrange.Response
		err      bool
		expected logql.Result
	}{
		{
			desc: "LokiResponse",
			input: &LokiResponse{
				Data: LokiData{
					Result: []logproto.Stream{{
						Labels: `{foo="bar"}`,
					}},
				},
				Statistics: stats.Result{
					Summary: stats.Summary{ExecTime: 1},
				},
			},
			expected: logql.Result{
				Statistics: stats.Result{
					Summary: stats.Summary{ExecTime: 1},
				},
				Data: logql.Streams{{
					Labels: `{foo="bar"}`,
				}},
			},
		},
		{
			desc: "LokiResponseError",
			input: &LokiResponse{
				Error:     "foo",
				ErrorType: "bar",
			},
			err: true,
		},
		{
			desc: "LokiPromResponse",
			input: &LokiPromResponse{
				Statistics: stats.Result{
					Summary: stats.Summary{ExecTime: 1},
				},
				Response: &queryrange.PrometheusResponse{
					Data: queryrange.PrometheusData{
						Result: testSampleStreams(),
					},
				},
			},
			expected: logql.Result{
				Statistics: stats.Result{
					Summary: stats.Summary{ExecTime: 1},
				},
				Data: sampleStreamToMatrix(testSampleStreams()),
			},
		},
		{
			desc: "LokiPromResponseError",
			input: &LokiPromResponse{
				Response: &queryrange.PrometheusResponse{
					Error:     "foo",
					ErrorType: "bar",
				},
			},
			err: true,
		},
		{
			desc:  "UnexpectedTypeError",
			input: nil,
			err:   true,
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			out, err := ResponseToResult(tc.input)
			if tc.err {
				require.NotNil(t, err)
			}
			require.Equal(t, tc.expected, out)
		})
	}
}

func TestDownstreamHandler(t *testing.T) {
	// Pretty poor test, but this is just a passthrough struct, so ensure we create locks
	// and can consume them
	h := DownstreamHandler{nil}
	in := h.Downstreamer().(*instance)
	require.Equal(t, DefaultDownstreamConcurrency, in.parallelism)
	require.NotNil(t, in.locks)
	ensureParallelism(t, in, in.parallelism)
}

// ensureParallelism consumes the locks in an instance, making sure they're all available.
// It does not replace them, so the instance is unusable afterwards. This is a cleanup test
// to ensure internal state.
func ensureParallelism(t *testing.T, in *instance, n int) {
	for i := 0; i < n; i++ {
		select {
		case <-in.locks:
		case <-time.After(time.Millisecond):
			require.FailNow(t, "lock couldn't be acquired")
		}
	}
	// ensure no more locks available
	select {
	case <-in.locks:
		require.FailNow(t, "unexpected lock acquisition")
	default:
	}
}

func TestInstanceFor(t *testing.T) {
	mkIn := func() *instance { return DownstreamHandler{nil}.Downstreamer().(*instance) }
	in := mkIn()

	queries := make([]logql.DownstreamQuery, in.parallelism+1)
	var mtx sync.Mutex
	var ct int

	// ensure we can execute queries that number more than the parallelism parameter
	_, err := in.For(queries, func(_ logql.DownstreamQuery) (logql.Result, error) {
		mtx.Lock()
		defer mtx.Unlock()
		ct++
		return logql.Result{}, nil
	})
	require.Nil(t, err)
	require.Equal(t, len(queries), ct)
	ensureParallelism(t, in, in.parallelism)

	// ensure an early error abandons the other queued queries
	in = mkIn()
	ct = 0
	_, err = in.For(queries, func(_ logql.DownstreamQuery) (logql.Result, error) {
		mtx.Lock()
		defer mtx.Unlock()
		ct++
		return logql.Result{}, errors.New("testerr")
	})
	require.NotNil(t, err)
	// Ensure no more than the initial batch was parallelized.
	require.LessOrEqual(t, ct, in.parallelism)
	ensureParallelism(t, in, in.parallelism)

	in = mkIn()
	results, err := in.For(
		[]logql.DownstreamQuery{
			{
				Shards: logql.Shards{
					{Shard: 0, Of: 2},
				},
			},
			{
				Shards: logql.Shards{
					{Shard: 1, Of: 2},
				},
			},
		},
		func(qry logql.DownstreamQuery) (logql.Result, error) {
			return logql.Result{
				Data: logql.Streams{{
					Labels: qry.Shards[0].String(),
				}},
			}, nil
		},
	)
	require.Nil(t, err)
	require.Equal(
		t,
		[]logql.Result{
			logql.Result{
				Data: logql.Streams{{Labels: "0_of_2"}},
			},
			logql.Result{
				Data: logql.Streams{{Labels: "1_of_2"}},
			},
		},
		results,
	)
	ensureParallelism(t, in, in.parallelism)
}

func TestInstanceDownstream(t *testing.T) {
	params := logql.NewLiteralParams(
		"",
		time.Now(),
		time.Now(),
		0,
		0,
		logproto.BACKWARD,
		1000,
		nil,
	)
	expr, err := logql.ParseExpr(`{foo="bar"}`)
	require.Nil(t, err)

	expectedResp := func() *LokiResponse {
		return &LokiResponse{
			Data: LokiData{
				Result: []logproto.Stream{{
					Labels: `{foo="bar"}`,
				}},
			},
			Statistics: stats.Result{
				Summary: stats.Summary{ExecTime: 1},
			},
		}
	}

	queries := []logql.DownstreamQuery{
		{
			Expr:   expr,
			Params: params,
			Shards: logql.Shards{{Shard: 0, Of: 2}},
		},
	}

	var got queryrange.Request
	var want queryrange.Request
	handler := queryrange.HandlerFunc(
		func(_ context.Context, req queryrange.Request) (queryrange.Response, error) {
			// for some reason these seemingly can't be checked in their own goroutines,
			// so we assign them to scoped variables for later comparison.
			got = req
			want = ParamsToLokiRequest(params).WithShards(logql.Shards{{Shard: 0, Of: 2}}).WithQuery(expr.String())

			return expectedResp(), nil
		},
	)

	expected, err := ResponseToResult(expectedResp())
	require.Nil(t, err)

	results, err := DownstreamHandler{handler}.Downstreamer().Downstream(context.Background(), queries)

	require.Equal(t, want, got)

	require.Nil(t, err)
	require.Equal(t, []logql.Result{expected}, results)
}
@@ -0,0 +1,239 @@
package queryrange

import (
	"context"
	"fmt"
	"time"

	"github.com/cortexproject/cortex/pkg/querier/queryrange"
	"github.com/cortexproject/cortex/pkg/util/spanlogger"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
	"github.com/prometheus/prometheus/promql"

	"github.com/grafana/loki/pkg/loghttp"
	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/logql/marshal"
)

var nanosecondsInMillisecond = int64(time.Millisecond / time.Nanosecond)

// NewQueryShardMiddleware creates a middleware which downstreams queries after AST mapping and query encoding.
func NewQueryShardMiddleware(
	logger log.Logger,
	confs queryrange.ShardingConfigs,
	minShardingLookback time.Duration,
	middlewareMetrics *queryrange.InstrumentMiddlewareMetrics,
	shardingMetrics *logql.ShardingMetrics,
) queryrange.Middleware {

	noshards := !hasShards(confs)

	if noshards {
		level.Warn(logger).Log(
			"middleware", "QueryShard",
			"msg", "no configuration with shard found",
			"confs", fmt.Sprintf("%+v", confs),
		)
		return queryrange.PassthroughMiddleware
	}

	mapperware := queryrange.MiddlewareFunc(func(next queryrange.Handler) queryrange.Handler {
		return newASTMapperware(confs, next, logger, shardingMetrics)
	})

	return queryrange.MiddlewareFunc(func(next queryrange.Handler) queryrange.Handler {
		return &shardSplitter{
			MinShardingLookback: minShardingLookback,
			shardingware: queryrange.MergeMiddlewares(
				queryrange.InstrumentMiddleware("shardingware", middlewareMetrics),
				mapperware,
			).Wrap(next),
			now:  time.Now,
			next: queryrange.InstrumentMiddleware("sharding-bypass", middlewareMetrics).Wrap(next),
		}
	})
}

func newASTMapperware(
	confs queryrange.ShardingConfigs,
	next queryrange.Handler,
	logger log.Logger,
	metrics *logql.ShardingMetrics,
) *astMapperware {

	return &astMapperware{
		confs:  confs,
		logger: log.With(logger, "middleware", "QueryShard.astMapperware"),
		next:   next,
		ng:     logql.NewShardedEngine(logql.EngineOpts{}, DownstreamHandler{next}, metrics),
	}
}

type astMapperware struct {
	confs  queryrange.ShardingConfigs
	logger log.Logger
	next   queryrange.Handler
	ng     *logql.ShardedEngine
}

func (ast *astMapperware) Do(ctx context.Context, r queryrange.Request) (queryrange.Response, error) {
	conf, err := ast.confs.GetConf(r)
	// cannot shard with this timerange
	if err != nil {
		level.Warn(ast.logger).Log("err", err.Error(), "msg", "skipped AST mapper for request")
		return ast.next.Do(ctx, r)
	}

	shardedLog, ctx := spanlogger.New(ctx, "shardedEngine")
	defer shardedLog.Finish()

	req, ok := r.(*LokiRequest)
	if !ok {
		return nil, fmt.Errorf("expected *LokiRequest, got (%T)", r)
	}
	params := paramsFromRequest(req)
	query := ast.ng.Query(params, int(conf.RowShards))

	res, err := query.Exec(ctx)
	if err != nil {
		return nil, err
	}

	value, err := marshal.NewResultValue(res.Data)
	if err != nil {
		return nil, err
	}

	switch res.Data.Type() {
	case promql.ValueTypeMatrix:
		return &LokiPromResponse{
			Response: &queryrange.PrometheusResponse{
				Status: loghttp.QueryStatusSuccess,
				Data: queryrange.PrometheusData{
					ResultType: loghttp.ResultTypeMatrix,
					Result:     toProto(value.(loghttp.Matrix)),
				},
			},
			Statistics: res.Statistics,
		}, nil
	case logql.ValueTypeStreams:
		return &LokiResponse{
			Status:     loghttp.QueryStatusSuccess,
			Direction:  req.Direction,
			Limit:      req.Limit,
			Version:    uint32(loghttp.GetVersion(req.Path)),
			Statistics: res.Statistics,
			Data: LokiData{
				ResultType: loghttp.ResultTypeStream,
				Result:     value.(loghttp.Streams).ToProto(),
			},
		}, nil
	default:
		return nil, fmt.Errorf("unexpected downstream response type (%T)", res.Data)
	}
}

// shardSplitter middleware will only shard appropriate requests that do not extend past the MinShardingLookback interval.
// This is used to send nonsharded requests to the ingesters in order to not overload them.
// TODO(owen-d): export in cortex so we don't duplicate code
type shardSplitter struct {
	MinShardingLookback time.Duration      // delimiter for splitting sharded vs non-sharded queries
	shardingware        queryrange.Handler // handler for sharded queries
	next                queryrange.Handler // handler for non-sharded queries
	now                 func() time.Time   // injectable time.Now
}

func (splitter *shardSplitter) Do(ctx context.Context, r queryrange.Request) (queryrange.Response, error) {
	cutoff := splitter.now().Add(-splitter.MinShardingLookback)
	sharded, nonsharded := partitionRequest(r, cutoff)

	return splitter.parallel(ctx, sharded, nonsharded)
}

func (splitter *shardSplitter) parallel(ctx context.Context, sharded, nonsharded queryrange.Request) (queryrange.Response, error) {
	if sharded == nil {
		return splitter.next.Do(ctx, nonsharded)
	}

	if nonsharded == nil {
		return splitter.shardingware.Do(ctx, sharded)
	}

	nonshardCh := make(chan queryrange.Response, 1)
	shardCh := make(chan queryrange.Response, 1)
	errCh := make(chan error, 2)

	go func() {
		res, err := splitter.next.Do(ctx, nonsharded)
		if err != nil {
			errCh <- err
			return
		}
		nonshardCh <- res
	}()

	go func() {
		res, err := splitter.shardingware.Do(ctx, sharded)
		if err != nil {
			errCh <- err
			return
		}
		shardCh <- res
	}()

	resps := make([]queryrange.Response, 0, 2)
	for i := 0; i < 2; i++ {
		select {
		case r := <-nonshardCh:
			resps = append(resps, r)
		case r := <-shardCh:
			resps = append(resps, r)
		case err := <-errCh:
			return nil, err
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}

	return lokiCodec.MergeResponse(resps...)
}

// TODO(owen-d): export in cortex so we don't duplicate code
func hasShards(confs queryrange.ShardingConfigs) bool {
	for _, conf := range confs {
		if conf.RowShards > 0 {
			return true
		}
	}
	return false
}

// partitionRequest splits a request into potentially multiple requests, one including the request's time range
// [0,t). The other will include [t,inf)
// TODO(owen-d): export in cortex so we don't duplicate code
func partitionRequest(r queryrange.Request, t time.Time) (before queryrange.Request, after queryrange.Request) {
	boundary := TimeToMillis(t)
	if r.GetStart() >= boundary {
		return nil, r
	}

	if r.GetEnd() < boundary {
		return r, nil
	}

	return r.WithStartEnd(r.GetStart(), boundary), r.WithStartEnd(boundary, r.GetEnd())
}

// TimeFromMillis is a helper to turn milliseconds -> time.Time
func TimeFromMillis(ms int64) time.Time {
	return time.Unix(0, ms*nanosecondsInMillisecond)
}

func TimeToMillis(t time.Time) int64 {
	return t.UnixNano() / nanosecondsInMillisecond
}
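A small worked illustration of how the splitter and partitionRequest above interact (the timestamps and request literal below are made up; this fragment assumes the LokiRequest type from this package and that its GetStart/GetEnd report milliseconds): with MinShardingLookback = 1h and "now" at 12:00, the cutoff is 11:00, so a 09:00–11:30 request is split into a sharded leg for the older data and an unsharded leg for the recent data; the two legs run in parallel and are merged with lokiCodec.MergeResponse.

    req := &LokiRequest{
        StartTs: time.Date(2020, 4, 1, 9, 0, 0, 0, time.UTC),
        EndTs:   time.Date(2020, 4, 1, 11, 30, 0, 0, time.UTC),
    }
    cutoff := time.Date(2020, 4, 1, 11, 0, 0, 0, time.UTC) // now.Add(-MinShardingLookback)

    sharded, unsharded := partitionRequest(req, cutoff)
    // sharded   covers [09:00, 11:00) and goes through the AST-mapping (sharding) middleware
    // unsharded covers [11:00, 11:30] and bypasses sharding (recent data, likely still in ingesters)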
@@ -0,0 +1,189 @@
package queryrange

import (
	"context"
	"fmt"
	"sort"
	"testing"
	"time"

	"github.com/cortexproject/cortex/pkg/chunk"
	"github.com/cortexproject/cortex/pkg/querier/queryrange"
	"github.com/go-kit/kit/log"
	"github.com/grafana/loki/pkg/loghttp"
	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql"
	"github.com/stretchr/testify/require"
)

var (
	nilShardingMetrics = logql.NewShardingMetrics(nil)
	defaultReq         = func() *LokiRequest {
		return &LokiRequest{
			Limit:     100,
			StartTs:   start,
			EndTs:     end,
			Direction: logproto.BACKWARD,
			Path:      "/loki/api/v1/query_range",
		}
	}
	lokiResps = []queryrange.Response{
		&LokiResponse{
			Status:    loghttp.QueryStatusSuccess,
			Direction: logproto.BACKWARD,
			Limit:     defaultReq().Limit,
			Version:   1,
			Data: LokiData{
				ResultType: loghttp.ResultTypeStream,
				Result: []logproto.Stream{
					{
						Labels: `{foo="bar", level="debug"}`,
						Entries: []logproto.Entry{
							{Timestamp: time.Unix(0, 6), Line: "6"},
							{Timestamp: time.Unix(0, 5), Line: "5"},
						},
					},
				},
			},
		},
		&LokiResponse{
			Status:    loghttp.QueryStatusSuccess,
			Direction: logproto.BACKWARD,
			Limit:     100,
			Version:   1,
			Data: LokiData{
				ResultType: loghttp.ResultTypeStream,
				Result: []logproto.Stream{
					{
						Labels: `{foo="bar", level="error"}`,
						Entries: []logproto.Entry{
							{Timestamp: time.Unix(0, 2), Line: "2"},
							{Timestamp: time.Unix(0, 1), Line: "1"},
						},
					},
				},
			},
		},
	}
)

func Test_PartitionRequest(t *testing.T) {
	midpt := time.Unix(0, 0).Add(500 * time.Millisecond)
	cutoff := TimeToMillis(midpt)

	// test split
	req := defaultReq().WithStartEnd(0, cutoff*2)
	before, after := partitionRequest(req, midpt)
	require.Equal(t, req.WithStartEnd(0, cutoff), before)
	require.Equal(t, req.WithStartEnd(cutoff, 2*cutoff), after)

	// test all before cutoff
	before, after = partitionRequest(req, midpt.Add(1000*time.Millisecond))
	require.Equal(t, req, before)
	require.Nil(t, after)

	// test after cutoff
	before, after = partitionRequest(req, time.Unix(0, 0))
	require.Nil(t, before)
	require.Equal(t, req, after)
}

func Test_shardSplitter(t *testing.T) {
	splitter := &shardSplitter{
		shardingware:        mockHandler(lokiResps[0], nil),
		next:                mockHandler(lokiResps[1], nil),
		now:                 time.Now,
		MinShardingLookback: 0,
	}

	req := defaultReq().WithStartEnd(
		TimeToMillis(time.Now().Add(-time.Hour)),
		TimeToMillis(time.Now().Add(time.Hour)),
	)

	resp, err := splitter.Do(context.Background(), req)
	require.Nil(t, err)
	expected, err := lokiCodec.MergeResponse(lokiResps...)
	require.Nil(t, err)
	require.Equal(t, expected, resp)
}

func Test_astMapper(t *testing.T) {
	called := 0

	handler := queryrange.HandlerFunc(func(ctx context.Context, req queryrange.Request) (queryrange.Response, error) {
		resp := lokiResps[called]
		called++
		return resp, nil
	})

	mware := newASTMapperware(
		queryrange.ShardingConfigs{
			chunk.PeriodConfig{
				RowShards: 2,
			},
		},
		handler,
		log.NewNopLogger(),
		nilShardingMetrics,
	)

	resp, err := mware.Do(context.Background(), defaultReq().WithQuery(`{food="bar"}`))
	require.Nil(t, err)

	expected, err := lokiCodec.MergeResponse(lokiResps...)
	sort.Sort(logproto.Streams(expected.(*LokiResponse).Data.Result))
	require.Nil(t, err)
	require.Equal(t, called, 2)
	require.Equal(t, expected.(*LokiResponse).Data, resp.(*LokiResponse).Data)
}

func Test_hasShards(t *testing.T) {
	for i, tc := range []struct {
		input    queryrange.ShardingConfigs
		expected bool
	}{
		{
			input: queryrange.ShardingConfigs{
				{},
			},
			expected: false,
		},
		{
			input: queryrange.ShardingConfigs{
				{RowShards: 16},
			},
			expected: true,
		},
		{
			input: queryrange.ShardingConfigs{
				{},
				{RowShards: 16},
				{},
			},
			expected: true,
		},
		{
			input:    nil,
			expected: false,
		},
	} {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			require.Equal(t, tc.expected, hasShards(tc.input))
		})
	}
}

// astmapper successful stream & prom conversion

func mockHandler(resp queryrange.Response, err error) queryrange.Handler {
	return queryrange.HandlerFunc(func(ctx context.Context, req queryrange.Request) (queryrange.Response, error) {
		if expired := ctx.Err(); expired != nil {
			return nil, expired
		}

		return resp, err
	})
}