chore(engine): Wire up new execution engine (#17032)

This PR connects the already existing components of the new query engine and wires them up in the querier API.

The feature flag `-querier.engine.enable-v2-engine` controls whether supported queries should be executed using the new engine.

Note that this implementation currently does not execute any queries.


Signed-off-by: Christian Haudum <christian.haudum@gmail.com>
chaudum/metastore-caching
Christian Haudum 3 months ago committed by GitHub
parent ffa9656f7f
commit 1705f98a9f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 4
      docs/sources/shared/configuration.md
  2. 7
      integration/cluster/cluster.go
  3. 115
      pkg/engine/engine.go
  4. 86
      pkg/engine/engine_test.go
  5. 16
      pkg/engine/internal/types/operators.go
  6. 25
      pkg/engine/planner/logical/planner.go
  7. 75
      pkg/engine/planner/logical/planner_test.go
  8. 11
      pkg/engine/planner/physical/context.go
  9. 6
      pkg/engine/planner/physical/context_test.go
  10. 2
      pkg/engine/planner/physical/planner.go
  11. 2
      pkg/logcli/client/file.go
  12. 2
      pkg/logcli/query/query_test.go
  13. 2
      pkg/logql/bench/bench_test.go
  14. 42
      pkg/logql/engine.go
  15. 2
      pkg/logql/test_utils.go
  16. 25
      pkg/loki/modules.go
  17. 53
      pkg/querier/http.go
  18. 4
      pkg/querier/http_test.go
  19. 24
      pkg/querier/querier.go
  20. 4
      pkg/ruler/evaluator_local.go
  21. 2
      pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator.go

@ -4494,6 +4494,10 @@ engine:
# CLI flag: -querier.engine.max-count-min-sketch-heap-size
[max_count_min_sketch_heap_size: <int> | default = 10000]
# Experimental: Enable next generation query engine for supported queries.
# CLI flag: -querier.engine.enable-v2-engine
[enable_v2_engine: <boolean> | default = false]
# The maximum number of queries that can be simultaneously processed by the
# querier.
# CLI flag: -querier.max-concurrent

@ -73,10 +73,17 @@ limits_config:
attributes: [email]
storage_config:
# Legacy config
named_stores:
filesystem:
store-1:
directory: {{.sharedDataPath}}/fs-store-1
# Thanos config
object_store:
named_stores:
filesystem:
store-1:
dir: {{.sharedDataPath}}/fs-store-1
boltdb_shipper:
active_index_directory: {{.dataPath}}/boltdb-index
cache_location: {{.dataPath}}/boltdb-cache

@ -1,26 +1,95 @@
package engine
import "github.com/grafana/loki/v3/pkg/logql/syntax"
// canExecuteWithNewEngine determines whether a query can be executed by the new execution engine.
func canExecuteWithNewEngine(expr syntax.Expr) bool {
switch expr := expr.(type) {
case syntax.SampleExpr:
return false
case syntax.LogSelectorExpr:
ret := true
expr.Walk(func(e syntax.Expr) bool {
switch e.(type) {
case *syntax.LineParserExpr, *syntax.LogfmtParserExpr, *syntax.LogfmtExpressionParserExpr, *syntax.JSONExpressionParserExpr:
ret = false
case *syntax.LineFmtExpr, *syntax.LabelFmtExpr:
ret = false
case *syntax.KeepLabelsExpr, *syntax.DropLabelsExpr:
ret = false
}
return true
})
return ret
}
return false
import (
"context"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/grafana/loki/v3/pkg/dataobj/metastore"
"github.com/grafana/loki/v3/pkg/engine/planner/logical"
"github.com/grafana/loki/v3/pkg/engine/planner/physical"
"github.com/grafana/loki/v3/pkg/logql"
"github.com/grafana/loki/v3/pkg/logqlmodel"
utillog "github.com/grafana/loki/v3/pkg/util/log"
)
var (
	// ErrNotSupported is returned for queries that the new engine cannot
	// (yet) execute. Callers treat it as a signal to fall back to the
	// legacy engine (see the errors.Is check in the querier's
	// RangeQueryHandler).
	ErrNotSupported = errors.New("feature not supported in new query engine")
)
// New creates a new instance of the query engine that implements the [logql.Engine] interface.
func New(opts logql.EngineOpts, metastore metastore.Metastore, limits logql.Limits, logger log.Logger) *QueryEngine {
	eng := &QueryEngine{
		opts:      opts,
		metastore: metastore,
		limits:    limits,
		logger:    logger,
	}
	return eng
}
// QueryEngine combines logical planning, physical planning, and execution to evaluate LogQL queries.
type QueryEngine struct {
	logger    log.Logger          // base logger; enriched with request context in Execute
	limits    logql.Limits        // query limits; not referenced by the methods visible here yet
	metastore metastore.Metastore // catalog used by the physical planner to resolve data objects
	opts      logql.EngineOpts    // engine configuration options
}
// Query implements [logql.Engine]. It wraps the given parameters in a
// [logql.Query] whose Exec call dispatches back to this engine's Execute.
func (e *QueryEngine) Query(params logql.Params) logql.Query {
	adapter := &queryAdapter{
		engine: e,
		params: params,
	}
	return adapter
}
// Execute executes a LogQL query and returns its results or alternatively an error.
// The execution is done in three steps:
//  1. Create a logical plan from the provided query parameters.
//  2. Create a physical plan from the logical plan using information from the catalog.
//  3. Evaluate the physical plan with the executor.
//
// Queries that cannot be planned return [ErrNotSupported] so callers can fall
// back to the legacy engine.
func (e *QueryEngine) Execute(ctx context.Context, params logql.Params) (logqlmodel.Result, error) {
	var result logqlmodel.Result

	logger := utillog.WithContext(ctx, e.logger)
	logger = log.With(logger, "query", params.QueryString(), "engine", "v2")

	logicalPlan, err := logical.BuildPlan(params)
	if err != nil {
		level.Warn(logger).Log("msg", "failed to create logical plan", "err", err)
		return result, ErrNotSupported
	}

	executionContext := physical.NewContext(ctx, e.metastore, params.Start(), params.End())
	planner := physical.NewPlanner(executionContext)
	plan, err := planner.Build(logicalPlan)
	if err != nil {
		level.Warn(logger).Log("msg", "failed to create physical plan", "err", err)
		return result, ErrNotSupported
	}
	if _, err = planner.Optimize(plan); err != nil {
		level.Warn(logger).Log("msg", "failed to optimize physical plan", "err", err)
		return result, ErrNotSupported
	}

	// TODO(chaudum): Replace the return values with the actual return values from the execution.
	// The "query" key is already attached to the logger above; logging it
	// again here would duplicate the structured-log key.
	level.Info(logger).Log("msg", "execute query with new engine")
	return result, ErrNotSupported
}
var _ logql.Engine = (*QueryEngine)(nil)
// queryAdapter dispatches query execution to the wrapped engine.
// It exists so that QueryEngine can satisfy [logql.Engine], whose Query
// method must return a [logql.Query].
type queryAdapter struct {
	params logql.Params // parameters captured when Query() was called
	engine *QueryEngine // engine that performs the actual execution
}

// Exec implements [logql.Query].
func (q *queryAdapter) Exec(ctx context.Context) (logqlmodel.Result, error) {
	return q.engine.Execute(ctx, q.params)
}

// Compile-time check that queryAdapter satisfies [logql.Query].
var _ logql.Query = (*queryAdapter)(nil)

@ -1,87 +1 @@
package engine
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/logql/syntax"
)
// TestCanExecuteWithNewEngine verifies which LogQL statements the new engine
// accepts. Subtest names are derived from the statement, so each statement
// must be unique (a duplicate `{env="prod"} |= "metrics.go"` case was removed).
func TestCanExecuteWithNewEngine(t *testing.T) {
	for _, tt := range []struct {
		statement string
		expected  bool
	}{
		{
			statement: `{env="prod"}`,
			expected:  true,
		},
		{
			statement: `{env="prod"} |= "metrics.go"`,
			expected:  true,
		},
		{
			statement: `{env="prod"} | tenant="loki"`,
			expected:  true,
		},
		{
			statement: `{env="prod"} | tenant="loki" != "foo"`,
			expected:  true,
		},
		{
			statement: `{env="prod"} | json`,
			expected:  false,
		},
		{
			statement: `{env="prod"} | json foo="bar"`,
			expected:  false,
		},
		{
			statement: `{env="prod"} | logfmt`,
			expected:  false,
		},
		{
			statement: `{env="prod"} | logfmt foo="bar"`,
			expected:  false,
		},
		{
			statement: `{env="prod"} | pattern "<_> foo=<foo> <_>"`,
			expected:  false,
		},
		{
			statement: `{env="prod"} | regexp ".* foo=(?P<foo>.+) .*"`,
			expected:  false,
		},
		{
			statement: `{env="prod"} | unpack`,
			expected:  false,
		},
		{
			statement: `{env="prod"} |= "metrics.go" | logfmt`,
			expected:  false,
		},
		{
			statement: `{env="prod"} | line_format "{.cluster}"`,
			expected:  false,
		},
		{
			statement: `{env="prod"} | label_format cluster="us"`,
			expected:  false,
		},
		{
			statement: `sum(rate({env="prod"}[1m]))`,
			expected:  false,
		},
	} {
		t.Run(tt.statement, func(t *testing.T) {
			expr := syntax.MustParseExpr(tt.statement)
			canExecute := canExecuteWithNewEngine(expr)
			require.Equal(t, tt.expected, canExecute)
		})
	}
}

@ -53,12 +53,12 @@ const (
BinaryOpDiv // Division operation (/).
BinaryOpMod // Modulo operation (%).
BinaryOpMatchStr // String matching operation (|=).
BinaryOpNotMatchStr // String non-matching operation (!=).
BinaryOpMatchRe // Regular expression matching operation (|~).
BinaryOpNotMatchRe // Regular expression non-matching operation (!~).
BinaryOpMatchPattern // Pattern matching operation (|>).
BinaryOpNotMatchPattern // Pattern non-matching operation (!>).
BinaryOpMatchSubstr // Substring matching operation (|=). Used for string match filter.
BinaryOpNotMatchSubstr // Substring non-matching operation (!=). Used for string match filter.
BinaryOpMatchRe // Regular expression matching operation (|~). Used for regex match filter and label matcher.
BinaryOpNotMatchRe // Regular expression non-matching operation (!~). Used for regex match filter and label matcher.
BinaryOpMatchPattern // Pattern matching operation (|>). Used for pattern match filter.
BinaryOpNotMatchPattern // Pattern non-matching operation (!>). Use for pattern match filter.
)
// String returns a human-readable representation of the binary operation kind.
@ -96,9 +96,9 @@ func (t BinaryOp) String() string {
return "DIV"
case BinaryOpMod:
return "MOD"
case BinaryOpMatchStr:
case BinaryOpMatchSubstr:
return "MATCH_STR"
case BinaryOpNotMatchStr:
case BinaryOpNotMatchSubstr:
return "NOT_MATCH_STR" // convenience for NOT(MATCH_STR(...))
case BinaryOpMatchRe:
return "MATCH_RE"

@ -1,6 +1,7 @@
package logical
import (
"errors"
"fmt"
"github.com/prometheus/prometheus/model/labels"
@ -12,6 +13,8 @@ import (
"github.com/grafana/loki/v3/pkg/logql/syntax"
)
var errUnimplemented = errors.New("query contains unimplemented features")
// BuildPlan converts a LogQL query represented as [logql.Params] into a logical [Plan].
// It may return an error as second argument in case the traversal of the AST of the query fails.
func BuildPlan(query logql.Params) (*Plan, error) {
@ -24,6 +27,18 @@ func BuildPlan(query logql.Params) (*Plan, error) {
expr := query.GetExpression()
expr.Walk(func(e syntax.Expr) bool {
switch e := e.(type) {
case syntax.SampleExpr:
err = errUnimplemented
return false // do not traverse children
case *syntax.LineParserExpr, *syntax.LogfmtParserExpr, *syntax.LogfmtExpressionParserExpr, *syntax.JSONExpressionParserExpr:
err = errUnimplemented
return false // do not traverse children
case *syntax.LineFmtExpr, *syntax.LabelFmtExpr:
err = errUnimplemented
return false // do not traverse children
case *syntax.KeepLabelsExpr, *syntax.DropLabelsExpr:
err = errUnimplemented
return false // do not traverse children
case *syntax.MatchersExpr:
selector = convertLabelMatchers(e.Matchers())
case *syntax.LineFilterExpr:
@ -139,9 +154,9 @@ func convertLineFilter(filter syntax.LineFilter) Value {
func convertLineMatchType(op log.LineMatchType) types.BinaryOp {
switch op {
case log.LineMatchEqual:
return types.BinaryOpMatchStr
return types.BinaryOpMatchSubstr
case log.LineMatchNotEqual:
return types.BinaryOpNotMatchStr
return types.BinaryOpNotMatchSubstr
case log.LineMatchRegexp:
return types.BinaryOpMatchRe
case log.LineMatchNotRegexp:
@ -166,9 +181,9 @@ func logColumnRef() *ColumnRef {
func convertLabelMatchType(op labels.MatchType) types.BinaryOp {
switch op {
case labels.MatchEqual:
return types.BinaryOpMatchStr
return types.BinaryOpMatchSubstr
case labels.MatchNotEqual:
return types.BinaryOpNotMatchStr
return types.BinaryOpNotMatchSubstr
case labels.MatchRegexp:
return types.BinaryOpMatchRe
case labels.MatchNotRegexp:
@ -182,7 +197,7 @@ func convertLabelFilter(expr log.LabelFilterer) (Value, error) {
switch e := expr.(type) {
case *log.BinaryLabelFilter:
op := types.BinaryOpOr
if e.And == true {
if e.And {
op = types.BinaryOpAnd
}
left, err := convertLabelFilter(e.Left)

@ -120,15 +120,84 @@ RETURN %20
t.Logf("\n%s\n", sb.String())
}
func TestConvertAST_UnsupportedFeature(t *testing.T) {
// TestCanExecuteQuery verifies that BuildPlan accepts supported log selector
// queries and rejects unsupported features with a conversion error.
// NOTE(review): the original span interleaved pre-change diff lines (a second
// `statement:` field and a superseded assertion) that would not compile; they
// are dropped here, along with an exact duplicate test case whose t.Run name
// would collide.
func TestCanExecuteQuery(t *testing.T) {
	for _, tt := range []struct {
		statement string
		expected  bool
	}{
		{
			statement: `{env="prod"}`,
			expected:  true,
		},
		{
			statement: `{env="prod"} |= "metrics.go"`,
			expected:  true,
		},
		{
			statement: `{env="prod"} | tenant="loki"`,
			expected:  true,
		},
		{
			statement: `{env="prod"} | tenant="loki" != "foo"`,
			expected:  true,
		},
		// Unsupported features below: expected defaults to false.
		{
			statement: `{env="prod"} | json`,
		},
		{
			statement: `{env="prod"} | json foo="bar"`,
		},
		{
			statement: `{env="prod"} | logfmt`,
		},
		{
			statement: `{env="prod"} | logfmt foo="bar"`,
		},
		{
			statement: `{env="prod"} | pattern "<_> foo=<foo> <_>"`,
		},
		{
			statement: `{env="prod"} | regexp ".* foo=(?P<foo>.+) .*"`,
		},
		{
			statement: `{env="prod"} | unpack`,
		},
		{
			statement: `{env="prod"} |= "metrics.go" | logfmt`,
		},
		{
			statement: `{env="prod"} | line_format "{.cluster}"`,
		},
		{
			statement: `{env="prod"} | label_format cluster="us"`,
		},
		{
			statement: `{env="prod"} |= "metric.go" | retry > 2`,
		},
		{
			statement: `sum(rate({env="prod"}[1m]))`,
		},
	} {
		t.Run(tt.statement, func(t *testing.T) {
			q := &query{
				statement: tt.statement,
				start:     1000,
				end:       2000,
				direction: logproto.FORWARD,
				limit:     1000,
			}
			logicalPlan, err := BuildPlan(q)
			if tt.expected {
				require.NoError(t, err)
			} else {
				require.Nil(t, logicalPlan)
				require.ErrorContains(t, err, "failed to convert AST into logical plan")
			}
		})
	}
}

@ -2,6 +2,7 @@ package physical
import (
"context"
"errors"
"fmt"
"time"
@ -13,8 +14,8 @@ import (
var (
binOpToMatchTypeMapping = map[types.BinaryOp]labels.MatchType{
types.BinaryOpMatchStr: labels.MatchEqual,
types.BinaryOpNotMatchStr: labels.MatchNotEqual,
types.BinaryOpEq: labels.MatchEqual,
types.BinaryOpNeq: labels.MatchNotEqual,
types.BinaryOpMatchRe: labels.MatchRegexp,
types.BinaryOpNotMatchRe: labels.MatchNotRegexp,
}
@ -49,6 +50,10 @@ func NewContext(ctx context.Context, ms metastore.Metastore, from, through time.
// [Expression]. The expression is required to be a (tree of) [BinaryExpression]
// with a [ColumnExpression] on the left and a [LiteralExpression] on the right.
func (c *Context) ResolveDataObj(selector Expression) ([]DataObjLocation, [][]int64, error) {
if c.metastore == nil {
return nil, nil, errors.New("no metastore to resolve objects")
}
matchers, err := expressionToMatchers(selector)
if err != nil {
return nil, nil, fmt.Errorf("failed to convert selector expression into matchers: %w", err)
@ -84,7 +89,7 @@ func expressionToMatchers(selector Expression) ([]*labels.Matcher, error) {
return nil, err
}
return append(lhs, rhs...), nil
case types.BinaryOpMatchStr, types.BinaryOpNotMatchStr, types.BinaryOpMatchRe, types.BinaryOpNotMatchRe:
case types.BinaryOpEq, types.BinaryOpNeq, types.BinaryOpMatchRe, types.BinaryOpNotMatchRe:
op, err := convertBinaryOp(expr.Op)
if err != nil {
return nil, err

@ -121,7 +121,7 @@ func TestContext_ExpressionToMatchers(t *testing.T) {
expr: &BinaryExpr{
Left: newColumnExpr("foo", types.ColumnTypeLabel),
Right: NewLiteral("bar"),
Op: types.BinaryOpMatchStr,
Op: types.BinaryOpEq,
},
want: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"),
@ -132,12 +132,12 @@ func TestContext_ExpressionToMatchers(t *testing.T) {
Left: &BinaryExpr{
Left: newColumnExpr("foo", types.ColumnTypeLabel),
Right: NewLiteral("bar"),
Op: types.BinaryOpMatchStr,
Op: types.BinaryOpEq,
},
Right: &BinaryExpr{
Left: newColumnExpr("bar", types.ColumnTypeLabel),
Right: NewLiteral("baz"),
Op: types.BinaryOpNotMatchStr,
Op: types.BinaryOpNeq,
},
Op: types.BinaryOpAnd,
},

@ -178,7 +178,7 @@ func (p *Planner) Optimize(plan *Plan) (*Plan, error) {
optimizer := newOptimizer(plan, optimizations)
optimizer.optimize(root)
if i == 1 {
return nil, errors.New("physcial plan must only have exactly one root node")
return nil, errors.New("physical plan must only have exactly one root node")
}
}
return plan, nil

@ -42,7 +42,7 @@ type FileClient struct {
labels []string
labelValues []string
orgID string
engine *logql.Engine
engine logql.Engine
}
// NewFileClient returns the new instance of FileClient for the given `io.ReadCloser`

@ -406,7 +406,7 @@ func Test_batch(t *testing.T) {
}
type testQueryClient struct {
engine *logql.Engine
engine *logql.QueryEngine
queryRangeCalls int
}

@ -25,7 +25,7 @@ const testTenant = "test-tenant"
// setupBenchmarkWithStore sets up the benchmark environment with the specified store type
// and returns the necessary components
func setupBenchmarkWithStore(tb testing.TB, storeType string) (*logql.Engine, *GeneratorConfig) {
func setupBenchmarkWithStore(tb testing.TB, storeType string) (*logql.QueryEngine, *GeneratorConfig) {
tb.Helper()
entries, err := os.ReadDir(DefaultDataDir)
if err != nil || len(entries) == 0 {

@ -139,6 +139,10 @@ type Querier interface {
SelectSamples(context.Context, SelectSampleParams) (iter.SampleIterator, error)
}
// Engine is implemented by LogQL query engines. Query creates an executable
// [Query] from the given [Params].
type Engine interface {
	Query(Params) Query
}
// EngineOpts is the list of options to use with the LogQL query engine.
type EngineOpts struct {
// MaxLookBackPeriod is the maximum amount of time to look back for log lines.
@ -151,21 +155,15 @@ type EngineOpts struct {
// MaxCountMinSketchHeapSize is the maximum number of labels the heap for a topk query using a count min sketch
// can track. This impacts the memory usage and accuracy of a sharded probabilistic topk query.
MaxCountMinSketchHeapSize int `yaml:"max_count_min_sketch_heap_size"`
// Enable the next generation Loki Query Engine for supported queries.
EnableV2Engine bool `yaml:"enable_v2_engine" category:"experimental"`
}
func (opts *EngineOpts) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.DurationVar(
&opts.MaxLookBackPeriod,
prefix+".engine.max-lookback-period",
30*time.Second,
"The maximum amount of time to look back for log lines. Used only for instant log queries.",
)
f.IntVar(
&opts.MaxCountMinSketchHeapSize,
prefix+".engine.max-count-min-sketch-heap-size",
10_000,
"The maximum number of labels the heap of a topk query using a count min sketch can track.",
)
f.DurationVar(&opts.MaxLookBackPeriod, prefix+"max-lookback-period", 30*time.Second, "The maximum amount of time to look back for log lines. Used only for instant log queries.")
f.IntVar(&opts.MaxCountMinSketchHeapSize, prefix+"max-count-min-sketch-heap-size", 10_000, "The maximum number of labels the heap of a topk query using a count min sketch can track.")
f.BoolVar(&opts.EnableV2Engine, prefix+"enable-v2-engine", false, "Experimental: Enable next generation query engine for supported queries.")
// Log executing query by default
opts.LogExecutingQuery = true
}
@ -176,21 +174,21 @@ func (opts *EngineOpts) applyDefault() {
}
}
// Engine is the LogQL engine.
type Engine struct {
// QueryEngine is the LogQL engine.
type QueryEngine struct {
logger log.Logger
evaluatorFactory EvaluatorFactory
limits Limits
opts EngineOpts
}
// NewEngine creates a new LogQL Engine.
func NewEngine(opts EngineOpts, q Querier, l Limits, logger log.Logger) *Engine {
// NewEngine creates a new LogQL [QueryEngine].
func NewEngine(opts EngineOpts, q Querier, l Limits, logger log.Logger) *QueryEngine {
opts.applyDefault()
if logger == nil {
logger = log.NewNopLogger()
}
return &Engine{
return &QueryEngine{
logger: logger,
evaluatorFactory: NewDefaultEvaluator(q, opts.MaxLookBackPeriod, opts.MaxCountMinSketchHeapSize),
limits: l,
@ -199,14 +197,14 @@ func NewEngine(opts EngineOpts, q Querier, l Limits, logger log.Logger) *Engine
}
// Query creates a new LogQL query. Instant/Range type is derived from the parameters.
func (ng *Engine) Query(params Params) Query {
func (qe *QueryEngine) Query(params Params) Query {
return &query{
logger: ng.logger,
logger: qe.logger,
params: params,
evaluator: ng.evaluatorFactory,
evaluator: qe.evaluatorFactory,
record: true,
logExecQuery: ng.opts.LogExecutingQuery,
limits: ng.limits,
logExecQuery: qe.opts.LogExecutingQuery,
limits: qe.limits,
}
}

@ -235,7 +235,7 @@ outer:
}
type MockDownstreamer struct {
*Engine
*QueryEngine
}
func (m MockDownstreamer) Downstreamer(_ context.Context) Downstreamer { return m }

@ -613,7 +613,12 @@ func (t *Loki) initQuerier() (services.Service, error) {
serverutil.ResponseJSONMiddleware(),
}
t.querierAPI = querier.NewQuerierAPI(t.Cfg.Querier, t.Querier, t.Overrides, logger)
store, err := t.createDataObjBucket("dataobj-querier")
if err != nil {
return nil, err
}
t.querierAPI = querier.NewQuerierAPI(t.Cfg.Querier, t.Querier, t.Overrides, metastore.NewObjectMetastore(store), logger)
indexStatsHTTPMiddleware := querier.WrapQuerySpanAndTimeout("query.IndexStats", t.Overrides)
indexShardsHTTPMiddleware := querier.WrapQuerySpanAndTimeout("query.IndexShards", t.Overrides)
@ -1529,7 +1534,7 @@ func (t *Loki) initRuleEvaluator() (services.Service, error) {
break
}
var engine *logql.Engine
var engine *logql.QueryEngine
engine, err = t.createRulerQueryEngine(logger, deleteStore)
if err != nil {
break
@ -2120,8 +2125,20 @@ func (t *Loki) createDataObjBucket(clientName string) (objstore.Bucket, error) {
if err != nil {
return nil, fmt.Errorf("failed to get schema for now: %w", err)
}
// Handle named stores
cfg := t.Cfg.StorageConfig.ObjectStore
backend := schema.ObjectType
if st, ok := cfg.NamedStores.LookupStoreType(schema.ObjectType); ok {
backend = st
// override config with values from named store config
if err := cfg.NamedStores.OverrideConfig(&cfg.Config, schema.ObjectType); err != nil {
return nil, err
}
}
var objstoreBucket objstore.Bucket
objstoreBucket, err = bucket.NewClient(context.Background(), schema.ObjectType, t.Cfg.StorageConfig.ObjectStore.Config, clientName, util_log.Logger)
objstoreBucket, err = bucket.NewClient(context.Background(), backend, cfg.Config, clientName, util_log.Logger)
if err != nil {
return nil, err
}
@ -2165,7 +2182,7 @@ func (t *Loki) deleteRequestsClient(clientType string, limits limiter.CombinedLi
return deletion.NewPerTenantDeleteRequestsClient(client, limits), nil
}
func (t *Loki) createRulerQueryEngine(logger log.Logger, deleteStore deletion.DeleteRequestsClient) (eng *logql.Engine, err error) {
func (t *Loki) createRulerQueryEngine(logger log.Logger, deleteStore deletion.DeleteRequestsClient) (eng *logql.QueryEngine, err error) {
querierStore, err := t.getQuerierStore()
if err != nil {
return nil, err

@ -17,6 +17,8 @@ import (
"github.com/grafana/dskit/tenant"
"github.com/grafana/loki/v3/pkg/dataobj/metastore"
"github.com/grafana/loki/v3/pkg/engine"
"github.com/grafana/loki/v3/pkg/loghttp"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql"
@ -27,7 +29,7 @@ import (
"github.com/grafana/loki/v3/pkg/querier/queryrange"
index_stats "github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
"github.com/grafana/loki/v3/pkg/util/httpreq"
util_log "github.com/grafana/loki/v3/pkg/util/log"
utillog "github.com/grafana/loki/v3/pkg/util/log"
serverutil "github.com/grafana/loki/v3/pkg/util/server"
"github.com/grafana/loki/v3/pkg/util/spanlogger"
util_validation "github.com/grafana/loki/v3/pkg/util/validation"
@ -38,41 +40,56 @@ type QueryResponse struct {
Result parser.Value `json:"result"`
}
type Engine interface {
Query(logql.Params) logql.Query
}
// nolint // QuerierAPI defines HTTP handler functions for the querier.
type QuerierAPI struct {
querier Querier
cfg Config
limits querier_limits.Limits
engine Engine
engineV1 logql.Engine // Loki's current query engine
engineV2 logql.Engine // Loki's next generation query engine
logger log.Logger
}
// NewQuerierAPI returns an instance of the QuerierAPI.
func NewQuerierAPI(cfg Config, querier Querier, limits querier_limits.Limits, logger log.Logger) *QuerierAPI {
engine := logql.NewEngine(cfg.Engine, querier, limits, logger)
func NewQuerierAPI(cfg Config, querier Querier, limits querier_limits.Limits, metastore metastore.Metastore, logger log.Logger) *QuerierAPI {
return &QuerierAPI{
cfg: cfg,
limits: limits,
querier: querier,
engine: engine,
engineV1: logql.NewEngine(cfg.Engine, querier, limits, logger),
engineV2: engine.New(cfg.Engine, metastore, limits, logger),
logger: logger,
}
}
// RangeQueryHandler is a http.HandlerFunc for range queries and legacy log queries
// RangeQueryHandler is a http.HandlerFunc for range queries and legacy log queries.
// When the experimental v2 engine is enabled it is tried first; queries it
// does not support (engine.ErrNotSupported) fall back to the legacy engine.
// NOTE(review): stale pre-change diff lines in this span (duplicate returns
// and the old `q.engine` call) are dropped; the success path now returns an
// explicit nil error.
func (q *QuerierAPI) RangeQueryHandler(ctx context.Context, req *queryrange.LokiRequest) (logqlmodel.Result, error) {
	var result logqlmodel.Result
	logger := utillog.WithContext(ctx, q.logger)

	if err := q.validateMaxEntriesLimits(ctx, req.Plan.AST, req.Limit); err != nil {
		return result, err
	}

	params, err := queryrange.ParamsFromRequest(req)
	if err != nil {
		return result, err
	}

	if q.cfg.Engine.EnableV2Engine {
		query := q.engineV2.Query(params)
		result, err = query.Exec(ctx)
		if err == nil {
			return result, nil
		}
		if !errors.Is(err, engine.ErrNotSupported) {
			level.Error(logger).Log("msg", "query execution failed with new query engine", "err", err)
			return result, errors.Wrap(err, "failed with new execution engine")
		}
		level.Warn(logger).Log("msg", "falling back to legacy query engine", "err", err)
	}

	query := q.engineV1.Query(params)
	return query.Exec(ctx)
}
@ -91,7 +108,7 @@ func (q *QuerierAPI) InstantQueryHandler(ctx context.Context, req *queryrange.Lo
if err != nil {
return logqlmodel.Result{}, err
}
query := q.engine.Query(params)
query := q.engineV1.Query(params)
return query.Exec(ctx)
}
@ -116,7 +133,7 @@ func (q *QuerierAPI) LabelHandler(ctx context.Context, req *logproto.LabelReques
}
status, _ := serverutil.ClientHTTPStatusAndError(err)
logql.RecordLabelQueryMetrics(ctx, util_log.Logger, *req.Start, *req.End, req.Name, req.Query, strconv.Itoa(status), statResult)
logql.RecordLabelQueryMetrics(ctx, utillog.Logger, *req.Start, *req.End, req.Name, req.Query, strconv.Itoa(status), statResult)
return resp, err
}
@ -144,7 +161,7 @@ func (q *QuerierAPI) SeriesHandler(ctx context.Context, req *logproto.SeriesRequ
}
status, _ := serverutil.ClientHTTPStatusAndError(err)
logql.RecordSeriesQueryMetrics(ctx, util_log.Logger, req.Start, req.End, req.Groups, strconv.Itoa(status), req.GetShards(), statResult)
logql.RecordSeriesQueryMetrics(ctx, utillog.Logger, req.Start, req.End, req.Groups, strconv.Itoa(status), req.GetShards(), statResult)
return resp, statResult, err
}
@ -171,7 +188,7 @@ func (q *QuerierAPI) IndexStatsHandler(ctx context.Context, req *loghttp.RangeQu
}
status, _ := serverutil.ClientHTTPStatusAndError(err)
logql.RecordStatsQueryMetrics(ctx, util_log.Logger, req.Start, req.End, req.Query, strconv.Itoa(status), statResult)
logql.RecordStatsQueryMetrics(ctx, utillog.Logger, req.Start, req.End, req.Query, strconv.Itoa(status), statResult)
return resp, err
}
@ -200,7 +217,7 @@ func (q *QuerierAPI) IndexShardsHandler(ctx context.Context, req *loghttp.RangeQ
status, _ := serverutil.ClientHTTPStatusAndError(err)
logql.RecordShardsQueryMetrics(
ctx, util_log.Logger, req.Start, req.End, req.Query, targetBytesPerShard, strconv.Itoa(status), resLength, statResult,
ctx, utillog.Logger, req.Start, req.End, req.Query, targetBytesPerShard, strconv.Itoa(status), resLength, statResult,
)
return resp, err
@ -232,7 +249,7 @@ func (q *QuerierAPI) VolumeHandler(ctx context.Context, req *logproto.VolumeRequ
}
status, _ := serverutil.ClientHTTPStatusAndError(err)
logql.RecordVolumeQueryMetrics(ctx, util_log.Logger, req.From.Time(), req.Through.Time(), req.GetQuery(), uint32(req.GetLimit()), time.Duration(req.GetStep()), strconv.Itoa(status), statResult)
logql.RecordVolumeQueryMetrics(ctx, utillog.Logger, req.From.Time(), req.Through.Time(), req.GetQuery(), uint32(req.GetLimit()), time.Duration(req.GetStep()), strconv.Itoa(status), statResult)
return resp, nil
}

@ -28,7 +28,7 @@ func TestInstantQueryHandler(t *testing.T) {
require.NoError(t, err)
t.Run("log selector expression not allowed for instant queries", func(t *testing.T) {
api := NewQuerierAPI(mockQuerierConfig(), nil, limits, log.NewNopLogger())
api := NewQuerierAPI(mockQuerierConfig(), nil, limits, nil, log.NewNopLogger())
ctx := user.InjectOrgID(context.Background(), "user")
req, err := http.NewRequestWithContext(ctx, "GET", `/api/v1/query`, nil)
@ -268,6 +268,6 @@ func makeRequest(t *testing.T, handler http.Handler, req *http.Request) *httptes
}
func setupAPI(querier *querierMock) *QuerierAPI {
api := NewQuerierAPI(Config{}, querier, nil, log.NewNopLogger())
api := NewQuerierAPI(Config{}, querier, nil, nil, log.NewNopLogger())
return api
}

@ -66,16 +66,20 @@ type Config struct {
// RegisterFlags register flags.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.Engine.RegisterFlagsWithPrefix("querier", f)
f.DurationVar(&cfg.TailMaxDuration, "querier.tail-max-duration", 1*time.Hour, "Maximum duration for which the live tailing requests are served.")
f.DurationVar(&cfg.ExtraQueryDelay, "querier.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.")
f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 3*time.Hour, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.")
f.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 4, "The maximum number of queries that can be simultaneously processed by the querier.")
f.BoolVar(&cfg.QueryStoreOnly, "querier.query-store-only", false, "Only query the store, and not attempt any ingesters. This is useful for running a standalone querier pool operating only against stored data.")
f.BoolVar(&cfg.QueryIngesterOnly, "querier.query-ingester-only", false, "When true, queriers only query the ingesters, and not stored data. This is useful when the object store is unavailable.")
f.BoolVar(&cfg.MultiTenantQueriesEnabled, "querier.multi-tenant-queries-enabled", false, "When true, allow queries to span multiple tenants.")
f.BoolVar(&cfg.PerRequestLimitsEnabled, "querier.per-request-limits-enabled", false, "When true, querier limits sent via a header are enforced.")
f.BoolVar(&cfg.QueryPartitionIngesters, "querier.query-partition-ingesters", false, "When true, querier directs ingester queries to the partition-ingesters instead of the normal ingesters.")
cfg.RegisterFlagsWithPrefix("querier.", f)
}
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.DurationVar(&cfg.TailMaxDuration, prefix+"tail-max-duration", 1*time.Hour, "Maximum duration for which the live tailing requests are served.")
f.DurationVar(&cfg.ExtraQueryDelay, prefix+"extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.")
f.DurationVar(&cfg.QueryIngestersWithin, prefix+"query-ingesters-within", 3*time.Hour, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.")
cfg.Engine.RegisterFlagsWithPrefix(prefix+"engine.", f)
f.IntVar(&cfg.MaxConcurrent, prefix+"max-concurrent", 4, "The maximum number of queries that can be simultaneously processed by the querier.")
f.BoolVar(&cfg.QueryStoreOnly, prefix+"query-store-only", false, "Only query the store, and not attempt any ingesters. This is useful for running a standalone querier pool operating only against stored data.")
f.BoolVar(&cfg.QueryIngesterOnly, prefix+"query-ingester-only", false, "When true, queriers only query the ingesters, and not stored data. This is useful when the object store is unavailable.")
f.BoolVar(&cfg.MultiTenantQueriesEnabled, prefix+"multi-tenant-queries-enabled", false, "When true, allow queries to span multiple tenants.")
f.BoolVar(&cfg.PerRequestLimitsEnabled, prefix+"per-request-limits-enabled", false, "When true, querier limits sent via a header are enforced.")
f.BoolVar(&cfg.QueryPartitionIngesters, prefix+"query-partition-ingesters", false, "When true, querier directs ingester queries to the partition-ingesters instead of the normal ingesters.")
}
// Validate validates the config.

@ -18,7 +18,7 @@ import (
const EvalModeLocal = "local"
type LocalEvaluator struct {
engine *logql.Engine
engine *logql.QueryEngine
logger log.Logger
// we don't want/need to log all the additional context, such as
@ -27,7 +27,7 @@ type LocalEvaluator struct {
insightsLogger log.Logger
}
func NewLocalEvaluator(engine *logql.Engine, logger log.Logger) (*LocalEvaluator, error) {
func NewLocalEvaluator(engine *logql.QueryEngine, logger log.Logger) (*LocalEvaluator, error) {
if engine == nil {
return nil, fmt.Errorf("given engine is nil")
}

@ -44,7 +44,7 @@ func ForEachSeries(ctx context.Context, bucket *bbolt.Bucket, config config.Peri
if len(current.Chunks()) == 0 {
current.Reset(ref.SeriesID, ref.UserID, labelsMapper.Get(ref.SeriesID, ref.UserID))
} else if bytes.Compare(current.UserID(), ref.UserID) != 0 || bytes.Compare(current.SeriesID(), ref.SeriesID) != 0 {
} else if !bytes.Equal(current.UserID(), ref.UserID) || !bytes.Equal(current.SeriesID(), ref.SeriesID) {
err = callback(current)
if err != nil {
return err

Loading…
Cancel
Save