LogQL: Vector and Range Vector Aggregation.

- adds avg, min, max, topk, bottomk, stddev, stdvar, count
- updates API documentation
- adds tests

Improve yacc & Go lexer to understand durations
Remove support for regexp in all queries
Clean up querier and logselector
pull/969/head
Cyril Tovena 7 years ago
parent 8f04545c1e
commit 7f3db9d7f0
  1. cmd/logcli/client.go (16)
  2. cmd/logcli/main.go (1)
  3. docs/api.md (176)
  4. docs/usage.md (61)
  5. pkg/ingester/ingester.go (2)
  6. pkg/ingester/ingester_test.go (24)
  7. pkg/ingester/instance.go (42)
  8. pkg/ingester/tailer.go (49)
  9. pkg/iter/iterator.go (90)
  10. pkg/iter/iterator_test.go (65)
  11. pkg/logproto/logproto.pb.go (275)
  12. pkg/logproto/logproto.proto (10)
  13. pkg/logql/ast.go (224)
  14. pkg/logql/ast_test.go (28)
  15. pkg/logql/engine.go (502)
  16. pkg/logql/expr.y (111)
  17. pkg/logql/expr.y.go (417)
  18. pkg/logql/parser.go (99)
  19. pkg/logql/parser_test.go (293)
  20. pkg/logql/range_vector.go (129)
  21. pkg/logql/range_vector_test.go (138)
  22. pkg/logql/vector.go (65)
  23. pkg/loki/modules.go (4)
  24. pkg/querier/http.go (139)
  25. pkg/querier/querier.go (75)
  26. pkg/querier/querier_mock_test.go (3)
  27. pkg/querier/querier_test.go (14)
  28. pkg/storage/store.go (60)
  29. pkg/storage/store_test.go (18)
  30. pkg/storage/util_test.go (6)
  31. pkg/util/conv.go (14)

@@ -18,20 +18,19 @@ import (
)
const (
queryPath = "/api/prom/query?query=%s&limit=%d&start=%d&end=%d&direction=%s&regexp=%s"
queryPath = "/api/prom/query?query=%s&limit=%d&start=%d&end=%d&direction=%s"
labelsPath = "/api/prom/label"
labelValuesPath = "/api/prom/label/%s/values"
tailPath = "/api/prom/tail?query=%s&regexp=%s&delay_for=%d&limit=%d&start=%d"
tailPath = "/api/prom/tail?query=%s&delay_for=%d&limit=%d&start=%d"
)
func query(from, through time.Time, direction logproto.Direction) (*logproto.QueryResponse, error) {
path := fmt.Sprintf(queryPath,
url.QueryEscape(*queryStr), // query
*limit, // limit
from.UnixNano(), // start
through.UnixNano(), // end
direction.String(), // direction
url.QueryEscape(*regexpStr), // regexp
url.QueryEscape(*queryStr), // query
*limit, // limit
from.UnixNano(), // start
through.UnixNano(), // end
direction.String(), // direction
)
var resp logproto.QueryResponse
@@ -113,7 +112,6 @@ func doRequest(path string, out interface{}) error {
func liveTailQueryConn() (*websocket.Conn, error) {
path := fmt.Sprintf(tailPath,
url.QueryEscape(*queryStr), // query
url.QueryEscape(*regexpStr), // regexp
*delayFor, // delay_for
*limit, // limit
getStart(time.Now()).UnixNano(), // start

@@ -27,7 +27,6 @@ var (
queryCmd = app.Command("query", "Run a LogQL query.")
queryStr = queryCmd.Arg("query", "eg '{foo=\"bar\",baz=\"blip\"}'").Required().String()
regexpStr = queryCmd.Arg("regex", "").String()
limit = queryCmd.Flag("limit", "Limit on number of entries to print.").Default("30").Int()
since = queryCmd.Flag("since", "Lookback window.").Default("1h").Duration()
from = queryCmd.Flag("from", "Start looking for logs at this absolute time (inclusive)").String()

@@ -20,6 +20,180 @@ The Loki server has the following API endpoints (_Note:_ Authentication is out o
}
]
}
```
- `GET /api/v1/query`
For doing instant queries at a single point in time, accepts the following parameters in the query-string:
- `query`: a LogQL query
- `limit`: max number of entries to return (not used for sample expressions)
- `time`: the evaluation time for the query, as a nanosecond Unix epoch (nanoseconds since 1970). Default is always now.
- `direction`: `forward` or `backward`, useful when specifying a limit. Default is backward.
Loki needs to query the index store in order to find log streams for particular labels and the store is spread out by time,
so you need to specify the time and labels accordingly. Querying a long time into the history will cause additional
load to the index server and make the query slower.
Responses look like this:
```json
{
"resultType": "vector" | "streams",
"result": <value>
}
```
Examples:
```bash
$ curl -G -s "http://localhost:3100/api/v1/query" --data-urlencode 'query=sum(rate({job="varlogs"}[10m])) by (level)' | jq
{
"resultType": "vector",
"result": [
{
"metric": {},
"value": [
1559848867745737,
"1267.1266666666666"
]
},
{
"metric": {
"level": "warn"
},
"value": [
1559848867745737,
"37.77166666666667"
]
},
{
"metric": {
"level": "info"
},
"value": [
1559848867745737,
"37.69"
]
}
]
}
```
```bash
$ curl -G -s "http://localhost:3100/api/v1/query" --data-urlencode 'query={job="varlogs"}' | jq
{
"resultType": "streams",
"result": [
{
"labels": "{filename=\"/var/log/myproject.log\", job=\"varlogs\", level=\"info\"}",
"entries": [
{
"ts": "2019-06-06T19:25:41.972739Z",
"line": "foo"
},
{
"ts": "2019-06-06T19:25:41.972722Z",
"line": "bar"
}
]
}
]
}
```
- `GET /api/v1/query_range`
For doing queries over a range of time, accepts the following parameters in the query-string:
- `query`: a LogQL query
- `limit`: max number of entries to return (not used for sample expressions)
- `start`: the start time for the query, as a nanosecond Unix epoch (nanoseconds since 1970). Default is always one hour ago.
- `end`: the end time for the query, as a nanosecond Unix epoch (nanoseconds since 1970). Default is always now.
- `step`: query resolution step width in seconds. Default 1 second.
- `direction`: `forward` or `backward`, useful when specifying a limit. Default is backward.
Loki needs to query the index store in order to find log streams for particular labels and the store is spread out by time,
so you need to specify the time and labels accordingly. Querying a long time into the history will cause additional
load to the index server and make the query slower.
Responses look like this:
```json
{
"resultType": "matrix" | "streams",
"result": <value>
}
```
Examples:
```bash
$ curl -G -s "http://localhost:3100/api/v1/query_range" --data-urlencode 'query=sum(rate({job="varlogs"}[10m])) by (level)' --data-urlencode 'step=300' | jq
{
"resultType": "matrix",
"result": [
{
"metric": {
"level": "info"
},
"values": [
[
1559848958663735,
"137.95"
],
[
1559849258663735,
"467.115"
],
[
1559849558663735,
"658.8516666666667"
]
]
},
{
"metric": {
"level": "warn"
},
"values": [
[
1559848958663735,
"137.27833333333334"
],
[
1559849258663735,
"467.69"
],
[
1559849558663735,
"660.6933333333334"
]
]
}
]
}
```
```bash
$ curl -G -s "http://localhost:3100/api/v1/query_range" --data-urlencode 'query={job="varlogs"}' | jq
{
"resultType": "streams",
"result": [
{
"labels": "{filename=\"/var/log/myproject.log\", job=\"varlogs\", level=\"info\"}",
"entries": [
{
"ts": "2019-06-06T19:25:41.972739Z",
"line": "foo"
},
{
"ts": "2019-06-06T19:25:41.972722Z",
"line": "bar"
}
]
}
]
}
```
- `GET /api/prom/query`
@@ -37,6 +211,8 @@ The Loki server has the following API endpoints (_Note:_ Authentication is out o
so you need to specify the start and end labels accordingly. Querying a long time into the history will cause additional
load to the index server and make the query slower.
> This endpoint doesn't accept [sample queries](./usage.md#counting-logs).
Responses look like this:
```json

@@ -21,7 +21,7 @@ Read more about the Explore feature in the [Grafana docs](http://docs.grafana.or
## Searching with Labels and Distributed Grep
A log query consists of two parts: **log stream selector**, and a **filter expression**. For performance reasons you need to start by choosing a set of log streams using a Prometheus-style log stream selector.
A log filter query consists of two parts: **log stream selector**, and a **filter expression**. For performance reasons you need to start by choosing a set of log streams using a Prometheus-style log stream selector.
The log stream selector will reduce the number of log streams to a manageable volume and then the regex search expression is used to do a distributed grep over those log streams.
@@ -76,3 +76,62 @@ The query language is still under development to support more features, e.g.,:
- Number extraction for timeseries based on number in log messages
- JSON accessors for filtering of JSON-structured logs
- Context (like `grep -C n`)
## Counting logs
Loki's LogQL supports sample expressions, allowing you to count entries per stream after the regex filtering stage.
### Range Vector aggregation
The language shares the [range vector](https://prometheus.io/docs/prometheus/latest/querying/basics/#range-vector-selectors) concept with Prometheus, except that the selected range of samples contains a value of one for each log entry. You can then apply an aggregation over the selected range to transform it into an instant vector.
`rate` calculates the number of entries per second, and `count_over_time` counts the entries for each log stream within the range.
In this example, we count all the log lines recorded within the last five minutes for the mysql job.
> `count_over_time({job="mysql"}[5m])`
A range vector aggregation can also be applied to a [Filter Expression](#filter-expression), allowing you to select only matching log entries.
> `rate(({job="mysql"} |= "error" != "timeout")[10s])`
The query above will compute the per-second rate of all errors except those containing `timeout` within the last 10 seconds.
You can then use aggregation operators over the range vector aggregation.
### Aggregation operators
Like [PromQL](https://prometheus.io/docs/prometheus/latest/querying/operators/#aggregation-operators), Loki's LogQL supports a subset of built-in aggregation operators that can be used to aggregate the elements of a single vector, resulting in a new vector of fewer elements with aggregated values:
- `sum` (calculate sum over dimensions)
- `min` (select minimum over dimensions)
- `max` (select maximum over dimensions)
- `avg` (calculate the average over dimensions)
- `stddev` (calculate population standard deviation over dimensions)
- `stdvar` (calculate population standard variance over dimensions)
- `count` (count number of elements in the vector)
- `bottomk` (smallest k elements by sample value)
- `topk` (largest k elements by sample value)
These operators can either be used to aggregate over all label dimensions or preserve distinct dimensions by including a `without` or `by` clause.
> `<aggr-op>([parameter,] <vector expression>) [without|by (<label list>)]`
`parameter` is only required for `topk` and `bottomk`. `without` removes the listed labels from the result vector, while all other labels are preserved in the output. `by` does the opposite and drops labels that are not listed in the `by` clause, even if their label values are identical between all elements of the vector.
`topk` and `bottomk` are different from other aggregators in that a subset of the input samples, including the original labels, are returned in the result vector. `by` and `without` are only used to bucket the input vector.
Examples:
This query returns the top 10 applications by log throughput:
> `topk(10,sum(rate({region="us-east1"}[5m])) by (name))`
The count of logs during the last 5 minutes, grouped by level:
> `sum(count_over_time({job="mysql"}[5m])) by (level)`
The per-region average rate of HTTP GET requests from nginx access logs:
> `avg(rate(({job="nginx"} |= "GET")[10s])) by (region)`
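For programmatic access, these sample expressions go through the `/api/v1/query_range` endpoint documented in api.md. An illustrative Go sketch (assuming a Loki instance at `localhost:3100`, as in the curl examples above):
```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// Build the query string for GET /api/v1/query_range (see docs/api.md).
	params := url.Values{}
	params.Set("query", `sum(count_over_time({job="mysql"}[5m])) by (level)`)
	params.Set("step", "300") // resolution step width in seconds

	resp, err := http.Get("http://localhost:3100/api/v1/query_range?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	// Expect {"resultType":"matrix","result":[...]} for a sample expression.
	fmt.Println(string(body))
}
```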

@@ -267,7 +267,7 @@ func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_
}
instance := i.getOrCreateInstance(instanceID)
tailer, err := newTailer(instanceID, req.Query, req.Regex, queryServer)
tailer, err := newTailer(instanceID, req.Query, queryServer)
if err != nil {
return err
}

@@ -55,10 +55,10 @@ func TestIngester(t *testing.T) {
ctx: ctx,
}
err = i.Query(&logproto.QueryRequest{
Query: `{foo="bar"}`,
Limit: 100,
Start: time.Unix(0, 0),
End: time.Unix(1, 0),
Selector: `{foo="bar"}`,
Limit: 100,
Start: time.Unix(0, 0),
End: time.Unix(1, 0),
}, &result)
require.NoError(t, err)
require.Len(t, result.resps, 1)
@@ -68,10 +68,10 @@ func TestIngester(t *testing.T) {
ctx: ctx,
}
err = i.Query(&logproto.QueryRequest{
Query: `{foo="bar",bar="baz1"}`,
Limit: 100,
Start: time.Unix(0, 0),
End: time.Unix(1, 0),
Selector: `{foo="bar",bar="baz1"}`,
Limit: 100,
Start: time.Unix(0, 0),
End: time.Unix(1, 0),
}, &result)
require.NoError(t, err)
require.Len(t, result.resps, 1)
@@ -82,10 +82,10 @@ func TestIngester(t *testing.T) {
ctx: ctx,
}
err = i.Query(&logproto.QueryRequest{
Query: `{foo="bar",bar="baz2"}`,
Limit: 100,
Start: time.Unix(0, 0),
End: time.Unix(1, 0),
Selector: `{foo="bar",bar="baz2"}`,
Limit: 100,
Start: time.Unix(0, 0),
End: time.Unix(1, 0),
}, &result)
require.NoError(t, err)
require.Len(t, result.resps, 1)

@@ -121,28 +121,20 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
}
func (i *instance) Query(req *logproto.QueryRequest, queryServer logproto.Querier_QueryServer) error {
expr, err := logql.ParseExpr(req.Query)
expr, err := (logql.SelectParams{QueryRequest: req}).LogSelector()
if err != nil {
return err
}
if req.Regex != "" {
expr = logql.NewFilterExpr(expr, labels.MatchRegexp, req.Regex)
filter, err := expr.Filter()
if err != nil {
return err
}
querier := logql.QuerierFunc(func(matchers []*labels.Matcher, filter logql.Filter) (iter.EntryIterator, error) {
iters, err := i.lookupStreams(req, matchers, filter)
if err != nil {
return nil, err
}
return iter.NewHeapIterator(iters, req.Direction), nil
})
iter, err := expr.Eval(querier)
iters, err := i.lookupStreams(req, expr.Matchers(), filter)
if err != nil {
return err
}
iter := iter.NewHeapIterator(iters, req.Direction)
defer helpers.LogError("closing iterator", iter.Close)
return sendBatches(iter, queryServer, req.Limit)
@@ -254,6 +246,9 @@ func isDone(ctx context.Context) bool {
}
func sendBatches(i iter.EntryIterator, queryServer logproto.Querier_QueryServer, limit uint32) error {
if limit <= 0 {
return sendAllBatches(i, queryServer)
}
sent := uint32(0)
for sent < limit && !isDone(queryServer.Context()) {
batch, batchSize, err := iter.ReadBatch(i, helpers.MinUint32(queryBatchSize, limit-sent))
@@ -272,3 +267,20 @@ func sendBatches(i iter.EntryIterator, queryServer logproto.Querier_QueryServer,
}
return nil
}
func sendAllBatches(i iter.EntryIterator, queryServer logproto.Querier_QueryServer) error {
for !isDone(queryServer.Context()) {
batch, _, err := iter.ReadBatch(i, queryBatchSize)
if err != nil {
return err
}
if len(batch.Streams) == 0 {
return nil
}
if err := queryServer.Send(batch); err != nil {
return err
}
}
return nil
}

@@ -7,8 +7,6 @@ import (
"sync"
"time"
"github.com/grafana/loki/pkg/iter"
"github.com/cortexproject/cortex/pkg/util"
"github.com/go-kit/kit/log/level"
"github.com/grafana/loki/pkg/logproto"
@@ -23,6 +21,7 @@ type tailer struct {
id uint32
orgID string
matchers []*labels.Matcher
filter logql.Filter
expr logql.Expr
sendChan chan *logproto.Stream
@@ -36,24 +35,25 @@ type tailer struct {
conn logproto.Querier_TailServer
}
func newTailer(orgID, query, regex string, conn logproto.Querier_TailServer) (*tailer, error) {
expr, err := logql.ParseExpr(query)
func newTailer(orgID, query string, conn logproto.Querier_TailServer) (*tailer, error) {
expr, err := logql.ParseLogSelector(query)
if err != nil {
return nil, err
}
matchers := expr.Matchers()
if regex != "" {
expr = logql.NewFilterExpr(expr, labels.MatchRegexp, regex)
filter, err := expr.Filter()
if err != nil {
return nil, err
}
matchers := expr.Matchers()
return &tailer{
orgID: orgID,
matchers: matchers,
filter: filter,
sendChan: make(chan *logproto.Stream, bufferSizeForTailResponse),
conn: conn,
droppedStreams: []*logproto.DroppedStream{},
id: generateUniqueID(orgID, query, regex),
id: generateUniqueID(orgID, query),
done: make(chan struct{}),
expr: expr,
}, nil
@@ -111,11 +111,7 @@ func (t *tailer) send(stream logproto.Stream) {
return
}
err := t.filterEntriesInStream(&stream)
if err != nil {
t.dropStream(stream)
return
}
t.filterEntriesInStream(&stream)
if len(stream.Entries) == 0 {
return
@@ -128,24 +124,14 @@ func (t *tailer) send(stream logproto.Stream) {
}
}
func (t *tailer) filterEntriesInStream(stream *logproto.Stream) error {
querier := logql.QuerierFunc(func(matchers []*labels.Matcher, filter logql.Filter) (iter.EntryIterator, error) {
var filteredEntries []logproto.Entry
for _, e := range stream.Entries {
if filter == nil || filter([]byte(e.Line)) {
filteredEntries = append(filteredEntries, e)
}
func (t *tailer) filterEntriesInStream(stream *logproto.Stream) {
var filteredEntries []logproto.Entry
for _, e := range stream.Entries {
if t.filter == nil || t.filter([]byte(e.Line)) {
filteredEntries = append(filteredEntries, e)
}
stream.Entries = filteredEntries
return nil, nil
})
_, err := t.expr.Eval(querier)
if err != nil {
return err
}
return nil
stream.Entries = filteredEntries
}
// Returns true if tailer is interested in the passed labelset
@@ -230,11 +216,10 @@ func (t *tailer) getID() uint32 {
}
// An id is useful in managing tailer instances
func generateUniqueID(orgID, query, regex string) uint32 {
func generateUniqueID(orgID, query string) uint32 {
uniqueID := fnv.New32()
_, _ = uniqueID.Write([]byte(orgID))
_, _ = uniqueID.Write([]byte(query))
_, _ = uniqueID.Write([]byte(regex))
timeNow := make([]byte, 8)
binary.LittleEndian.PutUint64(timeNow, uint64(time.Now().UnixNano()))

@@ -574,3 +574,93 @@ func (i *entryIteratorForward) Error() error { return nil }
func (i *entryIteratorForward) Labels() string {
return i.cur.labels
}
type peekingEntryIterator struct {
iter EntryIterator
cache *entryWithLabels
next *entryWithLabels
}
// PeekingEntryIterator is an entry iterator that can look ahead an entry
// using `Peek` without advancing its cursor.
type PeekingEntryIterator interface {
EntryIterator
Peek() (string, logproto.Entry, bool)
}
// NewPeekingIterator creates a new peeking iterator.
func NewPeekingIterator(iter EntryIterator) PeekingEntryIterator {
// initialize the next entry so we can peek right from the start.
var cache *entryWithLabels
if iter.Next() {
cache = &entryWithLabels{
entry: iter.Entry(),
labels: iter.Labels(),
}
}
return &peekingEntryIterator{
iter: iter,
cache: cache,
next: cache,
}
}
// Next implements `EntryIterator`
func (it *peekingEntryIterator) Next() bool {
if it.cache != nil {
it.next = &entryWithLabels{
entry: it.cache.entry,
labels: it.cache.labels,
}
return it.cacheNext()
}
return false
}
// cacheNext caches the next element if it exists.
func (it *peekingEntryIterator) cacheNext() bool {
if it.iter.Next() {
it.cache = &entryWithLabels{
entry: it.iter.Entry(),
labels: it.iter.Labels(),
}
return true
}
it.cache = nil
return false
}
// Peek implements `PeekingEntryIterator`
func (it *peekingEntryIterator) Peek() (string, logproto.Entry, bool) {
if it.cache != nil {
return it.cache.labels, it.cache.entry, true
}
return "", logproto.Entry{}, false
}
// Labels implements `EntryIterator`
func (it *peekingEntryIterator) Labels() string {
if it.next != nil {
return it.next.labels
}
return ""
}
// Entry implements `EntryIterator`
func (it *peekingEntryIterator) Entry() logproto.Entry {
if it.next != nil {
return it.next.entry
}
return logproto.Entry{}
}
// Error implements `EntryIterator`
func (it *peekingEntryIterator) Error() error {
return it.iter.Error()
}
// Close implements `EntryIterator`
func (it *peekingEntryIterator) Close() error {
return it.iter.Close()
}

@@ -12,7 +12,7 @@ import (
)
const testSize = 10
const defaultLabels = "{foo: \"baz\"}"
const defaultLabels = "{foo=\"baz\"}"
func TestIterator(t *testing.T) {
for i, tc := range []struct {
@@ -272,3 +272,66 @@ func TestEntryIteratorForward(t *testing.T) {
assert.Equal(t, nil, forwardIterator.Error())
assert.NoError(t, forwardIterator.Close())
}
func Test_PeekingIterator(t *testing.T) {
iter := NewPeekingIterator(NewStreamIterator(&logproto.Stream{
Entries: []logproto.Entry{
{
Timestamp: time.Unix(0, 1),
},
{
Timestamp: time.Unix(0, 2),
},
{
Timestamp: time.Unix(0, 3),
},
},
}))
_, peek, hasNext := iter.Peek()
if peek.Timestamp.UnixNano() != 1 {
t.Fatal("wrong peeked time.")
}
if !hasNext {
t.Fatal("should have next.")
}
hasNext = iter.Next()
if !hasNext {
t.Fatal("should have next.")
}
if iter.Entry().Timestamp.UnixNano() != 1 {
t.Fatal("wrong entry time.")
}
_, peek, hasNext = iter.Peek()
if peek.Timestamp.UnixNano() != 2 {
t.Fatal("wrong peeked time.")
}
if !hasNext {
t.Fatal("should have next.")
}
hasNext = iter.Next()
if !hasNext {
t.Fatal("should have next.")
}
if iter.Entry().Timestamp.UnixNano() != 2 {
t.Fatal("wrong entry time.")
}
_, peek, hasNext = iter.Peek()
if peek.Timestamp.UnixNano() != 3 {
t.Fatal("wrong peeked time.")
}
if !hasNext {
t.Fatal("should have next.")
}
hasNext = iter.Next()
if hasNext {
t.Fatal("should not have next.")
}
if iter.Entry().Timestamp.UnixNano() != 3 {
t.Fatal("wrong entry time.")
}
_, _, hasNext = iter.Peek()
if hasNext {
t.Fatal("should not have next.")
}
}

@@ -135,12 +135,11 @@ func (m *PushResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_PushResponse proto.InternalMessageInfo
type QueryRequest struct {
Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
Start time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start"`
End time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end"`
Direction Direction `protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"`
Regex string `protobuf:"bytes,6,opt,name=regex,proto3" json:"regex,omitempty"`
}
func (m *QueryRequest) Reset() { *m = QueryRequest{} }
@@ -175,9 +174,9 @@ func (m *QueryRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_QueryRequest proto.InternalMessageInfo
func (m *QueryRequest) GetQuery() string {
func (m *QueryRequest) GetSelector() string {
if m != nil {
return m.Query
return m.Selector
}
return ""
}
@@ -210,13 +209,6 @@ func (m *QueryRequest) GetDirection() Direction {
return FORWARD
}
func (m *QueryRequest) GetRegex() string {
if m != nil {
return m.Regex
}
return ""
}
type QueryResponse struct {
Streams []*Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"`
}
@@ -474,10 +466,9 @@ func (m *Entry) GetLine() string {
type TailRequest struct {
Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
Regex string `protobuf:"bytes,2,opt,name=regex,proto3" json:"regex,omitempty"`
DelayFor uint32 `protobuf:"varint,3,opt,name=delayFor,proto3" json:"delayFor,omitempty"`
Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"`
Start time.Time `protobuf:"bytes,5,opt,name=start,proto3,stdtime" json:"start"`
DelayFor uint32 `protobuf:"varint,2,opt,name=delayFor,proto3" json:"delayFor,omitempty"`
Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
Start time.Time `protobuf:"bytes,4,opt,name=start,proto3,stdtime" json:"start"`
}
func (m *TailRequest) Reset() { *m = TailRequest{} }
@@ -519,13 +510,6 @@ func (m *TailRequest) GetQuery() string {
return ""
}
func (m *TailRequest) GetRegex() string {
if m != nil {
return m.Regex
}
return ""
}
func (m *TailRequest) GetDelayFor() uint32 {
if m != nil {
return m.DelayFor
@@ -875,65 +859,64 @@ func init() {
func init() { proto.RegisterFile("logproto.proto", fileDescriptor_7a8976f235a02f79) }
var fileDescriptor_7a8976f235a02f79 = []byte{
// 916 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x4f, 0x6f, 0x1b, 0x45,
0x14, 0xdf, 0x71, 0xd6, 0x6b, 0xfb, 0xf9, 0x4f, 0xa2, 0xa1, 0x24, 0x8b, 0x41, 0x6b, 0x6b, 0x0e,
0xd4, 0x2a, 0xc2, 0x01, 0x53, 0x28, 0x14, 0x24, 0x14, 0xb7, 0x44, 0x44, 0x20, 0xd1, 0x6e, 0x22,
0x71, 0x42, 0xd5, 0x26, 0x3b, 0x71, 0x56, 0xac, 0x77, 0xdd, 0xd9, 0x59, 0x44, 0x6e, 0x7c, 0x84,
0xde, 0xf8, 0x08, 0x20, 0x0e, 0x7c, 0x04, 0xce, 0x3d, 0xe6, 0xd8, 0x53, 0x20, 0xce, 0x05, 0xe5,
0xd4, 0x1b, 0x57, 0x34, 0x7f, 0xd6, 0x3b, 0x4e, 0x23, 0xda, 0x70, 0xb1, 0xe7, 0xcd, 0xbc, 0x37,
0xf3, 0x7e, 0xbf, 0xf7, 0x7b, 0x6f, 0xa1, 0x13, 0xa7, 0x93, 0x19, 0x4b, 0x79, 0x3a, 0x94, 0xbf,
0xb8, 0x5e, 0xd8, 0xdd, 0xde, 0x24, 0x4d, 0x27, 0x31, 0xdd, 0x94, 0xd6, 0x7e, 0x7e, 0xb8, 0xc9,
0xa3, 0x29, 0xcd, 0x78, 0x30, 0x9d, 0x29, 0xd7, 0xee, 0xbb, 0x93, 0x88, 0x1f, 0xe5, 0xfb, 0xc3,
0x83, 0x74, 0xba, 0x39, 0x49, 0x27, 0x69, 0xe9, 0x29, 0x2c, 0x69, 0xc8, 0x95, 0x72, 0x27, 0xdb,
0xd0, 0x7c, 0x90, 0x67, 0x47, 0x3e, 0x7d, 0x9c, 0xd3, 0x8c, 0xe3, 0x3b, 0x50, 0xcb, 0x38, 0xa3,
0xc1, 0x34, 0x73, 0x51, 0x7f, 0x65, 0xd0, 0x1c, 0xad, 0x0d, 0x17, 0xa9, 0xec, 0xca, 0x83, 0x71,
0xf3, 0xe2, 0xb4, 0x57, 0x38, 0xf9, 0xc5, 0x82, 0x74, 0xa0, 0xa5, 0xee, 0xc9, 0x66, 0x69, 0x92,
0x51, 0xf2, 0x0f, 0x82, 0xd6, 0xc3, 0x9c, 0xb2, 0xe3, 0xe2, 0xe6, 0x1b, 0x50, 0x7d, 0x2c, 0x6c,
0x17, 0xf5, 0xd1, 0xa0, 0xe1, 0x2b, 0x43, 0xec, 0xc6, 0xd1, 0x34, 0xe2, 0x6e, 0xa5, 0x8f, 0x06,
0x6d, 0x5f, 0x19, 0xf8, 0x2e, 0x54, 0x33, 0x1e, 0x30, 0xee, 0xae, 0xf4, 0xd1, 0xa0, 0x39, 0xea,
0x0e, 0x15, 0xe8, 0x61, 0x01, 0x65, 0xb8, 0x57, 0x80, 0x1e, 0xd7, 0x9f, 0x9e, 0xf6, 0xac, 0x27,
0x7f, 0xf6, 0x90, 0xaf, 0x42, 0xf0, 0x47, 0xb0, 0x42, 0x93, 0xd0, 0xb5, 0xaf, 0x11, 0x29, 0x02,
0xf0, 0xfb, 0xd0, 0x08, 0x23, 0x46, 0x0f, 0x78, 0x94, 0x26, 0x6e, 0xb5, 0x8f, 0x06, 0x9d, 0xd1,
0x6b, 0x25, 0xf6, 0xfb, 0xc5, 0x91, 0x5f, 0x7a, 0x89, 0xe4, 0x19, 0x9d, 0xd0, 0x1f, 0x5d, 0x47,
0x41, 0x92, 0x06, 0xf9, 0x14, 0xda, 0x1a, 0xb8, 0xa2, 0x02, 0xdf, 0x7a, 0x29, 0xa7, 0x25, 0x8d,
0xbf, 0x23, 0x68, 0x7d, 0x1d, 0xec, 0xd3, 0xb8, 0xa0, 0x0d, 0x83, 0x9d, 0x04, 0x53, 0xaa, 0x59,
0x93, 0x6b, 0xbc, 0x0e, 0xce, 0x0f, 0x41, 0x9c, 0xd3, 0x4c, 0xb2, 0x56, 0xf7, 0xb5, 0x75, 0x5d,
0xda, 0xd0, 0xff, 0xa6, 0x0d, 0x2d, 0x68, 0x23, 0x37, 0xa1, 0xad, 0xf3, 0xd5, 0x68, 0xcb, 0xe4,
0x04, 0xd8, 0x46, 0x91, 0x1c, 0x39, 0x02, 0x47, 0x81, 0xc5, 0x04, 0x9c, 0x58, 0x84, 0x64, 0x0a,
0xd4, 0x18, 0x2e, 0x4e, 0x7b, 0x7a, 0xc7, 0xd7, 0xff, 0xf8, 0x2e, 0xd4, 0x68, 0xc2, 0x59, 0x24,
0x31, 0x0a, 0xce, 0x56, 0x4b, 0xce, 0xbe, 0x48, 0x38, 0x3b, 0x1e, 0xaf, 0x8a, 0xf2, 0x09, 0x29,
0x6a, 0x3f, 0xbf, 0x58, 0x90, 0x14, 0xaa, 0xd2, 0x05, 0x7f, 0x09, 0x8d, 0x45, 0x77, 0xc8, 0xb7,
0xfe, 0x1b, 0x59, 0x47, 0xdf, 0x58, 0xe1, 0x99, 0xc4, 0x57, 0x06, 0xe3, 0xb7, 0xc0, 0x8e, 0xa3,
0x84, 0x4a, 0xbe, 0x1b, 0xe3, 0xfa, 0xc5, 0x69, 0x4f, 0xda, 0xbe, 0xfc, 0x25, 0xbf, 0x20, 0x68,
0xee, 0x05, 0x51, 0xfc, 0x52, 0xa9, 0x2b, 0xb5, 0x54, 0x0c, 0xb5, 0xe0, 0x2e, 0xd4, 0x43, 0x1a,
0x07, 0xc7, 0xdb, 0x29, 0x93, 0x65, 0x6b, 0xfb, 0x0b, 0xbb, 0x6c, 0x0e, 0xfb, 0xca, 0xe6, 0xa8,
0x5e, 0xbb, 0x39, 0xc8, 0x31, 0xb4, 0x54, 0xa2, 0xba, 0x58, 0x03, 0x70, 0x94, 0xf2, 0x34, 0x3d,
0x2f, 0x2a, 0x53, 0x9f, 0xe3, 0xcf, 0xa1, 0x13, 0xb2, 0x74, 0x36, 0xa3, 0xe1, 0xae, 0xd6, 0xb2,
0xaa, 0xcb, 0x86, 0xd1, 0x23, 0xe6, 0xb9, 0x7f, 0xc9, 0x9d, 0xfc, 0x8c, 0xa0, 0xbd, 0xe4, 0x81,
0x3f, 0x06, 0xfb, 0x90, 0xa5, 0xd3, 0x57, 0xa8, 0x4c, 0x89, 0x43, 0x46, 0xe0, 0xdb, 0x50, 0xe1,
0xa9, 0xe4, 0xf1, 0x55, 0xe3, 0x2a, 0x3c, 0x15, 0xca, 0xd4, 0xba, 0x5b, 0x91, 0x15, 0xd0, 0x16,
0xf9, 0x0d, 0xc1, 0xaa, 0x88, 0xd9, 0xa5, 0x42, 0x3e, 0xf7, 0x8e, 0xf2, 0xe4, 0x7b, 0x3c, 0x80,
0x35, 0xf1, 0xd2, 0xa3, 0x28, 0x99, 0xd0, 0x8c, 0x53, 0xf6, 0x28, 0x0a, 0x75, 0x35, 0x3b, 0x62,
0x7f, 0x47, 0x6f, 0xef, 0x84, 0x78, 0x03, 0x6a, 0x79, 0xa6, 0x1c, 0x54, 0x61, 0x1d, 0x61, 0xee,
0x84, 0xf8, 0x1d, 0xe3, 0x39, 0xc1, 0x94, 0x31, 0x4d, 0x64, 0xc7, 0x3c, 0x08, 0x22, 0xb6, 0xd0,
0xfb, 0x4d, 0x70, 0x0e, 0xc4, 0xc3, 0x99, 0x6b, 0x5f, 0x96, 0xbb, 0x4c, 0xc8, 0xd7, 0xc7, 0xe4,
0x43, 0x68, 0x2c, 0xa2, 0xaf, 0x1c, 0x0e, 0x37, 0xa0, 0x2a, 0x3b, 0xae, 0x90, 0x99, 0x34, 0xc8,
0x9b, 0x50, 0x55, 0xc0, 0x30, 0xd8, 0x61, 0xc0, 0x03, 0x19, 0xd2, 0xf2, 0xe5, 0x9a, 0xb8, 0xb0,
0xbe, 0xc7, 0x82, 0x24, 0x3b, 0xa4, 0x4c, 0x3a, 0x65, 0x85, 0x3e, 0x6e, 0xbd, 0x0d, 0x8d, 0xc5,
0xe4, 0xc3, 0x4d, 0xa8, 0x6d, 0x7f, 0xe3, 0x7f, 0xbb, 0xe5, 0xdf, 0x5f, 0xb3, 0x70, 0x0b, 0xea,
0xe3, 0xad, 0x7b, 0x5f, 0x49, 0x0b, 0x8d, 0xb6, 0xc0, 0x11, 0xd3, 0x9f, 0x32, 0x7c, 0x07, 0x6c,
0xb1, 0xc2, 0xaf, 0x97, 0x00, 0x8c, 0xef, 0x4b, 0x77, 0xfd, 0xf2, 0xb6, 0xfe, 0x5c, 0x58, 0xa3,
0x3f, 0x10, 0xd4, 0xc4, 0xdc, 0x8c, 0x28, 0xc3, 0x9f, 0x41, 0x55, 0x8e, 0x50, 0x6c, 0xb8, 0x9b,
0x1f, 0x93, 0xee, 0xc6, 0x0b, 0xfb, 0xc5, 0x3d, 0xef, 0x21, 0xd1, 0x20, 0x92, 0x22, 0x33, 0xda,
0x9c, 0xa9, 0x66, 0xf4, 0xd2, 0xec, 0x22, 0x16, 0xfe, 0x04, 0x6c, 0xd1, 0x20, 0x66, 0xfa, 0x46,
0x67, 0x9b, 0xe9, 0x9b, 0x7d, 0x24, 0x9e, 0x1d, 0x7d, 0x07, 0xf5, 0x42, 0x16, 0xf8, 0x21, 0x74,
0x96, 0x19, 0xc5, 0x6f, 0x18, 0x91, 0xcb, 0x5a, 0xeb, 0xf6, 0x8d, 0xa3, 0x2b, 0xcb, 0x40, 0xac,
0x01, 0x1a, 0xdf, 0x3e, 0x39, 0xf3, 0xac, 0x67, 0x67, 0x9e, 0xf5, 0xfc, 0xcc, 0x43, 0x3f, 0xcd,
0x3d, 0xf4, 0xeb, 0xdc, 0x43, 0x4f, 0xe7, 0x1e, 0x3a, 0x99, 0x7b, 0xe8, 0xaf, 0xb9, 0x87, 0xfe,
0x9e, 0x7b, 0xd6, 0xf3, 0xb9, 0x87, 0x9e, 0x9c, 0x7b, 0xd6, 0xc9, 0xb9, 0x67, 0x3d, 0x3b, 0xf7,
0xac, 0x7d, 0x47, 0xde, 0xfb, 0xc1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xdf, 0x07, 0x94,
0x51, 0x08, 0x00, 0x00,
// 908 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcd, 0x6e, 0x23, 0x45,
0x10, 0x9e, 0xb6, 0xc7, 0x7f, 0x65, 0xc7, 0x89, 0x9a, 0x90, 0x18, 0x83, 0xc6, 0x56, 0x1f, 0x58,
0x6b, 0x11, 0x0e, 0x98, 0x85, 0x85, 0x05, 0x09, 0xc5, 0xbb, 0x44, 0x44, 0x20, 0xb1, 0x3b, 0x89,
0xc4, 0x09, 0xad, 0x26, 0x99, 0x8e, 0x33, 0x62, 0x3c, 0xed, 0xed, 0x6e, 0x23, 0xe5, 0xc6, 0x23,
0x2c, 0x27, 0x5e, 0x01, 0x71, 0xe0, 0x11, 0x38, 0xef, 0x31, 0xc7, 0x3d, 0x05, 0xe2, 0x1c, 0x40,
0x39, 0xed, 0x23, 0xa0, 0xfe, 0x99, 0x1f, 0x67, 0x23, 0xd8, 0x70, 0xb1, 0xbb, 0xba, 0xab, 0xba,
0xeb, 0xfb, 0xea, 0xab, 0x1a, 0x68, 0xc7, 0x6c, 0x32, 0xe3, 0x4c, 0xb2, 0xa1, 0xfe, 0xc5, 0xf5,
0xd4, 0xee, 0xf6, 0x26, 0x8c, 0x4d, 0x62, 0xba, 0xa5, 0xad, 0x83, 0xf9, 0xd1, 0x96, 0x8c, 0xa6,
0x54, 0xc8, 0x60, 0x3a, 0x33, 0xae, 0xdd, 0x77, 0x27, 0x91, 0x3c, 0x9e, 0x1f, 0x0c, 0x0f, 0xd9,
0x74, 0x6b, 0xc2, 0x26, 0x2c, 0xf7, 0x54, 0x96, 0x36, 0xf4, 0xca, 0xb8, 0x93, 0x1d, 0x68, 0x3e,
0x9c, 0x8b, 0x63, 0x9f, 0x3e, 0x99, 0x53, 0x21, 0xf1, 0x5d, 0xa8, 0x09, 0xc9, 0x69, 0x30, 0x15,
0x1d, 0xd4, 0x2f, 0x0f, 0x9a, 0xa3, 0xb5, 0x61, 0x96, 0xca, 0x9e, 0x3e, 0x18, 0x37, 0x2f, 0xcf,
0x7a, 0xa9, 0x93, 0x9f, 0x2e, 0x48, 0x1b, 0x5a, 0xe6, 0x1e, 0x31, 0x63, 0x89, 0xa0, 0xe4, 0x2f,
0x04, 0xad, 0x47, 0x73, 0xca, 0x4f, 0xd2, 0x9b, 0xbb, 0x50, 0x17, 0x34, 0xa6, 0x87, 0x92, 0xf1,
0x0e, 0xea, 0xa3, 0x41, 0xc3, 0xcf, 0x6c, 0xbc, 0x0e, 0x95, 0x38, 0x9a, 0x46, 0xb2, 0x53, 0xea,
0xa3, 0xc1, 0x8a, 0x6f, 0x0c, 0x7c, 0x0f, 0x2a, 0x42, 0x06, 0x5c, 0x76, 0xca, 0x7d, 0x34, 0x68,
0x8e, 0xba, 0x43, 0x03, 0x7d, 0x98, 0x02, 0x1a, 0xee, 0xa7, 0xd0, 0xc7, 0xf5, 0x67, 0x67, 0x3d,
0xe7, 0xe9, 0x1f, 0x3d, 0xe4, 0x9b, 0x10, 0xfc, 0x11, 0x94, 0x69, 0x12, 0x76, 0xdc, 0x1b, 0x44,
0xaa, 0x00, 0xfc, 0x3e, 0x34, 0xc2, 0x88, 0xd3, 0x43, 0x19, 0xb1, 0xa4, 0x53, 0xe9, 0xa3, 0x41,
0x7b, 0xf4, 0x5a, 0xce, 0xc0, 0x83, 0xf4, 0xc8, 0xcf, 0xbd, 0xc8, 0xa7, 0xb0, 0x62, 0x81, 0x1a,
0xe8, 0xf8, 0xf6, 0x7f, 0x72, 0x98, 0xd3, 0xf6, 0x1b, 0x82, 0xd6, 0xd7, 0xc1, 0x01, 0x8d, 0x53,
0x9a, 0x30, 0xb8, 0x49, 0x30, 0xa5, 0x96, 0x22, 0xbd, 0xc6, 0x1b, 0x50, 0xfd, 0x21, 0x88, 0xe7,
0x54, 0x68, 0x7e, 0xea, 0xbe, 0xb5, 0x6e, 0x4a, 0x10, 0xfa, 0xdf, 0x04, 0xa1, 0x8c, 0x20, 0x72,
0x0b, 0x56, 0x6c, 0xbe, 0x16, 0x6d, 0x9e, 0x9c, 0x02, 0xdb, 0x48, 0x93, 0x23, 0xc7, 0x50, 0x35,
0x60, 0x31, 0x81, 0x6a, 0xac, 0x42, 0x84, 0x01, 0x35, 0x86, 0xcb, 0xb3, 0x9e, 0xdd, 0xf1, 0xed,
0x3f, 0xbe, 0x07, 0x35, 0x9a, 0x48, 0x1e, 0x69, 0x8c, 0x8a, 0xb3, 0xd5, 0x9c, 0xb3, 0x2f, 0x12,
0xc9, 0x4f, 0xc6, 0xab, 0xaa, 0x50, 0x4a, 0x7a, 0xd6, 0xcf, 0x4f, 0x17, 0x84, 0x41, 0x45, 0xbb,
0xe0, 0x2f, 0xa1, 0x91, 0x75, 0x83, 0x7e, 0xeb, 0xdf, 0x91, 0xb5, 0xed, 0x8d, 0x25, 0x29, 0x34,
0xbe, 0x3c, 0x18, 0xbf, 0x05, 0x6e, 0x1c, 0x25, 0x54, 0xf3, 0xdd, 0x18, 0xd7, 0x2f, 0xcf, 0x7a,
0xda, 0xf6, 0xf5, 0x2f, 0xf9, 0x09, 0x41, 0x73, 0x3f, 0x88, 0xb2, 0x9a, 0xad, 0x43, 0xe5, 0x89,
0x52, 0x80, 0x2d, 0x9a, 0x31, 0x94, 0xe0, 0x43, 0x1a, 0x07, 0x27, 0x3b, 0x8c, 0x5b, 0x5d, 0x67,
0x76, 0x2e, 0xf8, 0xf2, 0xb5, 0x82, 0x77, 0x6f, 0x2c, 0x78, 0x72, 0x02, 0x2d, 0x93, 0x92, 0x2d,
0xcb, 0x00, 0xaa, 0x46, 0x63, 0x96, 0x88, 0x97, 0x35, 0x68, 0xcf, 0xf1, 0xe7, 0xd0, 0x0e, 0x39,
0x9b, 0xcd, 0x68, 0xb8, 0x67, 0x55, 0x6b, 0x2a, 0xb0, 0x59, 0xd0, 0x7d, 0xf1, 0xdc, 0xbf, 0xe2,
0x4e, 0x7e, 0x46, 0xb0, 0xb2, 0xe4, 0x81, 0x3f, 0x06, 0xf7, 0x88, 0xb3, 0xe9, 0x2b, 0xd4, 0x20,
0xc7, 0xa1, 0x23, 0xf0, 0x1d, 0x28, 0x49, 0xa6, 0xe9, 0x7a, 0xd5, 0xb8, 0x92, 0x64, 0x4a, 0x83,
0x56, 0x61, 0x65, 0x5d, 0x01, 0x6b, 0x91, 0x5f, 0x11, 0xac, 0xaa, 0x98, 0x3d, 0xaa, 0x84, 0x72,
0xff, 0x78, 0x9e, 0x7c, 0x8f, 0x07, 0xb0, 0xa6, 0x5e, 0x7a, 0x1c, 0x25, 0x13, 0x2a, 0x24, 0xe5,
0x8f, 0xa3, 0xd0, 0xd6, 0xad, 0xad, 0xf6, 0x77, 0xed, 0xf6, 0x6e, 0x88, 0x37, 0xa1, 0x36, 0x17,
0xc6, 0xa1, 0x64, 0xae, 0x55, 0xe6, 0x6e, 0x88, 0xdf, 0x29, 0x3c, 0xa7, 0x98, 0x2a, 0x4c, 0x08,
0xdd, 0x1b, 0x0f, 0x83, 0x88, 0x67, 0xca, 0xbe, 0x05, 0xd5, 0x43, 0xf5, 0xb0, 0xe8, 0xb8, 0x57,
0x85, 0xad, 0x13, 0xf2, 0xed, 0x31, 0xf9, 0x10, 0x1a, 0x59, 0xf4, 0xb5, 0x63, 0x60, 0x1d, 0x2a,
0xba, 0xb7, 0x6c, 0x36, 0xc6, 0x20, 0x6f, 0x42, 0xc5, 0x00, 0xc3, 0xe0, 0x86, 0x81, 0x0c, 0x74,
0x48, 0xcb, 0xd7, 0x6b, 0xd2, 0x81, 0x8d, 0x7d, 0x1e, 0x24, 0xe2, 0x88, 0x72, 0xed, 0x24, 0x52,
0x7d, 0xdc, 0x7e, 0x1b, 0x1a, 0xd9, 0x34, 0xc3, 0x4d, 0xa8, 0xed, 0x7c, 0xe3, 0x7f, 0xbb, 0xed,
0x3f, 0x58, 0x73, 0x70, 0x0b, 0xea, 0xe3, 0xed, 0xfb, 0x5f, 0x69, 0x0b, 0x8d, 0xb6, 0xa1, 0xaa,
0xe6, 0x3a, 0xe5, 0xf8, 0x2e, 0xb8, 0x6a, 0x85, 0x5f, 0xcf, 0x01, 0x14, 0xbe, 0x1c, 0xdd, 0x8d,
0xab, 0xdb, 0xf6, 0x43, 0xe0, 0x8c, 0x7e, 0x47, 0x50, 0x53, 0x13, 0x32, 0xa2, 0x1c, 0x7f, 0x06,
0x15, 0x3d, 0x2c, 0x71, 0xc1, 0xbd, 0xf8, 0x99, 0xe8, 0x6e, 0xbe, 0xb4, 0x9f, 0xde, 0xf3, 0x1e,
0x52, 0x0d, 0xa2, 0x29, 0x2a, 0x46, 0x17, 0xa7, 0x67, 0x31, 0x7a, 0x69, 0x4a, 0x11, 0x07, 0x7f,
0x02, 0xae, 0x6a, 0x90, 0x62, 0xfa, 0x85, 0x1e, 0x2e, 0xa6, 0x5f, 0xec, 0x23, 0xf5, 0xec, 0xe8,
0x3b, 0xa8, 0xa7, 0xb2, 0xc0, 0x8f, 0xa0, 0xbd, 0xcc, 0x28, 0x7e, 0xa3, 0x10, 0xb9, 0xac, 0xb5,
0x6e, 0xbf, 0x70, 0x74, 0x6d, 0x19, 0x88, 0x33, 0x40, 0xe3, 0x3b, 0xa7, 0xe7, 0x9e, 0xf3, 0xfc,
0xdc, 0x73, 0x5e, 0x9c, 0x7b, 0xe8, 0xc7, 0x85, 0x87, 0x7e, 0x59, 0x78, 0xe8, 0xd9, 0xc2, 0x43,
0xa7, 0x0b, 0x0f, 0xfd, 0xb9, 0xf0, 0xd0, 0xdf, 0x0b, 0xcf, 0x79, 0xb1, 0xf0, 0xd0, 0xd3, 0x0b,
0xcf, 0x39, 0xbd, 0xf0, 0x9c, 0xe7, 0x17, 0x9e, 0x73, 0x50, 0xd5, 0xf7, 0x7e, 0xf0, 0x4f, 0x00,
0x00, 0x00, 0xff, 0xff, 0x4f, 0x32, 0xde, 0x31, 0x2b, 0x08, 0x00, 0x00,
}
func (x Direction) String() string {
@@ -1012,7 +995,7 @@ func (this *QueryRequest) Equal(that interface{}) bool {
} else if this == nil {
return false
}
if this.Query != that1.Query {
if this.Selector != that1.Selector {
return false
}
if this.Limit != that1.Limit {
@@ -1027,9 +1010,6 @@ func (this *QueryRequest) Equal(that interface{}) bool {
if this.Direction != that1.Direction {
return false
}
if this.Regex != that1.Regex {
return false
}
return true
}
func (this *QueryResponse) Equal(that interface{}) bool {
@@ -1212,9 +1192,6 @@ func (this *TailRequest) Equal(that interface{}) bool {
if this.Query != that1.Query {
return false
}
if this.Regex != that1.Regex {
return false
}
if this.DelayFor != that1.DelayFor {
return false
}
@@ -1428,14 +1405,13 @@ func (this *QueryRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 10)
s := make([]string, 0, 9)
s = append(s, "&logproto.QueryRequest{")
s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n")
s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n")
s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
s = append(s, "Direction: "+fmt.Sprintf("%#v", this.Direction)+",\n")
s = append(s, "Regex: "+fmt.Sprintf("%#v", this.Regex)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -1506,10 +1482,9 @@ func (this *TailRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 9)
s := make([]string, 0, 8)
s = append(s, "&logproto.TailRequest{")
s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n")
s = append(s, "Regex: "+fmt.Sprintf("%#v", this.Regex)+",\n")
s = append(s, "DelayFor: "+fmt.Sprintf("%#v", this.DelayFor)+",\n")
s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
@@ -2064,13 +2039,6 @@ func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.Regex) > 0 {
i -= len(m.Regex)
copy(dAtA[i:], m.Regex)
i = encodeVarintLogproto(dAtA, i, uint64(len(m.Regex)))
i--
dAtA[i] = 0x32
}
if m.Direction != 0 {
i = encodeVarintLogproto(dAtA, i, uint64(m.Direction))
i--
@@ -2097,10 +2065,10 @@ func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x10
}
if len(m.Query) > 0 {
i -= len(m.Query)
copy(dAtA[i:], m.Query)
i = encodeVarintLogproto(dAtA, i, uint64(len(m.Query)))
if len(m.Selector) > 0 {
i -= len(m.Selector)
copy(dAtA[i:], m.Selector)
i = encodeVarintLogproto(dAtA, i, uint64(len(m.Selector)))
i--
dAtA[i] = 0xa
}
@@ -2345,23 +2313,16 @@ func (m *TailRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= n6
i = encodeVarintLogproto(dAtA, i, uint64(n6))
i--
dAtA[i] = 0x2a
dAtA[i] = 0x22
if m.Limit != 0 {
i = encodeVarintLogproto(dAtA, i, uint64(m.Limit))
i--
dAtA[i] = 0x20
dAtA[i] = 0x18
}
if m.DelayFor != 0 {
i = encodeVarintLogproto(dAtA, i, uint64(m.DelayFor))
i--
dAtA[i] = 0x18
}
if len(m.Regex) > 0 {
i -= len(m.Regex)
copy(dAtA[i:], m.Regex)
i = encodeVarintLogproto(dAtA, i, uint64(len(m.Regex)))
i--
dAtA[i] = 0x12
dAtA[i] = 0x10
}
if len(m.Query) > 0 {
i -= len(m.Query)
@@ -2664,7 +2625,7 @@ func (m *QueryRequest) Size() (n int) {
}
var l int
_ = l
l = len(m.Query)
l = len(m.Selector)
if l > 0 {
n += 1 + l + sovLogproto(uint64(l))
}
@@ -2678,10 +2639,6 @@ func (m *QueryRequest) Size() (n int) {
if m.Direction != 0 {
n += 1 + sovLogproto(uint64(m.Direction))
}
l = len(m.Regex)
if l > 0 {
n += 1 + l + sovLogproto(uint64(l))
}
return n
}
@@ -2783,10 +2740,6 @@ func (m *TailRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovLogproto(uint64(l))
}
l = len(m.Regex)
if l > 0 {
n += 1 + l + sovLogproto(uint64(l))
}
if m.DelayFor != 0 {
n += 1 + sovLogproto(uint64(m.DelayFor))
}
@@ -2937,12 +2890,11 @@ func (this *QueryRequest) String() string {
return "nil"
}
s := strings.Join([]string{`&QueryRequest{`,
`Query:` + fmt.Sprintf("%v", this.Query) + `,`,
`Selector:` + fmt.Sprintf("%v", this.Selector) + `,`,
`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
`Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Direction:` + fmt.Sprintf("%v", this.Direction) + `,`,
`Regex:` + fmt.Sprintf("%v", this.Regex) + `,`,
`}`,
}, "")
return s
@@ -3018,7 +2970,6 @@ func (this *TailRequest) String() string {
}
s := strings.Join([]string{`&TailRequest{`,
`Query:` + fmt.Sprintf("%v", this.Query) + `,`,
`Regex:` + fmt.Sprintf("%v", this.Regex) + `,`,
`DelayFor:` + fmt.Sprintf("%v", this.DelayFor) + `,`,
`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
`Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
@@ -3286,7 +3237,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -3314,7 +3265,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Query = string(dAtA[iNdEx:postIndex])
m.Selector = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
@@ -3420,38 +3371,6 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
break
}
}
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Regex", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogproto
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthLogproto
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthLogproto
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Regex = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogproto(dAtA[iNdEx:])
@@ -4124,38 +4043,6 @@ func (m *TailRequest) Unmarshal(dAtA []byte) error {
m.Query = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Regex", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogproto
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthLogproto
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthLogproto
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Regex = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DelayFor", wireType)
}
@@ -4174,7 +4061,7 @@ func (m *TailRequest) Unmarshal(dAtA []byte) error {
break
}
}
case 4:
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
}
@@ -4193,7 +4080,7 @@ func (m *TailRequest) Unmarshal(dAtA []byte) error {
break
}
}
case 5:
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
}

@@ -27,12 +27,11 @@ message PushResponse {
}
message QueryRequest {
string query = 1;
string selector = 1;
uint32 limit = 2;
google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
Direction direction = 5;
string regex = 6;
}
enum Direction {
@@ -67,10 +66,9 @@ message Entry {
message TailRequest {
string query = 1;
string regex = 2;
uint32 delayFor = 3;
uint32 limit = 4;
google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
uint32 delayFor = 2;
uint32 limit = 3;
google.protobuf.Timestamp start = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message TailResponse {

@@ -2,60 +2,90 @@ package logql
import (
"bytes"
"context"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
)
// Filter is a line filter sent to a querier to filter out log lines.
type Filter func([]byte) bool
type Expr interface{}
type Filter func(line []byte) bool
// SelectParams specifies parameters passed to data selections.
type SelectParams struct {
*logproto.QueryRequest
}
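// LogSelector returns the parsed LogSelectorExpr for the request's Selector.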
func (s SelectParams) LogSelector() (LogSelectorExpr, error) {
return ParseLogSelector(s.Selector)
}
// QuerierFunc implements Querier.
type QuerierFunc func([]*labels.Matcher, Filter) (iter.EntryIterator, error)
type QuerierFunc func(context.Context, SelectParams) (iter.EntryIterator, error)
// Query implements Querier.
func (q QuerierFunc) Query(ms []*labels.Matcher, entryFilter Filter) (iter.EntryIterator, error) {
return q(ms, entryFilter)
// Select implements Querier.
func (q QuerierFunc) Select(ctx context.Context, p SelectParams) (iter.EntryIterator, error) {
return q(ctx, p)
}
// Querier allows a LogQL expression to fetch an EntryIterator for a
// set of matchers.
type Querier interface {
Query([]*labels.Matcher, Filter) (iter.EntryIterator, error)
Select(context.Context, SelectParams) (iter.EntryIterator, error)
}
// Expr is a LogQL expression.
type Expr interface {
Eval(Querier) (iter.EntryIterator, error)
// LogSelectorExpr is a LogQL expression filtering and returning logs.
type LogSelectorExpr interface {
Filter() (Filter, error)
Matchers() []*labels.Matcher
fmt.Stringer
}
type matchersExpr struct {
matchers []*labels.Matcher
}
func (e *matchersExpr) Eval(q Querier) (iter.EntryIterator, error) {
return q.Query(e.matchers, nil)
func newMatcherExpr(matchers []*labels.Matcher) LogSelectorExpr {
return &matchersExpr{matchers: matchers}
}
func (e *matchersExpr) Matchers() []*labels.Matcher {
return e.matchers
}
func (e *matchersExpr) String() string {
var sb strings.Builder
sb.WriteString("{")
for i, m := range e.matchers {
sb.WriteString(m.String())
if i+1 != len(e.matchers) {
sb.WriteString(",")
}
}
sb.WriteString("}")
return sb.String()
}
func (e *matchersExpr) Filter() (Filter, error) {
return nil, nil
}
type filterExpr struct {
left Expr
left LogSelectorExpr
ty labels.MatchType
match string
}
func (e *filterExpr) Matchers() []*labels.Matcher {
return e.left.Matchers()
}
// NewFilterExpr wraps an existing Expr with a next filter expression.
func NewFilterExpr(left Expr, ty labels.MatchType, match string) Expr {
func NewFilterExpr(left LogSelectorExpr, ty labels.MatchType, match string) LogSelectorExpr {
return &filterExpr{
left: left,
ty: ty,
@@ -63,7 +93,28 @@ func NewFilterExpr(left Expr, ty labels.MatchType, match string) Expr {
}
}
func (e *filterExpr) filter() (func([]byte) bool, error) {
func (e *filterExpr) Matchers() []*labels.Matcher {
return e.left.Matchers()
}
func (e *filterExpr) String() string {
var sb strings.Builder
sb.WriteString(e.left.String())
switch e.ty {
case labels.MatchRegexp:
sb.WriteString("|~")
case labels.MatchNotRegexp:
sb.WriteString("!~")
case labels.MatchEqual:
sb.WriteString("|=")
case labels.MatchNotEqual:
sb.WriteString("!=")
}
sb.WriteString(strconv.Quote(e.match))
return sb.String()
}
func (e *filterExpr) Filter() (Filter, error) {
var f func([]byte) bool
switch e.ty {
case labels.MatchRegexp:
@@ -97,7 +148,7 @@ func (e *filterExpr) filter() (func([]byte) bool, error) {
}
next, ok := e.left.(*filterExpr)
if ok {
nextFilter, err := next.filter()
nextFilter, err := next.Filter()
if err != nil {
return nil, err
}
@@ -108,18 +159,6 @@ func (e *filterExpr) filter() (func([]byte) bool, error) {
return f, nil
}
func (e *filterExpr) Eval(q Querier) (iter.EntryIterator, error) {
f, err := e.filter()
if err != nil {
return nil, err
}
next, err := q.Query(e.left.Matchers(), f)
if err != nil {
return nil, err
}
return next, nil
}
func mustNewMatcher(t labels.MatchType, n, v string) *labels.Matcher {
m, err := labels.NewMatcher(t, n, v)
if err != nil {
@@ -127,3 +166,124 @@ func mustNewMatcher(t labels.MatchType, n, v string) *labels.Matcher {
}
return m
}
type logRange struct {
left LogSelectorExpr
interval time.Duration
}
func mustNewRange(left LogSelectorExpr, interval time.Duration) *logRange {
return &logRange{
left: left,
interval: interval,
}
}
const (
OpTypeSum = "sum"
OpTypeAvg = "avg"
OpTypeMax = "max"
OpTypeMin = "min"
OpTypeCount = "count"
OpTypeStddev = "stddev"
OpTypeStdvar = "stdvar"
OpTypeBottomK = "bottomk"
OpTypeTopK = "topk"
OpTypeCountOverTime = "count_over_time"
OpTypeRate = "rate"
)
// SampleExpr is a LogQL expression filtering logs and returning metric samples.
type SampleExpr interface {
// Selector is the LogQL selector to apply to when retrieving logs.
Selector() LogSelectorExpr
// Evaluator returns a `StepEvaluator` that can evaluate the expression.
Evaluator() StepEvaluator
// Close all resources used.
Close() error
}
// StepEvaluator evaluates a single step of a query.
type StepEvaluator interface {
Next() (bool, int64, promql.Vector)
}
// StepEvaluatorFn implements `StepEvaluator`
type StepEvaluatorFn func() (bool, int64, promql.Vector)
func (s StepEvaluatorFn) Next() (bool, int64, promql.Vector) {
return s()
}
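The contract is easiest to see with a toy evaluator. An illustrative in-package sketch (assumes `fmt` and `promql` are imported): `Next` reports whether more steps remain, along with the step timestamp and that step's samples.
```go
// Toy StepEvaluator: emits one sample per step for three steps, then stops.
steps := 0
ev := StepEvaluatorFn(func() (bool, int64, promql.Vector) {
	if steps == 3 {
		return false, 0, nil // exhausted
	}
	ts := int64(steps) * 1000
	steps++
	return true, ts, promql.Vector{{Point: promql.Point{T: ts, V: 1}}}
})
for next, ts, vec := ev.Next(); next; next, ts, vec = ev.Next() {
	fmt.Printf("step=%d samples=%d\n", ts, len(vec)) // ts: 0, 1000, 2000
}
```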
type rangeAggregationExpr struct {
left *logRange
operation string
iterator RangeVectorIterator
}
func newRangeAggregationExpr(left *logRange, operation string) SampleExpr {
return &rangeAggregationExpr{
left: left,
operation: operation,
}
}
func (e *rangeAggregationExpr) Close() error {
if e.iterator == nil {
return nil
}
return e.iterator.Close()
}
func (e *rangeAggregationExpr) Selector() LogSelectorExpr {
return e.left.left
}
type grouping struct {
groups []string
without bool
}
type vectorAggregationExpr struct {
left SampleExpr
grouping *grouping
params int
operation string
}
func mustNewVectorAggregationExpr(left SampleExpr, operation string, gr *grouping, params *string) SampleExpr {
var p int
var err error
switch operation {
case OpTypeBottomK, OpTypeTopK:
if params == nil {
panic(newParseError(fmt.Sprintf("parameter required for operation %s", operation), 0, 0))
}
if p, err = strconv.Atoi(*params); err != nil {
panic(newParseError(fmt.Sprintf("invalid parameter %s(%s,", operation, *params), 0, 0))
}
default:
if params != nil {
panic(newParseError(fmt.Sprintf("unsupported parameter for operation %s(%s,", operation, *params), 0, 0))
}
}
if gr == nil {
gr = &grouping{}
}
return &vectorAggregationExpr{
left: left,
operation: operation,
grouping: gr,
params: p,
}
}
func (v *vectorAggregationExpr) Close() error {
return v.left.Close()
}
func (v *vectorAggregationExpr) Selector() LogSelectorExpr {
return v.left.Selector()
}
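Taken together, a `LogSelectorExpr` splits a query into the part pushed down to the store (`Matchers`) and the part applied line by line (`Filter`). An illustrative in-package sketch (`ParseLogSelector` ships with this change's parser updates):
```go
// `{job="mysql"} |= "error" != "timeout"` keeps lines containing "error"
// but not "timeout"; filters chain through filterExpr.Filter above.
expr, err := ParseLogSelector(`{job="mysql"} |= "error" != "timeout"`)
if err != nil {
	panic(err)
}
fmt.Println(expr.Matchers()) // label matchers used to select streams
filter, err := expr.Filter()
if err != nil {
	panic(err)
}
for _, line := range []string{"error: disk full", "error: timeout", "all good"} {
	if filter == nil || filter([]byte(line)) {
		fmt.Println(line) // only "error: disk full" passes
	}
}
```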

@@ -0,0 +1,28 @@
package logql
import (
"strings"
"testing"
)
func Test_logSelectorExpr_String(t *testing.T) {
tests := []string{
`{foo!~"bar"}`,
`{foo="bar", bar!="baz"}`,
`{foo="bar", bar!="baz"} != "bip" !~ ".+bop"`,
`{foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap"`,
}
for _, tt := range tests {
tt := tt
t.Run(tt, func(t *testing.T) {
expr, err := ParseLogSelector(tt)
if err != nil {
t.Fatalf("failed to parse log selector: %s", err)
}
if expr.String() != strings.Replace(tt, " ", "", -1) {
t.Fatalf("error expected: %s got: %s", tt, expr.String())
}
})
}
}

@@ -0,0 +1,502 @@
package logql
import (
"container/heap"
"context"
"math"
"sort"
"time"
"github.com/grafana/loki/pkg/helpers"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
)
var (
queryTime = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "logql",
Name: "query_duration_seconds",
Help: "LogQL query timings",
Buckets: prometheus.DefBuckets,
}, []string{"query_type"})
)
// ValueTypeStreams is the promql.ValueType for log streams.
const ValueTypeStreams = "streams"
// Streams is a promql.Value of log streams.
type Streams []*logproto.Stream
// Type implements `promql.Value`
func (Streams) Type() promql.ValueType { return ValueTypeStreams }
// String implements `promql.Value`
func (Streams) String() string {
return ""
}
// EngineOpts is the list of options to use with the LogQL query engine.
type EngineOpts struct {
// Timeout for queries execution
Timeout time.Duration `yaml:"timeout"`
// MaxLookBackPeriod is the maximum amount of time to look back for log lines.
// Only used for instant log queries.
MaxLookBackPeriod time.Duration `yaml:"max_look_back_period"`
}
func (opts *EngineOpts) applyDefault() {
if opts.Timeout == 0 {
opts.Timeout = 3 * time.Minute
}
if opts.MaxLookBackPeriod == 0 {
opts.MaxLookBackPeriod = 30 * time.Second
}
}
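An illustrative sketch of the defaults: the zero value picks up both fallbacks from `applyDefault`.
```go
opts := EngineOpts{}
opts.applyDefault()
fmt.Println(opts.Timeout)           // 3m0s
fmt.Println(opts.MaxLookBackPeriod) // 30s
```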
// Engine is the LogQL engine.
type Engine struct {
timeout time.Duration
maxLookBackPeriod time.Duration
}
// NewEngine creates a new LogQL engine.
func NewEngine(opts EngineOpts) *Engine {
opts.applyDefault()
return &Engine{
timeout: opts.Timeout,
maxLookBackPeriod: opts.MaxLookBackPeriod,
}
}
// Query is a LogQL query to be executed.
type Query interface {
// Exec processes the query.
Exec(ctx context.Context) (promql.Value, error)
}
type query struct {
querier Querier
qs string
start, end time.Time
step time.Duration
direction logproto.Direction
limit uint32
ng *Engine
}
func (q *query) isInstant() bool {
return q.start == q.end && q.step == 0
}
// Exec Implements `Query`
func (q *query) Exec(ctx context.Context) (promql.Value, error) {
var queryType string
if q.isInstant() {
queryType = "instant"
} else {
queryType = "range"
}
timer := prometheus.NewTimer(queryTime.WithLabelValues(queryType))
defer timer.ObserveDuration()
return q.ng.exec(ctx, q)
}
// NewRangeQuery creates a new LogQL range query.
func (ng *Engine) NewRangeQuery(
q Querier,
qs string,
start, end time.Time, step time.Duration,
direction logproto.Direction, limit uint32) Query {
return &query{
querier: q,
qs: qs,
start: start,
end: end,
step: step,
direction: direction,
limit: limit,
ng: ng,
}
}
// NewInstantQuery creates a new LogQL instant query.
func (ng *Engine) NewInstantQuery(
q Querier,
qs string,
ts time.Time,
direction logproto.Direction, limit uint32) Query {
return &query{
querier: q,
qs: qs,
start: ts,
end: ts,
step: 0,
direction: direction,
limit: limit,
ng: ng,
}
}
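Putting the constructors together, an illustrative usage sketch (assumes `querier`, `start`, `end`, and `ctx` are defined; any `Querier` implementation works):
```go
eng := NewEngine(EngineOpts{Timeout: time.Minute})
q := eng.NewRangeQuery(
	querier, // a Querier backed by ingesters and/or the store
	`sum(rate({job="mysql"}[5m])) by (level)`,
	start, end, 30*time.Second,
	logproto.FORWARD, 1000,
)
value, err := q.Exec(ctx)
if err != nil {
	panic(err)
}
// Sample expressions yield a promql.Matrix (or Vector for instant queries);
// plain log selectors yield Streams.
fmt.Println(value.Type())
```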
func (ng *Engine) exec(ctx context.Context, q *query) (promql.Value, error) {
ctx, cancel := context.WithTimeout(ctx, ng.timeout)
defer cancel()
expr, err := ParseExpr(q.qs)
if err != nil {
return nil, err
}
switch e := expr.(type) {
case SampleExpr:
if err := ng.setupIterators(ctx, e, q); err != nil {
return nil, err
}
return ng.evalSample(e, q), nil
case LogSelectorExpr:
params := SelectParams{
QueryRequest: &logproto.QueryRequest{
Start: q.start,
End: q.end,
Limit: q.limit,
Direction: q.direction,
Selector: e.String(),
},
}
// For instant queries, look back to find logs near the requested timestamp.
if q.isInstant() {
params.Start = params.Start.Add(-ng.maxLookBackPeriod)
}
iter, err := q.querier.Select(ctx, params)
if err != nil {
return nil, err
}
defer helpers.LogError("closing iterator", iter.Close)
return readStreams(iter, q.limit)
}
return nil, nil
}
// setupIterators walks the AST and builds the iterators required to evaluate samples.
func (ng *Engine) setupIterators(ctx context.Context, expr SampleExpr, q *query) error {
if expr == nil {
return nil
}
switch e := expr.(type) {
case *vectorAggregationExpr:
return ng.setupIterators(ctx, e.left, q)
case *rangeAggregationExpr:
iter, err := q.querier.Select(ctx, SelectParams{
&logproto.QueryRequest{
Start: q.start.Add(-e.left.interval),
End: q.end,
Limit: 0,
Direction: logproto.FORWARD,
Selector: e.Selector().String(),
},
})
if err != nil {
return err
}
e.iterator = newRangeVectorIterator(iter, e.left.interval.Nanoseconds(), q.step.Nanoseconds(),
q.start.UnixNano(), q.end.UnixNano())
}
return nil
}
// evalSample evaluates a SampleExpr.
func (ng *Engine) evalSample(expr SampleExpr, q *query) promql.Value {
defer helpers.LogError("closing SampleExpr", expr.Close)
stepEvaluator := expr.Evaluator()
seriesIndex := map[uint64]*promql.Series{}
next, ts, vec := stepEvaluator.Next()
if q.isInstant() {
return vec
}
for next {
for _, p := range vec {
var (
series *promql.Series
hash = p.Metric.Hash()
ok bool
)
series, ok = seriesIndex[hash]
if !ok {
series = &promql.Series{
Metric: p.Metric,
}
seriesIndex[hash] = series
}
series.Points = append(series.Points, promql.Point{
T: ts,
V: p.V,
})
}
next, ts, vec = stepEvaluator.Next()
}
series := make([]promql.Series, 0, len(seriesIndex))
for _, s := range seriesIndex {
series = append(series, *s)
}
result := promql.Matrix(series)
sort.Sort(result)
return result
}
func readStreams(i iter.EntryIterator, size uint32) (Streams, error) {
streams := map[string]*logproto.Stream{}
respSize := uint32(0)
for ; respSize < size && i.Next(); respSize++ {
labels, entry := i.Labels(), i.Entry()
stream, ok := streams[labels]
if !ok {
stream = &logproto.Stream{
Labels: labels,
}
streams[labels] = stream
}
stream.Entries = append(stream.Entries, entry)
}
result := make([]*logproto.Stream, 0, len(streams))
for _, stream := range streams {
result = append(result, stream)
}
return result, i.Error()
}
type groupedAggregation struct {
labels labels.Labels
value float64
mean float64
groupCount int
heap vectorByValueHeap
reverseHeap vectorByReverseValueHeap
}
// Evaluator implements `SampleExpr` for a vectorAggregationExpr
// this is copied and adapted from Prometheus vector aggregation code.
func (v *vectorAggregationExpr) Evaluator() StepEvaluator {
return StepEvaluatorFn(func() (bool, int64, promql.Vector) {
next, ts, vec := v.left.Evaluator().Next()
if !next {
return false, 0, promql.Vector{}
}
result := map[uint64]*groupedAggregation{}
if v.operation == OpTypeTopK || v.operation == OpTypeBottomK {
if v.params < 1 {
return next, ts, promql.Vector{}
}
}
for _, s := range vec {
metric := s.Metric
var (
groupingKey uint64
)
if v.grouping.without {
groupingKey = metric.HashWithoutLabels(v.grouping.groups...)
} else {
groupingKey = metric.HashForLabels(v.grouping.groups...)
}
group, ok := result[groupingKey]
// Add a new group if it doesn't exist.
if !ok {
var m labels.Labels
if v.grouping.without {
lb := labels.NewBuilder(metric)
lb.Del(v.grouping.groups...)
lb.Del(labels.MetricName)
m = lb.Labels()
} else {
m = make(labels.Labels, 0, len(v.grouping.groups))
for _, l := range metric {
for _, n := range v.grouping.groups {
if l.Name == n {
m = append(m, l)
break
}
}
}
sort.Sort(m)
}
result[groupingKey] = &groupedAggregation{
labels: m,
value: s.V,
mean: s.V,
groupCount: 1,
}
inputVecLen := len(vec)
resultSize := v.params
if v.params > inputVecLen {
resultSize = inputVecLen
}
if v.operation == OpTypeStdvar || v.operation == OpTypeStddev {
result[groupingKey].value = 0.0
} else if v.operation == OpTypeTopK {
result[groupingKey].heap = make(vectorByValueHeap, 0, resultSize)
heap.Push(&result[groupingKey].heap, &promql.Sample{
Point: promql.Point{V: s.V},
Metric: s.Metric,
})
} else if v.operation == OpTypeBottomK {
result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, resultSize)
heap.Push(&result[groupingKey].reverseHeap, &promql.Sample{
Point: promql.Point{V: s.V},
Metric: s.Metric,
})
}
continue
}
switch v.operation {
case OpTypeSum:
group.value += s.V
case OpTypeAvg:
group.groupCount++
group.mean += (s.V - group.mean) / float64(group.groupCount)
case OpTypeMax:
if group.value < s.V || math.IsNaN(group.value) {
group.value = s.V
}
case OpTypeMin:
if group.value > s.V || math.IsNaN(group.value) {
group.value = s.V
}
case OpTypeCount:
group.groupCount++
case OpTypeStddev, OpTypeStdvar:
group.groupCount++
delta := s.V - group.mean
group.mean += delta / float64(group.groupCount)
group.value += delta * (s.V - group.mean)
case OpTypeTopK:
if len(group.heap) < v.params || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) {
if len(group.heap) == v.params {
heap.Pop(&group.heap)
}
heap.Push(&group.heap, &promql.Sample{
Point: promql.Point{V: s.V},
Metric: s.Metric,
})
}
case OpTypeBottomK:
if len(group.reverseHeap) < v.params || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) {
if len(group.reverseHeap) == v.params {
heap.Pop(&group.reverseHeap)
}
heap.Push(&group.reverseHeap, &promql.Sample{
Point: promql.Point{V: s.V},
Metric: s.Metric,
})
}
default:
panic(errors.Errorf("expected aggregation operator but got %q", v.operation))
}
}
vec = vec[:0]
for _, aggr := range result {
switch v.operation {
case OpTypeAvg:
aggr.value = aggr.mean
case OpTypeCount:
aggr.value = float64(aggr.groupCount)
case OpTypeStddev:
aggr.value = math.Sqrt(aggr.value / float64(aggr.groupCount))
case OpTypeStdvar:
aggr.value = aggr.value / float64(aggr.groupCount)
case OpTypeTopK:
// The heap keeps the lowest value on top, so reverse it.
sort.Sort(sort.Reverse(aggr.heap))
for _, v := range aggr.heap {
vec = append(vec, promql.Sample{
Metric: v.Metric,
Point: promql.Point{
T: ts,
V: v.V, // each topk sample carries its own value, not the group's
},
})
}
continue // Bypass default append.
case OpTypeBottomK:
// The reverse heap keeps the highest value on top, so reverse it.
sort.Sort(sort.Reverse(aggr.reverseHeap))
for _, v := range aggr.reverseHeap {
vec = append(vec, promql.Sample{
Metric: v.Metric,
Point: promql.Point{
T: ts,
V: v.V,
},
})
}
continue // Bypass default append.
default:
// for the remaining aggregations the computed value is already final.
}
vec = append(vec, promql.Sample{
Metric: aggr.labels,
Point: promql.Point{
T: ts,
V: aggr.value,
},
})
}
return next, ts, vec
})
}
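The stddev/stdvar branches above use a streaming (Welford-style) update: `mean += delta/n` followed by `value += delta*(x-mean)`. A self-contained check of that recurrence against the direct formulas:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	xs := []float64{1, 2, 4}
	var mean, m2 float64
	for n, x := range xs {
		delta := x - mean
		mean += delta / float64(n+1) // running mean
		m2 += delta * (x - mean)     // sum of squared deviations
	}
	fmt.Println(mean)                             // 2.333... (= 7/3)
	fmt.Println(m2 / float64(len(xs)))            // stdvar: 1.555... (= 14/9)
	fmt.Println(math.Sqrt(m2 / float64(len(xs)))) // stddev: 1.247...
}
```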
// Evaluator implements `SampleExpr` for a rangeAggregationExpr
func (e *rangeAggregationExpr) Evaluator() StepEvaluator {
var fn RangeVectorAggregator
switch e.operation {
case OpTypeRate:
fn = rate(e.left.interval)
case OpTypeCountOverTime:
fn = count
}
return StepEvaluatorFn(func() (bool, int64, promql.Vector) {
next := e.iterator.Next()
if !next {
return false, 0, promql.Vector{}
}
ts, vec := e.iterator.At(fn)
return true, ts, vec
})
}
// rate calculates the per-second rate of log lines.
func rate(selRange time.Duration) func(ts int64, samples []promql.Point) float64 {
return func(ts int64, samples []promql.Point) float64 {
return float64(len(samples)) / selRange.Seconds()
}
}
// count counts the number of log lines.
func count(ts int64, samples []promql.Point) float64 {
return float64(len(samples))
}
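A quick sanity check of the two aggregators (an in-package sketch; only the number of points matters, their values are ignored): 600 lines in a 5m window yield a rate of 2 lines per second.

```go
samples := make([]promql.Point, 600)         // 600 log lines in the window
fmt.Println(rate(5*time.Minute)(0, samples)) // 2 (= 600 / 300s)
fmt.Println(count(0, samples))               // 600
```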

@ -2,40 +2,86 @@
package logql
import (
"time"
"github.com/prometheus/prometheus/pkg/labels"
)
%}
%union{
Expr Expr
Filter labels.MatchType
Selector []*labels.Matcher
Matchers []*labels.Matcher
Matcher *labels.Matcher
str string
int int64
Expr Expr
LogExpr LogSelectorExpr
RangeAggregationExpr SampleExpr
VectorAggregationExpr SampleExpr
LogRangeExpr *logRange
Filter labels.MatchType
Selector []*labels.Matcher
Matchers []*labels.Matcher
Matcher *labels.Matcher
Grouping *grouping
Labels []string
VectorOp string
RangeOp string
str string
duration time.Duration
int int64
}
%start root
%type <Expr> expr
%type <Filter> filter
%type <Selector> selector
%type <Matchers> matchers
%type <Matcher> matcher
%type <Expr> expr
%type <Filter> filter
%type <Selector> selector
%type <Matchers> matchers
%type <Matcher> matcher
%type <VectorOp> vectorOp
%type <RangeOp> rangeOp
%type <Labels> labels
%type <Grouping> grouping
%type <LogExpr> logExpr
%type <RangeAggregationExpr> rangeAggregationExpr
%type <VectorAggregationExpr> vectorAggregationExpr
%type <LogRangeExpr> logRangeExpr
%token <str> IDENTIFIER STRING
%token <val> MATCHERS LABELS EQ NEQ RE NRE OPEN_BRACE CLOSE_BRACE COMMA DOT PIPE_MATCH PIPE_EXACT
%token <str> IDENTIFIER STRING
%token <duration> DURATION
%token <val> MATCHERS LABELS EQ NEQ RE NRE OPEN_BRACE CLOSE_BRACE OPEN_BRACKET CLOSE_BRACKET COMMA DOT PIPE_MATCH PIPE_EXACT
OPEN_PARENTHESIS CLOSE_PARENTHESIS BY WITHOUT COUNT_OVER_TIME RATE SUM AVG MAX MIN COUNT STDDEV STDVAR BOTTOMK TOPK
%%
root: expr { exprlex.(*lexer).expr = $1 };
expr:
selector { $$ = &matchersExpr{ matchers: $1 } }
| expr filter STRING { $$ = NewFilterExpr( $1, $2, $3 ) }
| expr filter error
| expr error
logExpr { $$ = $1 }
| rangeAggregationExpr { $$ = $1 }
| vectorAggregationExpr { $$ = $1 }
;
logExpr:
selector { $$ = newMatcherExpr($1)}
| logExpr filter STRING { $$ = NewFilterExpr( $1, $2, $3 ) }
| OPEN_PARENTHESIS logExpr CLOSE_PARENTHESIS { $$ = $2}
| logExpr filter error
| logExpr error
;
logRangeExpr: logExpr DURATION { $$ = mustNewRange($1, $2) };
rangeAggregationExpr: rangeOp OPEN_PARENTHESIS logRangeExpr CLOSE_PARENTHESIS { $$ = newRangeAggregationExpr($3,$1) };
vectorAggregationExpr:
vectorOp OPEN_PARENTHESIS rangeAggregationExpr CLOSE_PARENTHESIS { $$ = mustNewVectorAggregationExpr($3, $1, nil, nil) }
| vectorOp grouping OPEN_PARENTHESIS rangeAggregationExpr CLOSE_PARENTHESIS { $$ = mustNewVectorAggregationExpr($4, $1, $2, nil) }
| vectorOp OPEN_PARENTHESIS rangeAggregationExpr CLOSE_PARENTHESIS grouping { $$ = mustNewVectorAggregationExpr($3, $1, $5, nil) }
| vectorOp OPEN_PARENTHESIS IDENTIFIER COMMA rangeAggregationExpr CLOSE_PARENTHESIS { $$ = mustNewVectorAggregationExpr($5, $1, nil, &$3) }
| vectorOp OPEN_PARENTHESIS IDENTIFIER COMMA rangeAggregationExpr CLOSE_PARENTHESIS grouping { $$ = mustNewVectorAggregationExpr($5, $1, $7, &$3) }
| vectorOp OPEN_PARENTHESIS vectorAggregationExpr CLOSE_PARENTHESIS { $$ = mustNewVectorAggregationExpr($3, $1, nil, nil) }
| vectorOp grouping OPEN_PARENTHESIS vectorAggregationExpr CLOSE_PARENTHESIS { $$ = mustNewVectorAggregationExpr($4, $1, $2, nil) }
| vectorOp OPEN_PARENTHESIS vectorAggregationExpr CLOSE_PARENTHESIS grouping { $$ = mustNewVectorAggregationExpr($3, $1, $5, nil) }
| vectorOp OPEN_PARENTHESIS IDENTIFIER COMMA vectorAggregationExpr CLOSE_PARENTHESIS { $$ = mustNewVectorAggregationExpr($5, $1, nil, &$3) }
| vectorOp OPEN_PARENTHESIS IDENTIFIER COMMA vectorAggregationExpr CLOSE_PARENTHESIS grouping { $$ = mustNewVectorAggregationExpr($5, $1, $7, &$3) }
;
filter:
@ -62,4 +108,31 @@ matcher:
| IDENTIFIER RE STRING { $$ = mustNewMatcher(labels.MatchRegexp, $1, $3) }
| IDENTIFIER NRE STRING { $$ = mustNewMatcher(labels.MatchNotRegexp, $1, $3) }
;
%%
vectorOp:
SUM { $$ = OpTypeSum }
| AVG { $$ = OpTypeAvg }
| COUNT { $$ = OpTypeCount }
| MAX { $$ = OpTypeMax }
| MIN { $$ = OpTypeMin }
| STDDEV { $$ = OpTypeStddev }
| STDVAR { $$ = OpTypeStdvar }
| BOTTOMK { $$ = OpTypeBottomK }
| TOPK { $$ = OpTypeTopK }
;
rangeOp:
COUNT_OVER_TIME { $$ = OpTypeCountOverTime }
| RATE { $$ = OpTypeRate }
;
labels:
IDENTIFIER { $$ = []string{ $1 } }
| labels COMMA IDENTIFIER { $$ = append($1, $3) }
;
grouping:
BY OPEN_PARENTHESIS labels CLOSE_PARENTHESIS { $$ = &grouping{ without: false , groups: $3 } }
| WITHOUT OPEN_PARENTHESIS labels CLOSE_PARENTHESIS { $$ = &grouping{ without: true , groups: $3 } }
;
%%
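A few queries the extended grammar accepts, taken verbatim from the parser tests below (an in-package sketch):

```go
for _, q := range []string{
	`count_over_time({ foo !~ "bar" }[12m])`,
	`max without (bar) (count_over_time({ foo !~ "bar" }[5h]))`,
	`topk(10,count_over_time({ foo !~ "bar" }[5h])) without (bar)`,
} {
	if _, err := ParseExpr(q); err != nil {
		panic(err)
	}
}
```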

@ -9,34 +9,62 @@ import __yyfmt__ "fmt"
import (
"github.com/prometheus/prometheus/pkg/labels"
"time"
)
//line pkg/logql/expr.y:9
//line pkg/logql/expr.y:10
type exprSymType struct {
yys int
Expr Expr
Filter labels.MatchType
Selector []*labels.Matcher
Matchers []*labels.Matcher
Matcher *labels.Matcher
str string
int int64
yys int
Expr Expr
LogExpr LogSelectorExpr
RangeAggregationExpr SampleExpr
VectorAggregationExpr SampleExpr
LogRangeExpr *logRange
Filter labels.MatchType
Selector []*labels.Matcher
Matchers []*labels.Matcher
Matcher *labels.Matcher
Grouping *grouping
Labels []string
VectorOp string
RangeOp string
str string
duration time.Duration
int int64
}
const IDENTIFIER = 57346
const STRING = 57347
const MATCHERS = 57348
const LABELS = 57349
const EQ = 57350
const NEQ = 57351
const RE = 57352
const NRE = 57353
const OPEN_BRACE = 57354
const CLOSE_BRACE = 57355
const COMMA = 57356
const DOT = 57357
const PIPE_MATCH = 57358
const PIPE_EXACT = 57359
const DURATION = 57348
const MATCHERS = 57349
const LABELS = 57350
const EQ = 57351
const NEQ = 57352
const RE = 57353
const NRE = 57354
const OPEN_BRACE = 57355
const CLOSE_BRACE = 57356
const OPEN_BRACKET = 57357
const CLOSE_BRACKET = 57358
const COMMA = 57359
const DOT = 57360
const PIPE_MATCH = 57361
const PIPE_EXACT = 57362
const OPEN_PARENTHESIS = 57363
const CLOSE_PARENTHESIS = 57364
const BY = 57365
const WITHOUT = 57366
const COUNT_OVER_TIME = 57367
const RATE = 57368
const SUM = 57369
const AVG = 57370
const MAX = 57371
const MIN = 57372
const COUNT = 57373
const STDDEV = 57374
const STDVAR = 57375
const BOTTOMK = 57376
const TOPK = 57377
var exprToknames = [...]string{
"$end",
@ -44,6 +72,7 @@ var exprToknames = [...]string{
"$unk",
"IDENTIFIER",
"STRING",
"DURATION",
"MATCHERS",
"LABELS",
"EQ",
@ -52,10 +81,27 @@ var exprToknames = [...]string{
"NRE",
"OPEN_BRACE",
"CLOSE_BRACE",
"OPEN_BRACKET",
"CLOSE_BRACKET",
"COMMA",
"DOT",
"PIPE_MATCH",
"PIPE_EXACT",
"OPEN_PARENTHESIS",
"CLOSE_PARENTHESIS",
"BY",
"WITHOUT",
"COUNT_OVER_TIME",
"RATE",
"SUM",
"AVG",
"MAX",
"MIN",
"COUNT",
"STDDEV",
"STDVAR",
"BOTTOMK",
"TOPK",
}
var exprStatenames = [...]string{}
@ -63,59 +109,94 @@ const exprEofCode = 1
const exprErrCode = 2
const exprInitialStackSize = 16
//line pkg/logql/expr.y:65
//line pkg/logql/expr.y:138
//line yacctab:1
var exprExca = [...]int{
-1, 1,
1, -1,
-2, 0,
-1, 2,
1, 1,
-1, 3,
1, 2,
-2, 0,
}
const exprPrivate = 57344
const exprLast = 30
const exprLast = 123
var exprAct = [...]int{
6, 13, 20, 4, 29, 18, 28, 10, 14, 9,
21, 22, 23, 24, 7, 8, 17, 19, 27, 16,
26, 25, 15, 12, 11, 14, 3, 5, 2, 1,
31, 36, 5, 4, 10, 64, 30, 48, 32, 33,
32, 33, 7, 47, 44, 82, 11, 12, 13, 14,
16, 17, 15, 18, 19, 20, 21, 81, 77, 76,
61, 59, 46, 45, 43, 11, 12, 13, 14, 16,
17, 15, 18, 19, 20, 21, 78, 3, 57, 63,
62, 80, 78, 67, 66, 28, 60, 79, 29, 52,
72, 71, 75, 74, 73, 11, 12, 13, 14, 16,
17, 15, 18, 19, 20, 21, 23, 42, 70, 83,
10, 23, 84, 85, 27, 58, 26, 23, 7, 27,
37, 26, 69, 24, 25, 27, 40, 26, 24, 25,
50, 68, 41, 65, 24, 25, 53, 54, 55, 56,
39, 8, 49, 38, 35, 51, 37, 9, 34, 6,
22, 2, 1,
}
var exprPact = [...]int{
-9, -1000, -2, -1000, 21, 17, -1000, -1000, -1000, -1000,
-1000, 3, -11, -1000, 2, -1000, -1000, -1000, -1000, 4,
-1000, 15, 13, 1, -1, -1000, -1000, -1000, -1000, -1000,
-9, -1000, -1000, 85, -1000, -1000, -1000, 67, 37, -15,
112, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, 108, -1000, -1000, -1000, -1000, -1000, 74, 67,
10, 11, -8, -14, 98, 45, -1000, 97, -1000, -1000,
-1000, 26, 79, 9, 39, 8, 40, 99, 99, -1000,
-1000, 86, -1000, 96, 87, 73, 56, -1000, -1000, -13,
40, -13, 7, 6, 35, -1000, 29, -1000, -1000, -1000,
-1000, -1000, -1000, 5, -7, -1000, -1000, -1000, 75, -1000,
-1000, -13, -13, -1000, -1000, -1000,
}
var exprPgo = [...]int{
0, 29, 28, 27, 26, 24, 1,
0, 122, 121, 120, 119, 118, 1, 117, 111, 5,
0, 47, 3, 2, 102,
}
var exprR1 = [...]int{
0, 1, 2, 2, 2, 2, 3, 3, 3, 3,
4, 4, 4, 5, 5, 6, 6, 6, 6,
0, 1, 2, 2, 2, 11, 11, 11, 11, 11,
14, 12, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 3, 3, 3, 3, 4, 4, 4, 5,
5, 6, 6, 6, 6, 7, 7, 7, 7, 7,
7, 7, 7, 7, 8, 8, 9, 9, 10, 10,
}
var exprR2 = [...]int{
0, 1, 1, 3, 3, 2, 1, 1, 1, 1,
3, 3, 3, 1, 3, 3, 3, 3, 3,
0, 1, 1, 1, 1, 1, 3, 3, 3, 2,
2, 4, 4, 5, 5, 6, 7, 4, 5, 5,
6, 7, 1, 1, 1, 1, 3, 3, 3, 1,
3, 3, 3, 3, 3, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 3, 4, 4,
}
var exprChk = [...]int{
-1000, -1, -2, -4, 12, -3, 2, 16, 17, 11,
9, -5, 2, -6, 4, 5, 2, 13, 2, 14,
13, 8, 9, 10, 11, -6, 5, 5, 5, 5,
-1000, -1, -2, -11, -12, -13, -4, 21, -8, -7,
13, 25, 26, 27, 28, 31, 29, 30, 32, 33,
34, 35, -3, 2, 19, 20, 12, 10, -11, 21,
21, -10, 23, 24, -5, 2, -6, 4, 5, 2,
22, -14, -11, -12, 4, -13, 21, 21, 21, 14,
2, 17, 14, 9, 10, 11, 12, 22, 6, 22,
17, 22, -12, -13, -9, 4, -9, -6, 5, 5,
5, 5, -10, -12, -13, -10, 22, 22, 17, 22,
22, 22, 22, 4, -10, -10,
}
var exprDef = [...]int{
0, -2, -2, 2, 0, 0, 5, 6, 7, 8,
9, 0, 0, 13, 0, 3, 4, 10, 11, 0,
12, 0, 0, 0, 0, 14, 15, 16, 17, 18,
0, -2, 1, -2, 3, 4, 5, 0, 0, 0,
0, 44, 45, 35, 36, 37, 38, 39, 40, 41,
42, 43, 0, 9, 22, 23, 24, 25, 0, 0,
0, 0, 0, 0, 0, 0, 29, 0, 6, 8,
7, 0, 0, 0, 0, 0, 0, 0, 0, 26,
27, 0, 28, 0, 0, 0, 0, 11, 10, 12,
0, 17, 0, 0, 0, 46, 0, 30, 31, 32,
33, 34, 14, 0, 0, 19, 13, 18, 0, 48,
49, 15, 20, 47, 16, 21,
}
var exprTok1 = [...]int{
@ -124,7 +205,9 @@ var exprTok1 = [...]int{
var exprTok2 = [...]int{
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35,
}
var exprTok3 = [...]int{
0,
@ -469,99 +552,285 @@ exprdefault:
case 1:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:32
//line pkg/logql/expr.y:52
{
exprlex.(*lexer).expr = exprDollar[1].Expr
}
case 2:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:35
//line pkg/logql/expr.y:55
{
exprVAL.Expr = &matchersExpr{matchers: exprDollar[1].Selector}
exprVAL.Expr = exprDollar[1].LogExpr
}
case 3:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:36
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:56
{
exprVAL.Expr = exprDollar[1].RangeAggregationExpr
}
case 4:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:57
{
exprVAL.Expr = NewFilterExpr(exprDollar[1].Expr, exprDollar[2].Filter, exprDollar[3].str)
exprVAL.Expr = exprDollar[1].VectorAggregationExpr
}
case 5:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:61
{
exprVAL.LogExpr = newMatcherExpr(exprDollar[1].Selector)
}
case 6:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:62
{
exprVAL.LogExpr = NewFilterExpr(exprDollar[1].LogExpr, exprDollar[2].Filter, exprDollar[3].str)
}
case 7:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:63
{
exprVAL.LogExpr = exprDollar[2].LogExpr
}
case 10:
exprDollar = exprS[exprpt-2 : exprpt+1]
//line pkg/logql/expr.y:69
{
exprVAL.LogRangeExpr = mustNewRange(exprDollar[1].LogExpr, exprDollar[2].duration)
}
case 11:
exprDollar = exprS[exprpt-4 : exprpt+1]
//line pkg/logql/expr.y:72
{
exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[3].LogRangeExpr, exprDollar[1].RangeOp)
}
case 12:
exprDollar = exprS[exprpt-4 : exprpt+1]
//line pkg/logql/expr.y:75
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].RangeAggregationExpr, exprDollar[1].VectorOp, nil, nil)
}
case 13:
exprDollar = exprS[exprpt-5 : exprpt+1]
//line pkg/logql/expr.y:76
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[4].RangeAggregationExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, nil)
}
case 14:
exprDollar = exprS[exprpt-5 : exprpt+1]
//line pkg/logql/expr.y:77
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].RangeAggregationExpr, exprDollar[1].VectorOp, exprDollar[5].Grouping, nil)
}
case 15:
exprDollar = exprS[exprpt-6 : exprpt+1]
//line pkg/logql/expr.y:78
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].RangeAggregationExpr, exprDollar[1].VectorOp, nil, &exprDollar[3].str)
}
case 16:
exprDollar = exprS[exprpt-7 : exprpt+1]
//line pkg/logql/expr.y:79
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].RangeAggregationExpr, exprDollar[1].VectorOp, exprDollar[7].Grouping, &exprDollar[3].str)
}
case 17:
exprDollar = exprS[exprpt-4 : exprpt+1]
//line pkg/logql/expr.y:80
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].VectorAggregationExpr, exprDollar[1].VectorOp, nil, nil)
}
case 18:
exprDollar = exprS[exprpt-5 : exprpt+1]
//line pkg/logql/expr.y:81
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[4].VectorAggregationExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, nil)
}
case 19:
exprDollar = exprS[exprpt-5 : exprpt+1]
//line pkg/logql/expr.y:82
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].VectorAggregationExpr, exprDollar[1].VectorOp, exprDollar[5].Grouping, nil)
}
case 20:
exprDollar = exprS[exprpt-6 : exprpt+1]
//line pkg/logql/expr.y:83
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].VectorAggregationExpr, exprDollar[1].VectorOp, nil, &exprDollar[3].str)
}
case 21:
exprDollar = exprS[exprpt-7 : exprpt+1]
//line pkg/logql/expr.y:84
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].VectorAggregationExpr, exprDollar[1].VectorOp, exprDollar[7].Grouping, &exprDollar[3].str)
}
case 22:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:42
//line pkg/logql/expr.y:88
{
exprVAL.Filter = labels.MatchRegexp
}
case 7:
case 23:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:43
//line pkg/logql/expr.y:89
{
exprVAL.Filter = labels.MatchEqual
}
case 8:
case 24:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:44
//line pkg/logql/expr.y:90
{
exprVAL.Filter = labels.MatchNotRegexp
}
case 9:
case 25:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:45
//line pkg/logql/expr.y:91
{
exprVAL.Filter = labels.MatchNotEqual
}
case 10:
case 26:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:49
//line pkg/logql/expr.y:95
{
exprVAL.Selector = exprDollar[2].Matchers
}
case 11:
case 27:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:50
//line pkg/logql/expr.y:96
{
exprVAL.Selector = exprDollar[2].Matchers
}
case 12:
case 28:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:51
//line pkg/logql/expr.y:97
{
}
case 13:
case 29:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:55
//line pkg/logql/expr.y:101
{
exprVAL.Matchers = []*labels.Matcher{exprDollar[1].Matcher}
}
case 14:
case 30:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:56
//line pkg/logql/expr.y:102
{
exprVAL.Matchers = append(exprDollar[1].Matchers, exprDollar[3].Matcher)
}
case 15:
case 31:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:60
//line pkg/logql/expr.y:106
{
exprVAL.Matcher = mustNewMatcher(labels.MatchEqual, exprDollar[1].str, exprDollar[3].str)
}
case 16:
case 32:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:61
//line pkg/logql/expr.y:107
{
exprVAL.Matcher = mustNewMatcher(labels.MatchNotEqual, exprDollar[1].str, exprDollar[3].str)
}
case 17:
case 33:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:62
//line pkg/logql/expr.y:108
{
exprVAL.Matcher = mustNewMatcher(labels.MatchRegexp, exprDollar[1].str, exprDollar[3].str)
}
case 18:
case 34:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:63
//line pkg/logql/expr.y:109
{
exprVAL.Matcher = mustNewMatcher(labels.MatchNotRegexp, exprDollar[1].str, exprDollar[3].str)
}
case 35:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:113
{
exprVAL.VectorOp = OpTypeSum
}
case 36:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:114
{
exprVAL.VectorOp = OpTypeAvg
}
case 37:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:115
{
exprVAL.VectorOp = OpTypeCount
}
case 38:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:116
{
exprVAL.VectorOp = OpTypeMax
}
case 39:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:117
{
exprVAL.VectorOp = OpTypeMin
}
case 40:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:118
{
exprVAL.VectorOp = OpTypeStddev
}
case 41:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:119
{
exprVAL.VectorOp = OpTypeStdvar
}
case 42:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:120
{
exprVAL.VectorOp = OpTypeBottomK
}
case 43:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:121
{
exprVAL.VectorOp = OpTypeTopK
}
case 44:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:125
{
exprVAL.RangeOp = OpTypeCountOverTime
}
case 45:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:126
{
exprVAL.RangeOp = OpTypeRate
}
case 46:
exprDollar = exprS[exprpt-1 : exprpt+1]
//line pkg/logql/expr.y:130
{
exprVAL.Labels = []string{exprDollar[1].str}
}
case 47:
exprDollar = exprS[exprpt-3 : exprpt+1]
//line pkg/logql/expr.y:131
{
exprVAL.Labels = append(exprDollar[1].Labels, exprDollar[3].str)
}
case 48:
exprDollar = exprS[exprpt-4 : exprpt+1]
//line pkg/logql/expr.y:135
{
exprVAL.Grouping = &grouping{without: false, groups: exprDollar[3].Labels}
}
case 49:
exprDollar = exprS[exprpt-4 : exprpt+1]
//line pkg/logql/expr.y:136
{
exprVAL.Grouping = &grouping{without: true, groups: exprDollar[3].Labels}
}
}
goto exprstack /* stack new state and value */
}

@ -6,6 +6,7 @@ import (
"strconv"
"strings"
"text/scanner"
"time"
"github.com/prometheus/prometheus/pkg/labels"
)
@ -19,7 +20,16 @@ func init() {
}
// ParseExpr parses a string and returns an Expr.
func ParseExpr(input string) (Expr, error) {
func ParseExpr(input string) (expr Expr, err error) {
defer func() {
r := recover()
if r != nil {
var ok bool
if err, ok = r.(error); ok {
return
}
}
}()
l := lexer{
parser: exprNewParser().(*exprParserImpl),
}
@ -27,7 +37,6 @@ func ParseExpr(input string) (Expr, error) {
l.Scanner.Error = func(_ *scanner.Scanner, msg string) {
l.Error(msg)
}
e := l.parser.Parse(&l)
if e != 0 || len(l.errs) > 0 {
return nil, l.errs[0]
@ -49,17 +58,46 @@ func ParseMatchers(input string) ([]*labels.Matcher, error) {
return matcherExpr.matchers, nil
}
func ParseLogSelector(input string) (LogSelectorExpr, error) {
expr, err := ParseExpr(input)
if err != nil {
return nil, err
}
logSelector, ok := expr.(LogSelectorExpr)
if !ok {
return nil, errors.New("only log selector is supported")
}
return logSelector, nil
}
var tokens = map[string]int{
",": COMMA,
".": DOT,
"{": OPEN_BRACE,
"}": CLOSE_BRACE,
"=": EQ,
"!=": NEQ,
"=~": RE,
"!~": NRE,
"|=": PIPE_EXACT,
"|~": PIPE_MATCH,
",": COMMA,
".": DOT,
"{": OPEN_BRACE,
"}": CLOSE_BRACE,
"=": EQ,
"!=": NEQ,
"=~": RE,
"!~": NRE,
"|=": PIPE_EXACT,
"|~": PIPE_MATCH,
"(": OPEN_PARENTHESIS,
")": CLOSE_PARENTHESIS,
"by": BY,
"without": WITHOUT,
OpTypeCountOverTime: COUNT_OVER_TIME,
"[": OPEN_BRACKET,
"]": CLOSE_BRACKET,
OpTypeRate: RATE,
OpTypeSum: SUM,
OpTypeAvg: AVG,
OpTypeMax: MAX,
OpTypeMin: MIN,
OpTypeCount: COUNT,
OpTypeStddev: STDDEV,
OpTypeStdvar: STDVAR,
OpTypeBottomK: BOTTOMK,
OpTypeTopK: TOPK,
}
type lexer struct {
@ -71,7 +109,6 @@ type lexer struct {
func (l *lexer) Lex(lval *exprSymType) int {
r := l.Scan()
switch r {
case scanner.EOF:
return 0
@ -86,6 +123,25 @@ func (l *lexer) Lex(lval *exprSymType) int {
return STRING
}
// scanning duration tokens
if l.TokenText() == "[" {
d := ""
for r := l.Next(); r != scanner.EOF; r = l.Next() {
if string(r) == "]" {
i, err := time.ParseDuration(d)
if err != nil {
l.Error(err.Error())
return 0
}
lval.duration = i
return DURATION
}
d += string(r)
}
l.Error("missing closing ']' in duration")
return 0
}
if tok, ok := tokens[l.TokenText()+string(l.Peek())]; ok {
l.Next()
return tok
@ -100,11 +156,7 @@ func (l *lexer) Lex(lval *exprSymType) int {
}
func (l *lexer) Error(msg string) {
l.errs = append(l.errs, ParseError{
msg: msg,
line: l.Line,
col: l.Column,
})
l.errs = append(l.errs, newParseError(msg, l.Line, l.Column))
}
// ParseError is what is returned when we fail to parse.
@ -114,5 +166,16 @@ type ParseError struct {
}
func (p ParseError) Error() string {
if p.col == 0 && p.line == 0 {
return fmt.Sprintf("parse error : %s", p.msg)
}
return fmt.Sprintf("parse error at line %d, col %d: %s", p.line, p.col, p.msg)
}
func newParseError(msg string, line, col int) ParseError {
return ParseError{
msg: msg,
line: line,
col: col,
}
}
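Callers can inspect failures through the `ParseError` position fields; a small sketch (the exact message and position depend on the input, cf. the parser tests below):

```go
_, err := ParseExpr(`rate({ foo !~ "bar" }[5)`)
if pe, ok := err.(ParseError); ok {
	fmt.Println(pe.Error()) // parse error ... : missing closing ']' in duration
}
```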

@ -4,6 +4,7 @@ import (
"strings"
"testing"
"text/scanner"
"time"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/stretchr/testify/require"
@ -22,6 +23,12 @@ func TestLex(t *testing.T) {
{`{ foo = "bar", bar != "baz" }`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING,
COMMA, IDENTIFIER, NEQ, STRING, CLOSE_BRACE}},
{`{ foo = "ba\"r" }`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE}},
{`rate({foo="bar"}[10s])`, []int{RATE, OPEN_PARENTHESIS, OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, DURATION, CLOSE_PARENTHESIS}},
{`count_over_time({foo="bar"}[5m])`, []int{COUNT_OVER_TIME, OPEN_PARENTHESIS, OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, DURATION, CLOSE_PARENTHESIS}},
{`sum(count_over_time({foo="bar"}[5m])) by (foo,bar)`, []int{SUM, OPEN_PARENTHESIS, COUNT_OVER_TIME, OPEN_PARENTHESIS, OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, DURATION, CLOSE_PARENTHESIS, CLOSE_PARENTHESIS, BY, OPEN_PARENTHESIS, IDENTIFIER, COMMA, IDENTIFIER, CLOSE_PARENTHESIS}},
{`topk(3,count_over_time({foo="bar"}[5m])) by (foo,bar)`, []int{TOPK, OPEN_PARENTHESIS, IDENTIFIER, COMMA, COUNT_OVER_TIME, OPEN_PARENTHESIS, OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, DURATION, CLOSE_PARENTHESIS, CLOSE_PARENTHESIS, BY, OPEN_PARENTHESIS, IDENTIFIER, COMMA, IDENTIFIER, CLOSE_PARENTHESIS}},
{`bottomk(10,sum(count_over_time({foo="bar"}[5m])) by (foo,bar))`, []int{BOTTOMK, OPEN_PARENTHESIS, IDENTIFIER, COMMA, SUM, OPEN_PARENTHESIS, COUNT_OVER_TIME, OPEN_PARENTHESIS, OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, DURATION, CLOSE_PARENTHESIS, CLOSE_PARENTHESIS, BY, OPEN_PARENTHESIS, IDENTIFIER, COMMA, IDENTIFIER, CLOSE_PARENTHESIS, CLOSE_PARENTHESIS}},
{`sum(max(rate({foo="bar"}[5m])) by (foo,bar)) by (foo)`, []int{SUM, OPEN_PARENTHESIS, MAX, OPEN_PARENTHESIS, RATE, OPEN_PARENTHESIS, OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, DURATION, CLOSE_PARENTHESIS, CLOSE_PARENTHESIS, BY, OPEN_PARENTHESIS, IDENTIFIER, COMMA, IDENTIFIER, CLOSE_PARENTHESIS, CLOSE_PARENTHESIS, BY, OPEN_PARENTHESIS, IDENTIFIER, CLOSE_PARENTHESIS}},
} {
t.Run(tc.input, func(t *testing.T) {
actual := []int{}
@ -44,6 +51,10 @@ func TestLex(t *testing.T) {
}
}
func newString(s string) *string {
return &s
}
func TestParse(t *testing.T) {
for _, tc := range []struct {
in string
@ -70,6 +81,169 @@ func TestParse(t *testing.T) {
in: `{ foo !~ "bar" }`,
exp: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchNotRegexp, "foo", "bar")}},
},
{
in: `count_over_time({ foo !~ "bar" }[12m])`,
exp: &rangeAggregationExpr{
left: &logRange{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchNotRegexp, "foo", "bar")}},
interval: 12 * time.Minute,
},
operation: "count_over_time",
},
},
{
in: `rate({ foo !~ "bar" }[5h])`,
exp: &rangeAggregationExpr{
left: &logRange{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchNotRegexp, "foo", "bar")}},
interval: 5 * time.Hour,
},
operation: "rate",
},
},
{
in: `sum(rate({ foo !~ "bar" }[5h]))`,
exp: mustNewVectorAggregationExpr(&rangeAggregationExpr{
left: &logRange{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchNotRegexp, "foo", "bar")}},
interval: 5 * time.Hour,
},
operation: "rate",
}, "sum", nil, nil),
},
{
in: `avg(count_over_time({ foo !~ "bar" }[5h])) by (bar,foo)`,
exp: mustNewVectorAggregationExpr(&rangeAggregationExpr{
left: &logRange{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchNotRegexp, "foo", "bar")}},
interval: 5 * time.Hour,
},
operation: "count_over_time",
}, "avg", &grouping{
without: false,
groups: []string{"bar", "foo"},
}, nil),
},
{
in: `max without (bar) (count_over_time({ foo !~ "bar" }[5h]))`,
exp: mustNewVectorAggregationExpr(&rangeAggregationExpr{
left: &logRange{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchNotRegexp, "foo", "bar")}},
interval: 5 * time.Hour,
},
operation: "count_over_time",
}, "max", &grouping{
without: true,
groups: []string{"bar"},
}, nil),
},
{
in: `topk(10,count_over_time({ foo !~ "bar" }[5h])) without (bar)`,
exp: mustNewVectorAggregationExpr(&rangeAggregationExpr{
left: &logRange{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchNotRegexp, "foo", "bar")}},
interval: 5 * time.Hour,
},
operation: "count_over_time",
}, "topk", &grouping{
without: true,
groups: []string{"bar"},
}, newString("10")),
},
{
in: `bottomk(30 ,sum(rate({ foo !~ "bar" }[5h])) by (foo))`,
exp: mustNewVectorAggregationExpr(mustNewVectorAggregationExpr(&rangeAggregationExpr{
left: &logRange{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchNotRegexp, "foo", "bar")}},
interval: 5 * time.Hour,
},
operation: "rate",
}, "sum", &grouping{
groups: []string{"foo"},
without: false,
}, nil), "bottomk", nil,
newString("30")),
},
{
in: `max( sum(count_over_time({ foo !~ "bar" }[5h])) without (foo,bar) ) by (foo)`,
exp: mustNewVectorAggregationExpr(mustNewVectorAggregationExpr(&rangeAggregationExpr{
left: &logRange{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchNotRegexp, "foo", "bar")}},
interval: 5 * time.Hour,
},
operation: "count_over_time",
}, "sum", &grouping{
groups: []string{"foo", "bar"},
without: true,
}, nil), "max", &grouping{
groups: []string{"foo"},
without: false,
}, nil),
},
{
in: `unk({ foo !~ "bar" }[5m])`,
err: ParseError{
msg: "syntax error: unexpected IDENTIFIER",
line: 1,
col: 1,
},
},
{
in: `rate({ foo !~ "bar" }[5minutes])`,
err: ParseError{
msg: "time: unknown unit minutes in duration 5minutes",
line: 0,
col: 22,
},
},
{
in: `rate({ foo !~ "bar" }[5)`,
err: ParseError{
msg: "missing closing ']' in duration",
line: 0,
col: 22,
},
},
{
in: `min({ foo !~ "bar" }[5m])`,
err: ParseError{
msg: "syntax error: unexpected {",
line: 1,
col: 5,
},
},
{
in: `sum(3 ,count_over_time({ foo !~ "bar" }[5h]))`,
err: ParseError{
msg: "unsupported parameter for operation sum(3,",
line: 0,
col: 0,
},
},
{
in: `topk(count_over_time({ foo !~ "bar" }[5h]))`,
err: ParseError{
msg: "parameter required for operation topk",
line: 0,
col: 0,
},
},
{
in: `bottomk(he,count_over_time({ foo !~ "bar" }[5h]))`,
err: ParseError{
msg: "invalid parameter bottomk(he,",
line: 0,
col: 0,
},
},
{
in: `stddev({ foo !~ "bar" })`,
err: ParseError{
msg: "syntax error: unexpected {",
line: 1,
col: 8,
},
},
{
in: `{ foo = "bar", bar != "baz" }`,
exp: &matchersExpr{matchers: []*labels.Matcher{
@ -105,6 +279,125 @@ func TestParse(t *testing.T) {
match: "flap",
},
},
{
in: `count_over_time(({foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap")[5m])`,
exp: newRangeAggregationExpr(
&logRange{
left: &filterExpr{
left: &filterExpr{
left: &filterExpr{
left: &filterExpr{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
ty: labels.MatchEqual,
match: "baz",
},
ty: labels.MatchRegexp,
match: "blip",
},
ty: labels.MatchNotEqual,
match: "flip",
},
ty: labels.MatchNotRegexp,
match: "flap",
},
interval: 5 * time.Minute,
}, OpTypeCountOverTime),
},
{
in: `sum(count_over_time(({foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap")[5m])) by (foo)`,
exp: mustNewVectorAggregationExpr(newRangeAggregationExpr(
&logRange{
left: &filterExpr{
left: &filterExpr{
left: &filterExpr{
left: &filterExpr{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
ty: labels.MatchEqual,
match: "baz",
},
ty: labels.MatchRegexp,
match: "blip",
},
ty: labels.MatchNotEqual,
match: "flip",
},
ty: labels.MatchNotRegexp,
match: "flap",
},
interval: 5 * time.Minute,
}, OpTypeCountOverTime),
"sum",
&grouping{
without: false,
groups: []string{"foo"},
},
nil),
},
{
in: `topk(5,count_over_time(({foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap")[5m])) without (foo)`,
exp: mustNewVectorAggregationExpr(newRangeAggregationExpr(
&logRange{
left: &filterExpr{
left: &filterExpr{
left: &filterExpr{
left: &filterExpr{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
ty: labels.MatchEqual,
match: "baz",
},
ty: labels.MatchRegexp,
match: "blip",
},
ty: labels.MatchNotEqual,
match: "flip",
},
ty: labels.MatchNotRegexp,
match: "flap",
},
interval: 5 * time.Minute,
}, OpTypeCountOverTime),
"topk",
&grouping{
without: true,
groups: []string{"foo"},
},
newString("5")),
},
{
in: `topk(5,sum(rate(({foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap")[5m])) by (app))`,
exp: mustNewVectorAggregationExpr(
mustNewVectorAggregationExpr(
newRangeAggregationExpr(
&logRange{
left: &filterExpr{
left: &filterExpr{
left: &filterExpr{
left: &filterExpr{
left: &matchersExpr{matchers: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
ty: labels.MatchEqual,
match: "baz",
},
ty: labels.MatchRegexp,
match: "blip",
},
ty: labels.MatchNotEqual,
match: "flip",
},
ty: labels.MatchNotRegexp,
match: "flap",
},
interval: 5 * time.Minute,
}, OpTypeRate),
"sum",
&grouping{
without: false,
groups: []string{"app"},
},
nil),
"topk",
nil,
newString("5")),
},
{
in: `{foo="bar}`,
err: ParseError{

@ -0,0 +1,129 @@
package logql
import (
"github.com/grafana/loki/pkg/iter"
"github.com/prometheus/prometheus/promql"
)
// RangeVectorAggregator aggregates samples for a given range of samples.
// It receives the current timestamp in nanoseconds and the list of points within
// the range.
type RangeVectorAggregator func(int64, []promql.Point) float64
// RangeVectorIterator iterates through a range of samples.
// To fetch the current vector use `At` with a `RangeVectorAggregator`.
type RangeVectorIterator interface {
Next() bool
At(aggregator RangeVectorAggregator) (int64, promql.Vector)
Close() error
}
type rangeVectorIterator struct {
iter iter.PeekingEntryIterator
selRange, step, end, current int64
window map[string]*promql.Series
}
func newRangeVectorIterator(
it iter.EntryIterator,
selRange, step, start, end int64) *rangeVectorIterator {
// forces at least one step.
if step == 0 {
step = 1
}
return &rangeVectorIterator{
iter: iter.NewPeekingIterator(it),
step: step,
end: end,
selRange: selRange,
current: start - step, // first loop iteration will set it to start
window: map[string]*promql.Series{},
}
}
func (r *rangeVectorIterator) Next() bool {
// slides the range window to the next position
r.current = r.current + r.step
if r.current > r.end {
return false
}
rangeEnd := r.current
rangeStart := r.current - r.selRange
// load samples
r.popBack(rangeStart)
r.load(rangeStart, rangeEnd)
return true
}
func (r *rangeVectorIterator) Close() error {
return r.iter.Close()
}
// popBack removes from the back all entries that have fallen out of the current window.
func (r *rangeVectorIterator) popBack(newStart int64) {
// possible improvement: if there is no overlap we can just remove all.
for fp := range r.window {
lastPoint := 0
removed := false
for i, p := range r.window[fp].Points {
if p.T <= newStart {
lastPoint = i
removed = true
continue
}
break
}
// only slice when a point actually fell out of the window; an unconditional
// lastPoint+1 would drop a still-valid first point.
if removed {
r.window[fp].Points = r.window[fp].Points[lastPoint+1:]
}
if len(r.window[fp].Points) == 0 {
delete(r.window, fp)
}
}
}
// load loads the samples of the next range window.
func (r *rangeVectorIterator) load(start, end int64) {
for lbs, entry, hasNext := r.iter.Peek(); hasNext; lbs, entry, hasNext = r.iter.Peek() {
if entry.Timestamp.UnixNano() > end {
// do not consume the iterator: this entry belongs to a later range.
return
}
// the lower bound of the range is not inclusive
if entry.Timestamp.UnixNano() <= start {
_ = r.iter.Next()
continue
}
// adds the sample.
var series *promql.Series
var ok bool
series, ok = r.window[lbs]
if !ok {
series = &promql.Series{
Points: []promql.Point{},
}
r.window[lbs] = series
}
series.Points = append(series.Points, promql.Point{
T: entry.Timestamp.UnixNano(),
V: 1,
})
_ = r.iter.Next()
}
}
func (r *rangeVectorIterator) At(aggregator RangeVectorAggregator) (int64, promql.Vector) {
result := make([]promql.Sample, 0, len(r.window))
for lbs, series := range r.window {
labels, err := promql.ParseMetric(lbs)
if err != nil {
continue
}
result = append(result, promql.Sample{
Point: promql.Point{
V: aggregator(r.current, series.Points),
T: r.current,
},
Metric: labels,
})
}
return r.current, result
}
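A worked example of the window bounds used by `load` (lower bound exclusive, upper bound inclusive), matching the fixtures in the test below: entries at 2s, 5s, 6s and 10s with a 5s range evaluated at ts=10s fall in (5s, 10s], so `count` sees 2 of them.

```go
entries := []int64{2, 5, 6, 10} // entry timestamps, in seconds
n := 0
for _, t := range entries {
	if t > 5 && t <= 10 { // (rangeStart, rangeEnd] as in load()
		n++
	}
}
fmt.Println(n) // 2: the 6s and 10s entries
```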

@ -0,0 +1,138 @@
package logql
import (
"fmt"
"testing"
"time"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/prometheus/prometheus/promql"
"github.com/stretchr/testify/require"
)
var entries = []logproto.Entry{
{Timestamp: time.Unix(2, 0)},
{Timestamp: time.Unix(5, 0)},
{Timestamp: time.Unix(6, 0)},
{Timestamp: time.Unix(10, 0)},
{Timestamp: time.Unix(10, 1)},
{Timestamp: time.Unix(11, 0)},
{Timestamp: time.Unix(35, 0)},
{Timestamp: time.Unix(35, 1)},
{Timestamp: time.Unix(40, 0)},
{Timestamp: time.Unix(100, 0)},
{Timestamp: time.Unix(100, 1)},
}
var labelFoo, _ = promql.ParseMetric("{app=\"foo\"}")
var labelBar, _ = promql.ParseMetric("{app=\"bar\"}")
func newEntryIterator() iter.EntryIterator {
return iter.NewHeapIterator([]iter.EntryIterator{
iter.NewStreamIterator(&logproto.Stream{
Labels: labelFoo.String(),
Entries: entries,
}),
iter.NewStreamIterator(&logproto.Stream{
Labels: labelBar.String(),
Entries: entries,
}),
}, logproto.FORWARD)
}
func newPoint(t time.Time, v float64) promql.Point {
return promql.Point{T: t.UnixNano(), V: v}
}
func Test_RangeVectorIterator(t *testing.T) {
tests := []struct {
selRange int64
step int64
expectedVectors []promql.Vector
expectedTs []time.Time
}{
{
(5 * time.Second).Nanoseconds(), // no overlap
(30 * time.Second).Nanoseconds(),
[]promql.Vector{
[]promql.Sample{
{Point: newPoint(time.Unix(10, 0), 2), Metric: labelBar},
{Point: newPoint(time.Unix(10, 0), 2), Metric: labelFoo},
},
[]promql.Sample{
{Point: newPoint(time.Unix(40, 0), 2), Metric: labelBar},
{Point: newPoint(time.Unix(40, 0), 2), Metric: labelFoo},
},
{},
[]promql.Sample{
{Point: newPoint(time.Unix(100, 0), 1), Metric: labelBar},
{Point: newPoint(time.Unix(100, 0), 1), Metric: labelFoo},
},
},
[]time.Time{time.Unix(10, 0), time.Unix(40, 0), time.Unix(70, 0), time.Unix(100, 0)},
},
{
(35 * time.Second).Nanoseconds(), // will overlap by 5 sec
(30 * time.Second).Nanoseconds(),
[]promql.Vector{
[]promql.Sample{
{Point: newPoint(time.Unix(10, 0), 4), Metric: labelBar},
{Point: newPoint(time.Unix(10, 0), 4), Metric: labelFoo},
},
[]promql.Sample{
{Point: newPoint(time.Unix(40, 0), 7), Metric: labelBar},
{Point: newPoint(time.Unix(40, 0), 7), Metric: labelFoo},
},
[]promql.Sample{
{Point: newPoint(time.Unix(70, 0), 2), Metric: labelBar},
{Point: newPoint(time.Unix(70, 0), 2), Metric: labelFoo},
},
[]promql.Sample{
{Point: newPoint(time.Unix(100, 0), 1), Metric: labelBar},
{Point: newPoint(time.Unix(100, 0), 1), Metric: labelFoo},
},
},
[]time.Time{time.Unix(10, 0), time.Unix(40, 0), time.Unix(70, 0), time.Unix(100, 0)},
},
{
(30 * time.Second).Nanoseconds(), // same range
(30 * time.Second).Nanoseconds(),
[]promql.Vector{
[]promql.Sample{
{Point: newPoint(time.Unix(10, 0), 4), Metric: labelBar},
{Point: newPoint(time.Unix(10, 0), 4), Metric: labelFoo},
},
[]promql.Sample{
{Point: newPoint(time.Unix(40, 0), 5), Metric: labelBar},
{Point: newPoint(time.Unix(40, 0), 5), Metric: labelFoo},
},
[]promql.Sample{},
[]promql.Sample{
{Point: newPoint(time.Unix(100, 0), 1), Metric: labelBar},
{Point: newPoint(time.Unix(100, 0), 1), Metric: labelFoo},
},
},
[]time.Time{time.Unix(10, 0), time.Unix(40, 0), time.Unix(70, 0), time.Unix(100, 0)},
},
}
for _, tt := range tests {
t.Run(
fmt.Sprintf("logs[%s] - step: %s", time.Duration(tt.selRange), time.Duration(tt.step)),
func(t *testing.T) {
it := newRangeVectorIterator(newEntryIterator(), tt.selRange,
tt.step, time.Unix(10, 0).UnixNano(), time.Unix(100, 0).UnixNano())
i := 0
for it.Next() {
ts, v := it.At(count)
require.ElementsMatch(t, tt.expectedVectors[i], v)
require.Equal(t, tt.expectedTs[i].UnixNano(), ts)
i++
}
require.Equal(t, len(tt.expectedTs), i)
require.Equal(t, len(tt.expectedVectors), i)
})
}
}

@ -0,0 +1,65 @@
package logql
import (
"math"
"github.com/prometheus/prometheus/promql"
)
type vectorByValueHeap promql.Vector
func (s vectorByValueHeap) Len() int {
return len(s)
}
func (s vectorByValueHeap) Less(i, j int) bool {
if math.IsNaN(s[i].V) {
return true
}
return s[i].V < s[j].V
}
func (s vectorByValueHeap) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s *vectorByValueHeap) Push(x interface{}) {
*s = append(*s, *(x.(*promql.Sample)))
}
func (s *vectorByValueHeap) Pop() interface{} {
old := *s
n := len(old)
el := old[n-1]
*s = old[0 : n-1]
return el
}
type vectorByReverseValueHeap promql.Vector
func (s vectorByReverseValueHeap) Len() int {
return len(s)
}
func (s vectorByReverseValueHeap) Less(i, j int) bool {
if math.IsNaN(s[i].V) {
return true
}
return s[i].V > s[j].V
}
func (s vectorByReverseValueHeap) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s *vectorByReverseValueHeap) Push(x interface{}) {
*s = append(*s, *(x.(*promql.Sample)))
}
func (s *vectorByReverseValueHeap) Pop() interface{} {
old := *s
n := len(old)
el := old[n-1]
*s = old[0 : n-1]
return el
}
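How the Evaluator above uses these heaps for `topk` (a self-contained sketch with k=2): the min-heap keeps the k largest values seen so far by evicting its smallest element. `bottomk` mirrors this with vectorByReverseValueHeap.

```go
k := 2
h := make(vectorByValueHeap, 0, k)
for _, v := range []float64{5, 1, 9, 3} {
	if len(h) < k || h[0].V < v {
		if len(h) == k {
			heap.Pop(&h) // drop the current smallest
		}
		heap.Push(&h, &promql.Sample{Point: promql.Point{V: v}})
	}
}
// h now holds 5 and 9; the smallest kept value sits at h[0].
```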

@ -143,7 +143,9 @@ func (t *Loki) initQuerier() (err error) {
t.httpAuthMiddleware,
)
t.server.HTTP.Path("/ready").Handler(http.HandlerFunc(t.querier.ReadinessHandler))
t.server.HTTP.Handle("/api/prom/query", httpMiddleware.Wrap(http.HandlerFunc(t.querier.QueryHandler)))
t.server.HTTP.Handle("/api/v1/query_range", httpMiddleware.Wrap(http.HandlerFunc(t.querier.RangeQueryHandler)))
t.server.HTTP.Handle("/api/v1/query", httpMiddleware.Wrap(http.HandlerFunc(t.querier.InstantQueryHandler)))
t.server.HTTP.Handle("/api/prom/query", httpMiddleware.Wrap(http.HandlerFunc(t.querier.LogQueryHandler)))
t.server.HTTP.Handle("/api/prom/label", httpMiddleware.Wrap(http.HandlerFunc(t.querier.LabelHandler)))
t.server.HTTP.Handle("/api/prom/label/{name}/values", httpMiddleware.Wrap(http.HandlerFunc(t.querier.LabelHandler)))
t.server.HTTP.Handle("/api/prom/tail", httpMiddleware.Wrap(http.HandlerFunc(t.querier.TailHandler)))

@ -9,6 +9,7 @@ import (
"strings"
"time"
"github.com/prometheus/prometheus/promql"
"github.com/weaveworks/common/httpgrpc/server"
"github.com/weaveworks/common/httpgrpc"
@ -18,6 +19,7 @@ import (
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
)
const (
@ -25,6 +27,7 @@ const (
defaultSince = 1 * time.Hour
wsPingPeriod = 1 * time.Second
maxDelayForInTailing = 5
defaultStep = 1 // 1 second
)
// nolint
@ -68,19 +71,48 @@ func directionParam(values url.Values, name string, def logproto.Direction) (log
return logproto.Direction(d), nil
}
func httpRequestToQueryRequest(httpRequest *http.Request) (*logproto.QueryRequest, error) {
func httpRequestToInstantQueryRequest(httpRequest *http.Request) (*instantQueryRequest, error) {
params := httpRequest.URL.Query()
queryRequest := logproto.QueryRequest{
Regex: params.Get("regexp"),
Query: params.Get("query"),
queryRequest := instantQueryRequest{
query: params.Get("query"),
}
var err error
queryRequest.Limit, queryRequest.Start, queryRequest.End, err = httpRequestToLookback(httpRequest)
limit, err := intParam(params, "limit", defaultQueryLimit)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
queryRequest.limit = uint32(limit)
queryRequest.ts, err = unixNanoTimeParam(params, "time", time.Now())
if err != nil {
return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
queryRequest.direction, err = directionParam(params, "direction", logproto.BACKWARD)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
return &queryRequest, nil
}
func httpRequestToRangeQueryRequest(httpRequest *http.Request) (*rangeQueryRequest, error) {
params := httpRequest.URL.Query()
queryRequest := rangeQueryRequest{
query: params.Get("query"),
}
step, err := intParam(params, "step", defaultStep)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
queryRequest.step = time.Duration(step) * time.Second
queryRequest.limit, queryRequest.start, queryRequest.end, err = httpRequestToLookback(httpRequest)
if err != nil {
return nil, err
}
queryRequest.Direction, err = directionParam(params, "direction", logproto.BACKWARD)
queryRequest.direction, err = directionParam(params, "direction", logproto.BACKWARD)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
@ -91,7 +123,6 @@ func httpRequestToQueryRequest(httpRequest *http.Request) (*logproto.QueryReques
func httpRequestToTailRequest(httpRequest *http.Request) (*logproto.TailRequest, error) {
params := httpRequest.URL.Query()
tailRequest := logproto.TailRequest{
Regex: params.Get("regexp"),
Query: params.Get("query"),
}
var err error
@ -134,22 +165,102 @@ func httpRequestToLookback(httpRequest *http.Request) (limit uint32, start, end
return
}
// QueryHandler is a http.HandlerFunc for queries.
func (q *Querier) QueryHandler(w http.ResponseWriter, r *http.Request) {
request, err := httpRequestToQueryRequest(r)
type QueryResponse struct {
ResultType promql.ValueType `json:"resultType"`
Result promql.Value `json:"result"`
}
type rangeQueryRequest struct {
query string
start, end time.Time
step time.Duration
limit uint32
direction logproto.Direction
}
type instantQueryRequest struct {
query string
ts time.Time
limit uint32
direction logproto.Direction
}
// RangeQueryHandler is a http.HandlerFunc for range queries.
func (q *Querier) RangeQueryHandler(w http.ResponseWriter, r *http.Request) {
request, err := httpRequestToRangeQueryRequest(r)
if err != nil {
server.WriteError(w, err)
return
}
query := q.engine.NewRangeQuery(q, request.query, request.start, request.end, request.step, request.direction, request.limit)
result, err := query.Exec(r.Context())
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
response := &QueryResponse{
ResultType: result.Type(),
Result: result,
}
if err := json.NewEncoder(w).Encode(response); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// InstantQueryHandler is a http.HandlerFunc for instant queries.
func (q *Querier) InstantQueryHandler(w http.ResponseWriter, r *http.Request) {
request, err := httpRequestToInstantQueryRequest(r)
if err != nil {
server.WriteError(w, err)
return
}
query := q.engine.NewInstantQuery(q, request.query, request.ts, request.direction, request.limit)
result, err := query.Exec(r.Context())
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
response := &QueryResponse{
ResultType: result.Type(),
Result: result,
}
if err := json.NewEncoder(w).Encode(response); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
level.Debug(util.Logger).Log("request", fmt.Sprintf("%+v", request))
result, err := q.Query(r.Context(), request)
// LogQueryHandler is a http.HandlerFunc for log-only queries.
func (q *Querier) LogQueryHandler(w http.ResponseWriter, r *http.Request) {
request, err := httpRequestToRangeQueryRequest(r)
if err != nil {
server.WriteError(w, err)
return
}
query := q.engine.NewRangeQuery(q, request.query, request.start, request.end, request.step, request.direction, request.limit)
result, err := query.Exec(r.Context())
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if err := json.NewEncoder(w).Encode(result); err != nil {
if result.Type() != logql.ValueTypeStreams {
http.Error(w, fmt.Sprintf("log query only support %s result type, current type is %s", logql.ValueTypeStreams, result.Type()), http.StatusBadRequest)
return
}
if err := json.NewEncoder(w).Encode(
struct {
Streams promql.Value `json:"streams"`
}{
Streams: result,
},
); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}

@ -13,10 +13,10 @@ import (
"github.com/prometheus/common/model"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/loki/pkg/helpers"
"github.com/grafana/loki/pkg/ingester/client"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/storage"
)
@ -31,8 +31,9 @@ var readinessProbeSuccess = []byte("Ready")
// Config for a querier.
type Config struct {
TailMaxDuration time.Duration `yaml:"tail_max_duration"`
QueryTimeout time.Duration `yaml:"query_timeout"`
QueryTimeout time.Duration `yaml:"query_timeout"`
TailMaxDuration time.Duration `yaml:"tail_max_duration"`
Engine logql.EngineOpts `yaml:"engine,omitempty"`
}
// RegisterFlags register flags.
@ -43,10 +44,11 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
// Querier handles queries.
type Querier struct {
cfg Config
ring ring.ReadRing
pool *cortex_client.Pool
store storage.Store
cfg Config
ring ring.ReadRing
pool *cortex_client.Pool
store storage.Store
engine *logql.Engine
}
// New makes a new Querier.
@ -62,10 +64,11 @@ func New(cfg Config, clientCfg client.Config, ring ring.ReadRing, store storage.
// used for testing purposes
func newQuerier(cfg Config, clientCfg client.Config, clientFactory cortex_client.Factory, ring ring.ReadRing, store storage.Store) (*Querier, error) {
return &Querier{
cfg: cfg,
ring: ring,
pool: cortex_client.NewPool(clientCfg.PoolConfig, ring, clientFactory, util.Logger),
store: store,
cfg: cfg,
ring: ring,
pool: cortex_client.NewPool(clientCfg.PoolConfig, ring, clientFactory, util.Logger),
store: store,
engine: logql.NewEngine(cfg.Engine),
}, nil
}
@ -139,42 +142,27 @@ func (q *Querier) forGivenIngesters(replicationSet ring.ReplicationSet, f func(l
return result, nil
}
// Query does the heavy lifting for an actual query.
func (q *Querier) Query(ctx context.Context, req *logproto.QueryRequest) (*logproto.QueryResponse, error) {
// Select implements logql.Querier.
func (q *Querier) Select(ctx context.Context, params logql.SelectParams) (iter.EntryIterator, error) {
// Enforce the query timeout while querying backends
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(q.cfg.QueryTimeout))
defer cancel()
iterators, err := q.getQueryIterators(ctx, req)
ingesterIterators, err := q.queryIngesters(ctx, params)
if err != nil {
return nil, err
}
iterator := iter.NewHeapIterator(iterators, req.Direction)
defer helpers.LogError("closing iterator", iterator.Close)
resp, _, err := iter.ReadBatch(iterator, req.Limit)
return resp, err
}
func (q *Querier) getQueryIterators(ctx context.Context, req *logproto.QueryRequest) ([]iter.EntryIterator, error) {
ingesterIterators, err := q.queryIngesters(ctx, req)
chunkStoreIterators, err := q.store.LazyQuery(ctx, params)
if err != nil {
return nil, err
}
chunkStoreIterators, err := q.store.LazyQuery(ctx, req)
if err != nil {
return nil, err
}
iterators := append(ingesterIterators, chunkStoreIterators)
return iterators, nil
return iter.NewHeapIterator(iterators, params.Direction), nil
}
func (q *Querier) queryIngesters(ctx context.Context, req *logproto.QueryRequest) ([]iter.EntryIterator, error) {
func (q *Querier) queryIngesters(ctx context.Context, params logql.SelectParams) ([]iter.EntryIterator, error) {
clients, err := q.forAllIngesters(func(client logproto.QuerierClient) (interface{}, error) {
return client.Query(ctx, req)
return client.Query(ctx, params.QueryRequest)
})
if err != nil {
return nil, err
@ -182,7 +170,7 @@ func (q *Querier) queryIngesters(ctx context.Context, req *logproto.QueryRequest
iterators := make([]iter.EntryIterator, len(clients))
for i := range clients {
iterators[i] = iter.NewQueryClientIterator(clients[i].response.(logproto.Querier_QueryClient), req.Direction)
iterators[i] = iter.NewQueryClientIterator(clients[i].response.(logproto.Querier_QueryClient), params.Direction)
}
return iterators, nil
}
@ -289,20 +277,21 @@ func (q *Querier) Tail(ctx context.Context, req *logproto.TailRequest) (*Tailer,
tailClients[clients[i].addr] = clients[i].response.(logproto.Querier_TailClient)
}
histReq := logproto.QueryRequest{
Query: req.Query,
Start: req.Start,
End: time.Now(),
Limit: req.Limit,
Direction: logproto.BACKWARD,
Regex: req.Regex,
histReq := logql.SelectParams{
QueryRequest: &logproto.QueryRequest{
Selector: req.Query,
Start: req.Start,
End: time.Now(),
Limit: req.Limit,
Direction: logproto.BACKWARD,
},
}
histIterators, err := q.getQueryIterators(queryCtx, &histReq)
histIterators, err := q.Select(queryCtx, histReq)
if err != nil {
return nil, err
}
reversedIterator, err := iter.NewEntryIteratorForward(iter.NewHeapIterator(histIterators, logproto.BACKWARD), req.Limit, true)
reversedIterator, err := iter.NewEntryIteratorForward(histIterators, req.Limit, true)
if err != nil {
return nil, err
}

@ -12,6 +12,7 @@ import (
"github.com/grafana/loki/pkg/ingester/client"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/util"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@ -166,7 +167,7 @@ func newStoreMock() *storeMock {
return &storeMock{}
}
func (s *storeMock) LazyQuery(ctx context.Context, req *logproto.QueryRequest) (iter.EntryIterator, error) {
func (s *storeMock) LazyQuery(ctx context.Context, req logql.SelectParams) (iter.EntryIterator, error) {
args := s.Called(ctx, req)
return args.Get(0).(iter.EntryIterator), args.Error(1)
}

@ -7,6 +7,7 @@ import (
"github.com/cortexproject/cortex/pkg/ring"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@ -20,13 +21,15 @@ const (
)
func TestQuerier_Query_QueryTimeoutConfigFlag(t *testing.T) {
request := logproto.QueryRequest{
Query: "{type=\"test\"}",
query := &logproto.QueryRequest{
Selector: "{type=\"test\"}",
Limit: 10,
Start: time.Now().Add(-1 * time.Minute),
End: time.Now(),
Direction: logproto.FORWARD,
Regex: "",
}
request := logql.SelectParams{
QueryRequest: query,
}
store := newStoreMock()
@ -36,7 +39,7 @@ func TestQuerier_Query_QueryTimeoutConfigFlag(t *testing.T) {
queryClient.On("Recv").Return(mockQueryResponse([]*logproto.Stream{mockStream(1, 2)}), nil)
ingesterClient := newQuerierClientMock()
ingesterClient.On("Query", mock.Anything, &request, mock.Anything).Return(queryClient, nil)
ingesterClient.On("Query", mock.Anything, query, mock.Anything).Return(queryClient, nil)
q, err := newQuerier(
mockQuerierConfig(),
@ -47,7 +50,7 @@ func TestQuerier_Query_QueryTimeoutConfigFlag(t *testing.T) {
require.NoError(t, err)
ctx := user.InjectOrgID(context.Background(), "test")
_, err = q.Query(ctx, &request)
_, err = q.Select(ctx, request)
require.NoError(t, err)
calls := ingesterClient.GetMockedCallsByMethod("Query")
@ -112,7 +115,6 @@ func TestQuerier_Label_QueryTimeoutConfigFlag(t *testing.T) {
func TestQuerier_Tail_QueryTimeoutConfigFlag(t *testing.T) {
request := logproto.TailRequest{
Query: "{type=\"test\"}",
Regex: "",
DelayFor: 0,
Limit: 10,
Start: time.Now(),

@ -9,7 +9,6 @@ import (
"github.com/cortexproject/cortex/pkg/util/validation"
"github.com/grafana/loki/pkg/chunkenc"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/util"
"github.com/prometheus/common/model"
@ -31,7 +30,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
// Store is the Loki chunk store to retrieve and save chunks.
type Store interface {
chunk.Store
LazyQuery(ctx context.Context, req *logproto.QueryRequest) (iter.EntryIterator, error)
LazyQuery(ctx context.Context, req logql.SelectParams) (iter.EntryIterator, error)
}
type store struct {
@ -53,45 +52,44 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf
// LazyQuery returns an iterator that will query the store for more chunks while iterating instead of fetching all chunks upfront
// for that request.
func (s *store) LazyQuery(ctx context.Context, req *logproto.QueryRequest) (iter.EntryIterator, error) {
expr, err := logql.ParseExpr(req.Query)
func (s *store) LazyQuery(ctx context.Context, req logql.SelectParams) (iter.EntryIterator, error) {
expr, err := req.LogSelector()
if err != nil {
return nil, err
}
if req.Regex != "" {
expr = logql.NewFilterExpr(expr, labels.MatchRegexp, req.Regex)
filter, err := expr.Filter()
if err != nil {
return nil, err
}
querier := logql.QuerierFunc(func(matchers []*labels.Matcher, filter logql.Filter) (iter.EntryIterator, error) {
nameLabelMatcher, err := labels.NewMatcher(labels.MatchEqual, labels.MetricName, "logs")
if err != nil {
return nil, err
}
matchers := expr.Matchers()
nameLabelMatcher, err := labels.NewMatcher(labels.MatchEqual, labels.MetricName, "logs")
if err != nil {
return nil, err
}
matchers = append(matchers, nameLabelMatcher)
from, through := util.RoundToMilliseconds(req.Start, req.End)
chks, fetchers, err := s.GetChunkRefs(ctx, from, through, matchers...)
if err != nil {
return nil, err
}
matchers = append(matchers, nameLabelMatcher)
from, through := util.RoundToMilliseconds(req.Start, req.End)
chks, fetchers, err := s.GetChunkRefs(ctx, from, through, matchers...)
if err != nil {
return nil, err
}
var totalChunks int
for i := range chks {
chks[i] = filterChunksByTime(from, through, chks[i])
totalChunks += len(chks[i])
}
// Create lazy chunks from the chunk refs.
lazyChunks := make([]*chunkenc.LazyChunk, 0, totalChunks)
for i := range chks {
for _, c := range chks[i] {
lazyChunks = append(lazyChunks, &chunkenc.LazyChunk{Chunk: c, Fetcher: fetchers[i]})
}
var totalChunks int
for i := range chks {
chks[i] = filterChunksByTime(from, through, chks[i])
totalChunks += len(chks[i])
}
// Create lazy chunks from the chunk refs.
lazyChunks := make([]*chunkenc.LazyChunk, 0, totalChunks)
for i := range chks {
for _, c := range chks[i] {
lazyChunks = append(lazyChunks, &chunkenc.LazyChunk{Chunk: c, Fetcher: fetchers[i]})
}
return newBatchChunkIterator(ctx, lazyChunks, s.cfg.MaxChunkBatchSize, matchers, filter, req), nil
})
}
return newBatchChunkIterator(ctx, lazyChunks, s.cfg.MaxChunkBatchSize, matchers, filter, req.QueryRequest), nil
return expr.Eval(querier)
}
func filterChunksByTime(from, through model.Time, chunks []chunk.Chunk) []chunk.Chunk {
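
The shape of the refactor above: LazyQuery now derives matchers and the line filter from the parsed selector and defers execution to expr.Eval, which calls back into a logql.QuerierFunc. A minimal sketch of that adapter pattern — fetchStreams is a hypothetical helper, and the LogSelectorExpr type name is assumed from the surrounding code; only the QuerierFunc signature and the Eval call come from this diff:

func evalSelector(expr logql.LogSelectorExpr) (iter.EntryIterator, error) {
	querier := logql.QuerierFunc(func(matchers []*labels.Matcher, filter logql.Filter) (iter.EntryIterator, error) {
		// A real querier resolves matchers to chunks (as LazyQuery does above)
		// and applies filter to each line while iterating.
		return fetchStreams(matchers, filter) // hypothetical helper
	})
	return expr.Eval(querier)
}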

@ -15,6 +15,7 @@ import (
"github.com/cortexproject/cortex/pkg/util/validation"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
"github.com/prometheus/common/model"
"github.com/weaveworks/common/user"
)
@ -29,8 +30,7 @@ var (
//go test -bench=. -benchmem -memprofile memprofile.out -cpuprofile profile.out
func Benchmark_store_LazyQueryRegexBackward(b *testing.B) {
benchmarkStoreQuery(b, &logproto.QueryRequest{
Query: "{foo=\"bar\"}",
Regex: "fuzz",
Selector: `{foo="bar"} |= "fuzz"`,
Limit: 1000,
Start: time.Unix(0, start.UnixNano()),
End: time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()),
@ -40,8 +40,7 @@ func Benchmark_store_LazyQueryRegexBackward(b *testing.B) {
func Benchmark_store_LazyQueryLogQLBackward(b *testing.B) {
benchmarkStoreQuery(b, &logproto.QueryRequest{
Query: "{foo=\"bar\"} |= \"test\" != \"toto\"",
Regex: "fuzz",
Selector: `{foo="bar"} |= "test" != "toto" |= "fuzz"`,
Limit: 1000,
Start: time.Unix(0, start.UnixNano()),
End: time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()),
@ -51,8 +50,7 @@ func Benchmark_store_LazyQueryLogQLBackward(b *testing.B) {
func Benchmark_store_LazyQueryRegexForward(b *testing.B) {
benchmarkStoreQuery(b, &logproto.QueryRequest{
Query: "{foo=\"bar\"}",
Regex: "fuzz",
Selector: `{foo="bar"} |= "fuzz"`,
Limit: 1000,
Start: time.Unix(0, start.UnixNano()),
End: time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()),
@ -62,7 +60,7 @@ func Benchmark_store_LazyQueryRegexForward(b *testing.B) {
func Benchmark_store_LazyQueryForward(b *testing.B) {
benchmarkStoreQuery(b, &logproto.QueryRequest{
Query: "{foo=\"bar\"}",
Selector: `{foo="bar"}`,
Limit: 1000,
Start: time.Unix(0, start.UnixNano()),
End: time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()),
@ -72,7 +70,7 @@ func Benchmark_store_LazyQueryForward(b *testing.B) {
func Benchmark_store_LazyQueryBackward(b *testing.B) {
benchmarkStoreQuery(b, &logproto.QueryRequest{
Query: "{foo=\"bar\"}",
Selector: `{foo="bar"}`,
Limit: 1000,
Start: time.Unix(0, start.UnixNano()),
End: time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()),
@ -102,7 +100,7 @@ func benchmarkStoreQuery(b *testing.B, query *logproto.QueryRequest) {
}
}()
for i := 0; i < b.N; i++ {
iter, err := chunkStore.LazyQuery(ctx, query)
iter, err := chunkStore.LazyQuery(ctx, logql.SelectParams{QueryRequest: query})
if err != nil {
b.Fatal(err)
}
@ -334,7 +332,7 @@ func Test_store_LazyQuery(t *testing.T) {
MaxChunkBatchSize: 10,
},
}
it, err := s.LazyQuery(context.Background(), tt.req)
it, err := s.LazyQuery(context.Background(), logql.SelectParams{QueryRequest: tt.req})
if err != nil {
t.Errorf("store.LazyQuery() error = %v", err)
return

@ -83,16 +83,16 @@ func newChunk(stream logproto.Stream) chunk.Chunk {
}
func newMatchers(matchers string) []*labels.Matcher {
ls, err := logql.ParseExpr(matchers)
res, err := logql.ParseMatchers(matchers)
if err != nil {
panic(err)
}
return ls.Matchers()
return res
}
func newQuery(query string, start, end time.Time, direction logproto.Direction) *logproto.QueryRequest {
return &logproto.QueryRequest{
Query: query,
Selector: query,
Start: start,
Limit: 1000,
End: end,
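
ParseMatchers replaces the earlier ParseExpr(...).Matchers() two-step for plain label selectors; a quick sketch, with an illustrative selector string:

matchers, err := logql.ParseMatchers(`{foo="bar", env!="dev"}`)
if err != nil {
	panic(err)
}
for _, m := range matchers {
	fmt.Printf("%s%s%q\n", m.Name, m.Type, m.Value) // foo="bar", env!="dev"
}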

@ -9,6 +9,7 @@ import (
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/grafana/loki/pkg/logql"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
)
type byLabel []client.LabelAdapter
@ -19,11 +20,10 @@ func (s byLabel) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// ToClientLabels parses the labels and converts them to the Cortex type.
func ToClientLabels(labels string) ([]client.LabelAdapter, error) {
ls, err := logql.ParseExpr(labels)
matchers, err := logql.ParseMatchers(labels)
if err != nil {
return nil, err
}
matchers := ls.Matchers()
result := make([]client.LabelAdapter, 0, len(matchers))
for _, m := range matchers {
result = append(result, client.LabelAdapter{
@ -50,3 +50,13 @@ func RoundToMilliseconds(from, through time.Time) (model.Time, model.Time) {
return model.Time(int64(math.Floor(float64(from.UnixNano()) / float64(time.Millisecond)))),
model.Time(int64(math.Ceil(float64(through.UnixNano()) / float64(time.Millisecond))))
}
// LabelsToMetric converts labels.Labels to a model.Metric.
// Don't do this on any performance-sensitive paths.
func LabelsToMetric(ls labels.Labels) model.Metric {
m := make(model.Metric, len(ls))
for _, l := range ls {
m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
}
return m
}
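
A usage sketch for the two helpers above: RoundToMilliseconds floors the start and ceils the end so the queried window never shrinks, and LabelsToMetric is a plain O(n) copy (hence the hot-path warning). Values are illustrative:

from, through := util.RoundToMilliseconds(
	time.Unix(0, 1500000), // 1.5ms floors to 1ms
	time.Unix(0, 2500000), // 2.5ms ceils to 3ms
)
fmt.Println(int64(from), int64(through)) // 1 3

m := util.LabelsToMetric(labels.FromStrings("job", "loki", "level", "info"))
fmt.Println(m) // {job="loki", level="info"}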
