diff --git a/docs/operations/upgrade.md b/docs/operations/upgrade.md index 8080ff1d0e..707ed2e53d 100644 --- a/docs/operations/upgrade.md +++ b/docs/operations/upgrade.md @@ -6,6 +6,11 @@ Unfortunately Loki is software and software is hard and sometimes things are not On this page we will document any upgrade issues/gotchas/considerations we are aware of. +## 1.6.0 + +A new ingester GRPC API has been added to speed up metric queries. To ensure a rollout without query errors, make sure you upgrade all ingesters first. +Once this is done you can then proceed with the rest of the deployment; this ensures that queriers won't look for an API that is not yet available. + ## 1.5.0 Note: The required upgrade path outlined for version 1.4.0 below is still true for moving to 1.5.0 from any release older than 1.4.0 (e.g. 1.3.0->1.5.0 needs to also look at the 1.4.0 upgrade requirements). @@ -102,8 +107,8 @@ docker run -d --name=loki --mount source=loki-data,target=/loki -p 3100:3100 gra Notice the change in the `target=/loki` for 1.5.0 to the new data directory location specified in the [included Loki config file](../../cmd/loki/loki-docker-config.yaml). -The intermediate step of using an ubuntu image to change the ownership of the Loki files to the new user might not be necessary if you can easily access these files to run the `chown` command directly. -That is if you have access to `/var/lib/docker/volumes` or if you mounted to a different local filesystem directory, you can change the ownership directly without using a container. +The intermediate step of using an ubuntu image to change the ownership of the Loki files to the new user might not be necessary if you can easily access these files to run the `chown` command directly. +That is if you have access to `/var/lib/docker/volumes` or if you mounted to a different local filesystem directory, you can change the ownership directly without using a container. 
### Loki Duration Configs @@ -146,7 +151,7 @@ The new values are: ```yaml min_period: max_period: -max_retries: +max_retries: ``` ## 1.4.0 @@ -157,9 +162,9 @@ One such config change which will affect Loki users: In the [cache_config](../configuration/README.md#cache_config): -`defaul_validity` has changed to `default_validity` - -Also in the unlikely case you were configuring your schema via arguments and not a config file, this is no longer supported. This is not something we had ever provided as an option via docs and is unlikely anyone is doing, but worth mentioning. +`defaul_validity` has changed to `default_validity` + +Also in the unlikely case you were configuring your schema via arguments and not a config file, this is no longer supported. This is not something we had ever provided as an option via docs and is unlikely anyone is doing, but worth mentioning. The other config changes should not be relevant to Loki. @@ -184,7 +189,7 @@ There are two options for upgrade if you are not on version 1.3.0 and are using * Upgrade first to v1.3.0 **BEFORE** upgrading to v1.4.0 -OR +OR **Note:** If you are running a single binary you only need to add this flag to your single binary command. 
diff --git a/go.mod b/go.mod index 43261515d3..bb3d895aac 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( github.com/blang/semver v3.5.1+incompatible // indirect github.com/bmatcuk/doublestar v1.2.2 github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee + github.com/cespare/xxhash/v2 v2.1.1 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf github.com/cortexproject/cortex v1.2.1-0.20200702073552-0ea5a8b50b19 @@ -44,6 +45,7 @@ require ( github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.10.0 github.com/prometheus/prometheus v1.8.2-0.20200626180636-d17d88935c8d + github.com/segmentio/fasthash v1.0.2 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd github.com/stretchr/testify v1.5.1 @@ -53,6 +55,7 @@ require ( github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5 go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50 golang.org/x/net v0.0.0-20200602114024-627f9648deb9 + golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect google.golang.org/grpc v1.29.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 diff --git a/go.sum b/go.sum index 184117eab2..85b0005dd9 100644 --- a/go.sum +++ b/go.sum @@ -1074,6 +1074,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszjmIzcY/tvdDYKRLVvzggtAmmJkn9j4GQ= github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= +github.com/segmentio/fasthash v1.0.2 h1:86fGDl2hB+iSHYlccB/FP9qRGvLNuH/fhEEFn6gnQUs= +github.com/segmentio/fasthash v1.0.2/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= 
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sercand/kuberesolver v2.1.0+incompatible h1:iJ1oCzPQ/aacsbCWLfJW1hPKkHMvCEgNSA9kvWcb9MY= @@ -1429,6 +1431,8 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/pkg/chunkenc/dumb_chunk.go b/pkg/chunkenc/dumb_chunk.go index bd286b624f..a831ccbc34 100644 --- a/pkg/chunkenc/dumb_chunk.go +++ b/pkg/chunkenc/dumb_chunk.go @@ -93,6 +93,10 @@ func (c *dumbChunk) Iterator(_ context.Context, from, through time.Time, directi }, nil } +func (c *dumbChunk) SampleIterator(_ context.Context, from, through time.Time, _ logql.LineFilter, _ logql.SampleExtractor) iter.SampleIterator { + return nil +} + func (c *dumbChunk) Bytes() ([]byte, error) { return nil, nil } diff --git a/pkg/chunkenc/hash_test.go b/pkg/chunkenc/hash_test.go new file mode 100644 index 0000000000..bbcda322e1 --- /dev/null +++ b/pkg/chunkenc/hash_test.go @@ -0,0 +1,73 @@ +package chunkenc + +import ( + "hash/fnv" + "hash/maphash" + "testing" + + 
"github.com/cespare/xxhash/v2" + "github.com/segmentio/fasthash/fnv1a" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/chunkenc/testdata" +) + +var res uint64 + +func Benchmark_fnv64a(b *testing.B) { + for n := 0; n < b.N; n++ { + for i := 0; i < len(testdata.LogsBytes); i++ { + h := fnv.New64a() + _, _ = h.Write(testdata.LogsBytes[i]) + res = h.Sum64() + } + } +} + +func Benchmark_fnv64a_third_party(b *testing.B) { + for n := 0; n < b.N; n++ { + for i := 0; i < len(testdata.LogsBytes); i++ { + res = fnv1a.HashBytes64(testdata.LogsBytes[i]) + } + } +} + +func Benchmark_xxhash(b *testing.B) { + for n := 0; n < b.N; n++ { + for i := 0; i < len(testdata.LogsBytes); i++ { + res = xxhash.Sum64(testdata.LogsBytes[i]) + } + } +} + +func Benchmark_hashmap(b *testing.B) { + // I discarded hashmap/map as it will compute different value on different binary for the same entry + var h maphash.Hash + for n := 0; n < b.N; n++ { + for i := 0; i < len(testdata.LogsBytes); i++ { + h.SetSeed(maphash.MakeSeed()) + _, _ = h.Write(testdata.LogsBytes[i]) + res = h.Sum64() + } + } +} + +func Test_xxhash_integrity(t *testing.T) { + data := []uint64{} + + for i := 0; i < len(testdata.LogsBytes); i++ { + data = append(data, xxhash.Sum64(testdata.LogsBytes[i])) + } + + for i := 0; i < len(testdata.LogsBytes); i++ { + require.Equal(t, data[i], xxhash.Sum64(testdata.LogsBytes[i])) + } + + unique := map[uint64]struct{}{} + for i := 0; i < len(testdata.LogsBytes); i++ { + _, ok := unique[xxhash.Sum64(testdata.LogsBytes[i])] + require.False(t, ok, string(testdata.LogsBytes[i])) // all lines have been made unique + unique[xxhash.Sum64(testdata.LogsBytes[i])] = struct{}{} + } + +} diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go index 8fbbac065c..ccde54b9b7 100644 --- a/pkg/chunkenc/interface.go +++ b/pkg/chunkenc/interface.go @@ -98,6 +98,7 @@ type Chunk interface { SpaceFor(*logproto.Entry) bool Append(*logproto.Entry) error Iterator(ctx context.Context, 
from, through time.Time, direction logproto.Direction, filter logql.LineFilter) (iter.EntryIterator, error) + SampleIterator(ctx context.Context, from, through time.Time, filter logql.LineFilter, extractor logql.SampleExtractor) iter.SampleIterator // Returns the list of blocks in the chunks. Blocks(mintT, maxtT time.Time) []Block Size() int @@ -121,4 +122,6 @@ type Block interface { Entries() int // Iterator returns an entry iterator for the block. Iterator(context.Context, logql.LineFilter) iter.EntryIterator + // SampleIterator returns a sample iterator for the block. + SampleIterator(context.Context, logql.LineFilter, logql.SampleExtractor) iter.SampleIterator } diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go index 9fe0bd1417..407cd3add9 100644 --- a/pkg/chunkenc/memchunk.go +++ b/pkg/chunkenc/memchunk.go @@ -11,6 +11,7 @@ import ( "io" "time" + "github.com/cespare/xxhash/v2" "github.com/cortexproject/cortex/pkg/util" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" @@ -502,6 +503,29 @@ func (c *MemChunk) Iterator(ctx context.Context, mintT, maxtT time.Time, directi return iter.NewEntryReversedIter(iterForward) } +// Iterator implements Chunk. 
+func (c *MemChunk) SampleIterator(ctx context.Context, mintT, maxtT time.Time, filter logql.LineFilter, extractor logql.SampleExtractor) iter.SampleIterator { + mint, maxt := mintT.UnixNano(), maxtT.UnixNano() + its := make([]iter.SampleIterator, 0, len(c.blocks)+1) + + for _, b := range c.blocks { + if maxt < b.mint || b.maxt < mint { + continue + } + its = append(its, b.SampleIterator(ctx, filter, extractor)) + } + + if !c.head.isEmpty() { + its = append(its, c.head.sampleIterator(ctx, mint, maxt, filter, extractor)) + } + + return iter.NewTimeRangedSampleIterator( + iter.NewNonOverlappingSampleIterator(its, ""), + mint, + maxt, + ) +} + // Blocks implements Chunk func (c *MemChunk) Blocks(mintT, maxtT time.Time) []Block { mint, maxt := mintT.UnixNano(), maxtT.UnixNano() @@ -519,7 +543,14 @@ func (b block) Iterator(ctx context.Context, filter logql.LineFilter) iter.Entry if len(b.b) == 0 { return emptyIterator } - return newBufferedIterator(ctx, b.readers, b.b, filter) + return newEntryIterator(ctx, b.readers, b.b, filter) +} + +func (b block) SampleIterator(ctx context.Context, filter logql.LineFilter, extractor logql.SampleExtractor) iter.SampleIterator { + if len(b.b) == 0 { + return iter.NoopIterator + } + return newSampleIterator(ctx, b.readers, b.b, filter, extractor) } func (b block) Offset() int { @@ -566,6 +597,34 @@ func (hb *headBlock) iterator(ctx context.Context, mint, maxt int64, filter logq } } +func (hb *headBlock) sampleIterator(ctx context.Context, mint, maxt int64, filter logql.LineFilter, extractor logql.SampleExtractor) iter.SampleIterator { + if hb.isEmpty() || (maxt < hb.mint || hb.maxt < mint) { + return iter.NoopIterator + } + chunkStats := stats.GetChunkData(ctx) + chunkStats.HeadChunkLines += int64(len(hb.entries)) + samples := make([]logproto.Sample, 0, len(hb.entries)) + for _, e := range hb.entries { + chunkStats.HeadChunkBytes += int64(len(e.s)) + if filter == nil || filter.Filter([]byte(e.s)) { + if value, ok := 
extractor.Extract([]byte(e.s)); ok { + samples = append(samples, logproto.Sample{ + Timestamp: e.t, + Value: value, + Hash: xxhash.Sum64([]byte(e.s)), + }) + + } + } + } + + if len(samples) == 0 { + return iter.NoopIterator + } + + return iter.NewSeriesIterator(logproto.Series{Samples: samples}) +} + var emptyIterator = &listIterator{} type listIterator struct { @@ -604,12 +663,13 @@ type bufferedIterator struct { reader io.Reader pool ReaderPool - cur logproto.Entry - err error - buf []byte // The buffer for a single entry. - decBuf []byte // The buffer for decoding the lengths. + decBuf []byte // The buffer for decoding the lengths. + buf []byte // The buffer for a single entry. + currLine []byte // the current line, this is the same as the buffer but sliced the the line size. + currTs int64 + consumed bool closed bool @@ -627,6 +687,7 @@ func newBufferedIterator(ctx context.Context, pool ReaderPool, b []byte, filter pool: pool, filter: filter, decBuf: make([]byte, binary.MaxVarintLen64), + consumed: true, } } @@ -649,8 +710,9 @@ func (si *bufferedIterator) Next() bool { if si.filter != nil && !si.filter.Filter(line) { continue } - si.cur.Line = string(line) - si.cur.Timestamp = time.Unix(0, ts) + si.currTs = ts + si.currLine = line + si.consumed = false return true } } @@ -690,7 +752,6 @@ func (si *bufferedIterator) moveNext() (int64, []byte, bool) { return 0, nil, false } } - // Then process reading the line. 
n, err := si.bufReader.Read(si.buf[:lineSize]) if err != nil && err != io.EOF { @@ -708,10 +769,6 @@ func (si *bufferedIterator) moveNext() (int64, []byte, bool) { return ts, si.buf[:lineSize], true } -func (si *bufferedIterator) Entry() logproto.Entry { - return si.cur -} - func (si *bufferedIterator) Error() error { return si.err } func (si *bufferedIterator) Close() error { @@ -741,3 +798,58 @@ func (si *bufferedIterator) close() { } func (si *bufferedIterator) Labels() string { return "" } + +func newEntryIterator(ctx context.Context, pool ReaderPool, b []byte, filter logql.LineFilter) iter.EntryIterator { + return &entryBufferedIterator{ + bufferedIterator: newBufferedIterator(ctx, pool, b, filter), + } +} + +type entryBufferedIterator struct { + *bufferedIterator + cur logproto.Entry +} + +func (e *entryBufferedIterator) Entry() logproto.Entry { + if !e.consumed { + e.cur.Timestamp = time.Unix(0, e.currTs) + e.cur.Line = string(e.currLine) + e.consumed = true + } + return e.cur +} + +func newSampleIterator(ctx context.Context, pool ReaderPool, b []byte, filter logql.LineFilter, extractor logql.SampleExtractor) iter.SampleIterator { + it := &sampleBufferedIterator{ + bufferedIterator: newBufferedIterator(ctx, pool, b, filter), + extractor: extractor, + } + return it +} + +type sampleBufferedIterator struct { + *bufferedIterator + extractor logql.SampleExtractor + cur logproto.Sample + currValue float64 +} + +func (e *sampleBufferedIterator) Next() bool { + var ok bool + for e.bufferedIterator.Next() { + if e.currValue, ok = e.extractor.Extract(e.currLine); ok { + return true + } + } + return false +} + +func (e *sampleBufferedIterator) Sample() logproto.Sample { + if !e.consumed { + e.cur.Timestamp = e.currTs + e.cur.Hash = xxhash.Sum64(e.currLine) + e.cur.Value = e.currValue + e.consumed = true + } + return e.cur +} diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go index 7d4a8a91a0..b05b477811 100644 --- 
a/pkg/chunkenc/memchunk_test.go +++ b/pkg/chunkenc/memchunk_test.go @@ -112,6 +112,21 @@ func TestBlock(t *testing.T) { } require.NoError(t, it.Error()) + require.NoError(t, it.Close()) + require.Equal(t, len(cases), idx) + + sampleIt := chk.SampleIterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), nil, logql.ExtractCount) + idx = 0 + for sampleIt.Next() { + s := sampleIt.Sample() + require.Equal(t, cases[idx].ts, s.Timestamp) + require.Equal(t, 1., s.Value) + require.NotEmpty(t, s.Hash) + idx++ + } + + require.NoError(t, sampleIt.Error()) + require.NoError(t, sampleIt.Close()) require.Equal(t, len(cases), idx) t.Run("bounded-iteration", func(t *testing.T) { @@ -225,7 +240,7 @@ func TestSerialization(t *testing.T) { t.Run(enc.String(), func(t *testing.T) { chk := NewMemChunk(enc, testBlockSize, testTargetSize) - numSamples := 500000 + numSamples := 50000 for i := 0; i < numSamples; i++ { require.NoError(t, chk.Append(logprotoEntry(int64(i), string(i)))) @@ -246,9 +261,18 @@ func TestSerialization(t *testing.T) { require.Equal(t, int64(i), e.Timestamp.UnixNano()) require.Equal(t, string(i), e.Line) } - require.NoError(t, it.Error()) + sampleIt := bc.SampleIterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), nil, logql.ExtractCount) + for i := 0; i < numSamples; i++ { + require.True(t, sampleIt.Next(), i) + + s := sampleIt.Sample() + require.Equal(t, int64(i), s.Timestamp) + require.Equal(t, 1., s.Value) + } + require.NoError(t, sampleIt.Error()) + byt2, err := chk.Bytes() require.NoError(t, err) diff --git a/pkg/chunkenc/testdata/testdata.go b/pkg/chunkenc/testdata/testdata.go index 265ca4d640..c0520f7a6b 100644 --- a/pkg/chunkenc/testdata/testdata.go +++ b/pkg/chunkenc/testdata/testdata.go @@ -4,13 +4,22 @@ import "strings" // LogString returns a test log line. Returns the same line for the same index. 
func LogString(index int64) string { - if index > int64(len(logs)-1) { - index = index % int64(len(logs)) + if index > int64(len(Logs)-1) { + index = index % int64(len(Logs)) } - return logs[index] + return Logs[index] } -var logs = strings.Split(`level=info ts=2019-12-12T15:00:08.325Z caller=compact.go:441 component=tsdb msg="compact blocks" count=3 mint=1576130400000 maxt=1576152000000 ulid=01DVX9ZHNM71GRCJS7M34Q0EV7 sources="[01DVWNC6NWY1A60AZV3Z6DGS65 01DVWW7XXX75GHA6ZDTD170CSZ 01DVX33N5W86CWJJVRPAVXJRWJ]" duration=2.897213221s +var LogsBytes [][]byte + +func init() { + LogsBytes = make([][]byte, len(Logs)) + for i, l := range Logs { + LogsBytes[i] = []byte(l) + } +} + +var Logs = strings.Split(`level=info ts=2019-12-12T15:00:08.325Z caller=compact.go:441 component=tsdb msg="compact blocks" count=3 mint=1576130400000 maxt=1576152000000 ulid=01DVX9ZHNM71GRCJS7M34Q0EV7 sources="[01DVWNC6NWY1A60AZV3Z6DGS65 01DVWW7XXX75GHA6ZDTD170CSZ 01DVX33N5W86CWJJVRPAVXJRWJ]" duration=2.897213221s level=info ts=2019-12-12T15:00:08.296Z caller=compact.go:441 component=tsdb msg="compact blocks" count=3 mint=1576130400000 maxt=1576152000000 ulid=01DVX9ZHQRVN42AF196NYJ9C4C sources="[01DVWNC6NSPJRCSBZ4QD3SXS66 01DVWW7XY69Y4YT09HR0RSR8KY 01DVX33N5SMVPB1TMD9J1M8GGK]" duration=2.800759388s level=info ts=2019-12-12T15:00:05.285Z caller=head.go:666 component=tsdb msg="WAL checkpoint complete" first=1037 last=1039 duration=3.030078405s level=info ts=2019-12-12T15:00:05.225Z caller=head.go:666 component=tsdb msg="WAL checkpoint complete" first=1037 last=1039 duration=3.019791992s @@ -258,9 +267,9 @@ level=info ts=2019-12-10T13:50:13.596Z caller=main.go:771 msg="Completed loading level=info ts=2019-12-10T13:50:13.553Z caller=kubernetes.go:192 component="discovery manager scrape" discovery=k8s msg="Using pod service account via in-cluster config" level=info ts=2019-12-10T13:50:13.552Z caller=kubernetes.go:192 component="discovery manager scrape" discovery=k8s msg="Using pod service account 
via in-cluster config" level=info ts=2019-12-10T13:50:13.551Z caller=kubernetes.go:192 component="discovery manager scrape" discovery=k8s msg="Using pod service account via in-cluster config" -level=info ts=2019-12-10T13:50:13.551Z caller=kubernetes.go:192 component="discovery manager scrape" discovery=k8s msg="Using pod service account via in-cluster config" +level=info ts=2019-12-10T13:50:13.554Z caller=kubernetes.go:192 component="discovery manager scrape" discovery=k8s msg="Using pod service account via in-cluster config" level=info ts=2019-12-10T13:50:13.546Z caller=main.go:743 msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml -curl -X POST --fail -o - -sS http://localhost:80/prometheus/-/reload +1 curl -X POST --fail -o - -sS http://localhost:80/prometheus/-/reload 2019/12/10 13:50:13 DEBUG: "/etc/prometheus/..2019_12_05_07_22_08.390693530": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC 2019/12/10 13:50:13 DEBUG: "/etc/prometheus/..data": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC 2019/12/10 13:50:13 DEBUG: "/etc/prometheus/..data/prometheus.yml": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC @@ -330,35 +339,35 @@ level=info ts=2019-12-10T11:38:35.064Z caller=queue_manager.go:559 component=rem level=error ts=2019-12-10T11:38:12.281Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" level=error ts=2019-12-10T11:38:12.281Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=69 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples" level=error ts=2019-12-10T11:38:12.235Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.235Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.236Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:12.204Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" level=error ts=2019-12-10T11:38:12.183Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=89 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples" level=error ts=2019-12-10T11:38:12.129Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=88 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples" level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" 
count=91 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples" -level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=86 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples" -level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=90 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples" -level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) 
exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=56 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 56 samples" +level=error ts=2019-12-10T11:38:12.128Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.129Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=86 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples" +level=error ts=2019-12-10T11:38:12.139Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=90 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples" +level=error ts=2019-12-10T11:38:12.140Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.141Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.142Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=56 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 56 samples" level=error ts=2019-12-10T11:38:12.125Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=52 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 52 samples" level=error ts=2019-12-10T11:38:12.124Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:12.123Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" level=error ts=2019-12-10T11:38:12.122Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=69 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples" level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=67 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples" -level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=78 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 78 samples" -level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.119Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.119Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.1201Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=67 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples" +level=error ts=2019-12-10T11:38:12.1202Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=78 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 78 samples" +level=error ts=2019-12-10T11:38:12.1203Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.1191Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.1192Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:12.081Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" -level=error ts=2019-12-10T11:38:12.037Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.037Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.037Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.024Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.0371Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.0372Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.0373Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:12.024Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:12.024Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=89 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples" +level=error ts=2019-12-10T11:38:12.025Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:12.026Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=89 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples" level=error ts=2019-12-10T11:38:11.920Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:11.917Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:11.916Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" @@ -367,76 +376,76 @@ level=error ts=2019-12-10T11:38:11.913Z caller=queue_manager.go:770 component=re level=error ts=2019-12-10T11:38:11.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:11.721Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:11.176Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 
component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=99 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples" -level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=85 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples" -level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=70 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples" -level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=72 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples" -level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" -level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=46 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 46 samples" -level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" -level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=92 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples" -level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" -level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:10.955Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:10.953Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:10.942Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=86 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples" -level=error 
ts=2019-12-10T11:38:10.942Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:10.942Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:11.1451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:11.1452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:11.1453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=99 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples" +level=error ts=2019-12-10T11:38:11.1454Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=85 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples" +level=error ts=2019-12-10T11:38:11.1455Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:11.1441Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:11.1442Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=70 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples" +level=error ts=2019-12-10T11:38:11.1443Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=72 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples" +level=error ts=2019-12-10T11:38:11.1444Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" +level=error ts=2019-12-10T11:38:11.1445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=46 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 46 samples" +level=error ts=2019-12-10T11:38:11.1431Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:11.1432Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" +level=error ts=2019-12-10T11:38:11.1433Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:11.1434Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=92 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples" +level=error ts=2019-12-10T11:38:11.0011Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:11.0012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:11.0012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate 
limit (200000) exceeded while adding 84 samples" +level=error ts=2019-12-10T11:38:11.0013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:10.9551Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:10.9532Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:10.9421Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=86 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples" +level=error ts=2019-12-10T11:38:10.9422Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:10.9423Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error 
ts=2019-12-10T11:38:10.941Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=58 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 58 samples" -level=error ts=2019-12-10T11:38:10.941Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:10.940Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:10.942Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:10.940Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:10.943Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=23 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 23 samples" level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=59 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 59 samples" level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=81 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples" level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" 
count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:10.879Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:10.8781Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:10.8782Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:10.877Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" level=error ts=2019-12-10T11:38:10.877Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=70 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 70 samples" level=error ts=2019-12-10T11:38:10.877Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=72 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples" level=error ts=2019-12-10T11:38:10.039Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:09.902Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples" level=error ts=2019-12-10T11:38:09.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=92 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples" +level=error ts=2019-12-10T11:38:09.902Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.903Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=92 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples" level=error ts=2019-12-10T11:38:09.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=95 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples" -level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=73 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 73 samples" -level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=68 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 68 samples" -level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=35 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 35 samples" -level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" -level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" +level=error ts=2019-12-10T11:38:09.8971Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=95 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples" +level=error ts=2019-12-10T11:38:09.8972Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=73 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 73 samples" +level=error ts=2019-12-10T11:38:09.8973Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8974Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=68 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 68 samples" +level=error ts=2019-12-10T11:38:09.8975Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8976Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=35 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 35 samples" +level=error ts=2019-12-10T11:38:09.8977Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8951Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8952Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" +level=error ts=2019-12-10T11:38:09.8953Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" level=error ts=2019-12-10T11:38:09.876Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=88 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples" -level=error ts=2019-12-10T11:38:09.840Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=92 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples" -level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=85 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples" -level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8401Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=88 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples" +level=error ts=2019-12-10T11:38:09.8402Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=92 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples" +level=error ts=2019-12-10T11:38:09.8403Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8404Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=85 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples" +level=error ts=2019-12-10T11:38:09.8406Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8407Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:09.832Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:09.825Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.825Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" -level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=17 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 17 samples" -level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8251Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8241Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8242Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8243Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" +level=error ts=2019-12-10T11:38:09.8244Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=17 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 17 samples" +level=error ts=2019-12-10T11:38:09.8245Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.8246Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:09.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=94 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples" level=error ts=2019-12-10T11:38:09.806Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=81 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples" level=error ts=2019-12-10T11:38:09.736Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" @@ -452,11 +461,11 @@ level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=re level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=50 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 50 samples" level=error 
ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=77 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples" level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=86 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples" -level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=101 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=102 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=103 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" -level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=113 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 113 samples" level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=90 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples" level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable
error" count=37 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 37 samples" level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=67 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples" @@ -465,51 +474,51 @@ level=error ts=2019-12-10T11:38:08.795Z caller=queue_manager.go:770 component=re level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=94 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples" level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=89 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples" -level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=77 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples" -level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.693Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:08.693Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=104 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=58 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 58 samples" level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=91 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples" level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=59 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 59 samples" -level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.692Z
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.691Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=101 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=102 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=103 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=104 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:08.691Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=105 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" level=error ts=2019-12-10T11:38:08.678Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" -level=error ts=2019-12-10T11:38:08.677Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.648Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.647Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.677Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" +level=error ts=2019-12-10T11:38:08.648Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 107 samples" +level=error ts=2019-12-10T11:38:08.647Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 108 samples" level=error ts=2019-12-10T11:38:08.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=90 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples" -level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=109 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 109 samples" +level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=110 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 110 samples" +level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=111 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 111 samples" level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=71 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples" -level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.603Z
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=112 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 112 samples" level=error ts=2019-12-10T11:38:08.602Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" level=error ts=2019-12-10T11:38:08.596Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" -level=error ts=2019-12-10T11:38:08.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=113 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 113 samples" +level=error ts=2019-12-10T11:38:08.595Z caller=queue_manager.go:770 component=remote
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=114 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 114 samples" level=error ts=2019-12-10T11:38:08.553Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=95 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples" -level=error ts=2019-12-10T11:38:08.552Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.552Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=115 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 115 samples" level=error ts=2019-12-10T11:38:08.552Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=73 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 73 samples" level=error ts=2019-12-10T11:38:08.552Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=87 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 87 samples" -level=error ts=2019-12-10T11:38:08.541Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error"
count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.541Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 116 samples" level=error ts=2019-12-10T11:38:08.501Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" -level=error ts=2019-12-10T11:38:08.501Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.501Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 117 samples" level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=91 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples" -level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) 
exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 118 samples" +level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 119 samples" +level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 120 samples" level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=72 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples" level=error ts=2019-12-10T11:38:08.447Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" -level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 130 samples" +level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 131 samples" +level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 132 samples" +level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 133 samples" level=error ts=2019-12-10T11:38:08.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=70 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples" level=error ts=2019-12-10T11:38:08.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=65 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 65 samples" level=error ts=2019-12-10T11:38:08.441Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=88 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples" @@ -517,263 +526,263 @@ level=error ts=2019-12-10T11:38:08.433Z caller=queue_manager.go:770 component=re level=error ts=2019-12-10T11:38:08.431Z caller=queue_manager.go:770 
component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=89 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples" level=error ts=2019-12-10T11:38:08.407Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=80 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples" level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.394Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.394Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.394Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=82 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples" +level=error ts=2019-12-10T11:38:08.3941Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.395Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.396Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=82 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples" level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=90 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples" -level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.604Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.394Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.391Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:08.395Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:07.604Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error 
ts=2019-12-10T11:38:07.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" level=error ts=2019-12-10T11:38:07.602Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=60 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 60 samples" -level=error ts=2019-12-10T11:38:07.602Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.602Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=71 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples" level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" level=error ts=2019-12-10T11:38:07.600Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.599Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.599Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" level=error ts=2019-12-10T11:38:07.599Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=93 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 93 samples" level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=72 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples" level=error ts=2019-12-10T11:38:07.599Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error 
ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" +level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" level=error ts=2019-12-10T11:38:07.596Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=70 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples" -level=error ts=2019-12-10T11:38:07.596Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.596Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 107 samples" +level=error ts=2019-12-10T11:38:07.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 108 samples" level=error ts=2019-12-10T11:38:07.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=74 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples" level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=99 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 99 samples" -level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 109 samples" +level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 110 samples" level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=91 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples" level=error ts=2019-12-10T11:38:07.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=73 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 73 samples" -level=error ts=2019-12-10T11:38:07.593Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 111 samples" level=error ts=2019-12-10T11:38:07.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=81 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples" level=error ts=2019-12-10T11:38:07.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=86 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples" -level=error ts=2019-12-10T11:38:07.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.592Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 112 samples" +level=error ts=2019-12-10T11:38:07.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 113 samples" level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=53 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 53 samples" -level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 114 samples" +level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 115 samples" +level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 116 samples" +level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 117 samples" +level=error ts=2019-12-10T11:38:07.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate 
limit (200000) exceeded while adding 118 samples" level=error ts=2019-12-10T11:38:07.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=39 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 39 samples" -level=error ts=2019-12-10T11:38:07.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.587Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.513Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 119 samples" +level=error ts=2019-12-10T11:38:07.589Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 120 samples" +level=error ts=2019-12-10T11:38:07.587Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 121 samples" +level=error ts=2019-12-10T11:38:07.513Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 122 samples" level=error ts=2019-12-10T11:38:07.512Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=59 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 59 samples" -level=error ts=2019-12-10T11:38:07.512Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.512Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 123 samples" level=error ts=2019-12-10T11:38:07.510Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples" -level=error ts=2019-12-10T11:38:07.506Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.506Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.506Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 124 samples" +level=error ts=2019-12-10T11:38:07.506Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 125 samples" +level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 126 samples" level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=67 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples" -level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" +level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=60 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 60 samples" level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=99 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) 
exceeded while adding 99 samples" level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" -level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.494Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.489Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.488Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.484Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.483Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.483Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.480Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.480Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=66 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples" +level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 127 samples" +level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 128 samples" +level=error ts=2019-12-10T11:38:07.494Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 129 samples" +level=error ts=2019-12-10T11:38:07.489Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 130 samples" +level=error ts=2019-12-10T11:38:07.488Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 131 samples" +level=error ts=2019-12-10T11:38:07.484Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 132 samples" +level=error ts=2019-12-10T11:38:07.483Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 133 samples" +level=error ts=2019-12-10T11:38:07.483Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 134 samples" +level=error ts=2019-12-10T11:38:07.480Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 136 samples" +level=error ts=2019-12-10T11:38:07.480Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=66 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples" level=error ts=2019-12-10T11:38:07.480Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=71 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples" level=error ts=2019-12-10T11:38:07.480Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=66 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples" level=error ts=2019-12-10T11:38:07.478Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=76 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 76 samples" level=error ts=2019-12-10T11:38:07.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=77 err="server returned HTTP status 429 Too Many Requests: ingestion 
rate limit (200000) exceeded while adding 77 samples" -level=error ts=2019-12-10T11:38:07.455Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.455Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.455Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:07.455Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" level=error ts=2019-12-10T11:38:07.455Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=65 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 65 samples" -level=error ts=2019-12-10T11:38:07.454Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.453Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.454Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:07.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" level=error ts=2019-12-10T11:38:07.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=80 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples" level=error ts=2019-12-10T11:38:07.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=80 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples" -level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=37 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 37 samples" level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=30 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 30 samples" level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=74 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples" level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples" level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=70 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples" -level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 115 samples" +level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=74 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples" level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=63 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 63 samples" level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.450Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:07.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" level=error ts=2019-12-10T11:38:07.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=95 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples" level=error ts=2019-12-10T11:38:07.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:07.448Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=99 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples" -level=error ts=2019-12-10T11:38:07.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:07.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:07.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:07.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:07.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" level=error ts=2019-12-10T11:38:07.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=57 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 57 samples" -level=error ts=2019-12-10T11:38:07.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.444Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.444Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:07.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:07.445Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:07.444Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:07.444Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" level=error ts=2019-12-10T11:38:07.444Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" -level=error ts=2019-12-10T11:38:07.443Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.390Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.443Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" +level=error ts=2019-12-10T11:38:07.390Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 107 samples" +level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 108 samples" +level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 109 samples" +level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 110 samples" +level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 111 samples" level=error ts=2019-12-10T11:38:07.388Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=85 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples" -level=error ts=2019-12-10T11:38:07.388Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.388Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 112 samples" level=error ts=2019-12-10T11:38:07.388Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=89 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples" -level=error ts=2019-12-10T11:38:07.387Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.387Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 113 samples" level=error ts=2019-12-10T11:38:07.387Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" level=error ts=2019-12-10T11:38:07.387Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=63 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 63 samples" -level=error ts=2019-12-10T11:38:07.376Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error 
ts=2019-12-10T11:38:07.376Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 114 samples" level=error ts=2019-12-10T11:38:07.358Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" -level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 115 samples" +level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 116 samples" level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=82 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples" -level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 117 samples" level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples" -level=error ts=2019-12-10T11:38:07.310Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.259Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.310Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 118 samples" +level=error ts=2019-12-10T11:38:07.259Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 119 samples" level=error ts=2019-12-10T11:38:07.259Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=85 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples" -level=error ts=2019-12-10T11:38:07.045Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.045Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 120 samples" level=error ts=2019-12-10T11:38:07.044Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" -level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.043Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 121 samples" +level=error ts=2019-12-10T11:38:07.043Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 122 samples" level=error ts=2019-12-10T11:38:07.043Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=69 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples" -level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:07.042Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 123 samples" +level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 124 samples" +level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 125 samples" level=error ts=2019-12-10T11:38:07.041Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=49 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 49 samples" level=error ts=2019-12-10T11:38:06.947Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=81 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples" -level=error ts=2019-12-10T11:38:06.947Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.946Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.944Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.947Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 126 samples" +level=error ts=2019-12-10T11:38:06.946Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 127 samples" +level=error ts=2019-12-10T11:38:06.944Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 128 samples" level=error ts=2019-12-10T11:38:06.943Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" -level=error ts=2019-12-10T11:38:06.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 129 samples" level=error ts=2019-12-10T11:38:06.938Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=95 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples" -level=error ts=2019-12-10T11:38:06.937Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.937Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 130 samples" level=error ts=2019-12-10T11:38:06.848Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=54 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 54 samples" -level=error ts=2019-12-10T11:38:06.848Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.842Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.841Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.747Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.848Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 131 samples" +level=error ts=2019-12-10T11:38:06.842Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 132 samples" +level=error 
ts=2019-12-10T11:38:06.841Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 133 samples" +level=error ts=2019-12-10T11:38:06.747Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 134 samples" level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=85 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples" -level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 135 samples" +level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 136 samples" +level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=65 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 65 samples" -level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.715Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:06.715Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" level=error ts=2019-12-10T11:38:06.646Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=95 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples" level=error ts=2019-12-10T11:38:06.646Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=37 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 37 samples" level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=82 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples" level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) 
exceeded while adding 97 samples" -level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.639Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.638Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.644Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:06.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:06.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:06.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:06.639Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" +level=error ts=2019-12-10T11:38:06.638Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" level=error ts=2019-12-10T11:38:06.593Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=93 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 93 samples" -level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 107 samples" +level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 108 samples" level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=95 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples" -level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 109 samples" +level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 110 samples" +level=error ts=2019-12-10T11:38:06.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 111 samples" level=error ts=2019-12-10T11:38:06.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=56 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 56 samples" -level=error ts=2019-12-10T11:38:06.590Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.590Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 112 samples" level=error ts=2019-12-10T11:38:06.590Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" -level=error ts=2019-12-10T11:38:06.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 113 samples" +level=error ts=2019-12-10T11:38:06.589Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 114 samples" level=error ts=2019-12-10T11:38:06.588Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" -level=error ts=2019-12-10T11:38:06.588Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.581Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.581Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.588Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 116 samples" +level=error ts=2019-12-10T11:38:06.581Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 117 samples" +level=error ts=2019-12-10T11:38:06.581Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 118 samples" level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=77 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples" -level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.562Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.562Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.561Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.560Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.551Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.550Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.544Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.544Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate 
limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 119 samples" +level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 120 samples" +level=error ts=2019-12-10T11:38:06.562Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 121 samples" +level=error ts=2019-12-10T11:38:06.562Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 122 samples" +level=error ts=2019-12-10T11:38:06.561Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 123 samples" +level=error ts=2019-12-10T11:38:06.560Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 124 samples" +level=error ts=2019-12-10T11:38:06.551Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 125 samples" +level=error ts=2019-12-10T11:38:06.550Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 126 samples" +level=error ts=2019-12-10T11:38:06.544Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 127 samples" +level=error ts=2019-12-10T11:38:06.544Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 128 samples" +level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 129 samples" +level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 130 samples" +level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 131 samples" level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" -level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.540Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.538Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.538Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.512Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.506Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate 
limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.505Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 132 samples" +level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 133 samples" +level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 134 samples" +level=error ts=2019-12-10T11:38:06.540Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 135 samples" +level=error ts=2019-12-10T11:38:06.538Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 136 samples" +level=error ts=2019-12-10T11:38:06.538Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 137 samples" +level=error ts=2019-12-10T11:38:06.512Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 138 samples" +level=error ts=2019-12-10T11:38:06.506Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 140 samples" +level=error ts=2019-12-10T11:38:06.505Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 141 samples" level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=69 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples" -level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 142 samples" +level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 143 samples" level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples" level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" -level=error ts=2019-12-10T11:38:06.489Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.489Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 144 samples" +level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 145 samples" +level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 146 samples" +level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate 
limit (200000) exceeded while adding 147 samples" level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=77 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples" level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" -level=error ts=2019-12-10T11:38:06.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 148 samples" +level=error ts=2019-12-10T11:38:06.460Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 149 samples" level=error ts=2019-12-10T11:38:06.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" -level=error ts=2019-12-10T11:38:06.459Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.452Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.459Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 150 samples" +level=error ts=2019-12-10T11:38:06.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 151 samples" +level=error ts=2019-12-10T11:38:06.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 152 samples" +level=error ts=2019-12-10T11:38:06.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 153 samples" +level=error ts=2019-12-10T11:38:06.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 154 samples" level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=85 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples" -level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 155 samples" +level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 156 samples" +level=error ts=2019-12-10T11:38:06.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 157 samples" level=error ts=2019-12-10T11:38:06.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=85 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples" -level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error 
ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" -level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=70 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples" level=error ts=2019-12-10T11:38:06.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=66 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples" @@ -783,7 +792,7 @@ level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=re level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=71 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples" level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=62 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 62 samples" -level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=82 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples" +level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=82 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=58 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 58 samples" level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples" level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" 
count=72 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples" @@ -792,222 +801,222 @@ level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=re level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=86 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples" level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.353Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate 
limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.349Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:06.353Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 107 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 108 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 109 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 110 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 111 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 112 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 113 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 114 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 115 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 116 samples" level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=60 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 60 samples" level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=22 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 22 samples" -level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 155 samples" level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error 
ts=2019-12-10T11:38:06.347Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.347Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:06.347Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:06.347Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" level=error ts=2019-12-10T11:38:06.346Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=82 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples" -level=error ts=2019-12-10T11:38:06.346Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.340Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.346Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" +level=error ts=2019-12-10T11:38:06.340Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 107 samples" +level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 108 samples" +level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 109 samples" level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=74 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples" -level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.338Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.338Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.337Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.330Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.329Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.329Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 110 samples" +level=error ts=2019-12-10T11:38:06.339Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 111 samples" +level=error ts=2019-12-10T11:38:06.338Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 112 samples" +level=error ts=2019-12-10T11:38:06.338Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 113 samples" +level=error ts=2019-12-10T11:38:06.337Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 114 samples" +level=error ts=2019-12-10T11:38:06.330Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 115 samples" +level=error ts=2019-12-10T11:38:06.329Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 116 samples" +level=error ts=2019-12-10T11:38:06.329Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 117 samples" level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" -level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 118 samples" +level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 119 samples" level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" level=error ts=2019-12-10T11:38:06.327Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples" -level=error ts=2019-12-10T11:38:06.244Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.231Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.230Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.244Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 120 samples" +level=error ts=2019-12-10T11:38:06.231Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 121 samples" +level=error ts=2019-12-10T11:38:06.230Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 122 samples" +level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 124 samples" +level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 125 samples" level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=94 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples" -level=error ts=2019-12-10T11:38:06.206Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.206Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 126 samples" +level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 127 samples" level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" -level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.198Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 128 samples" +level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 129 samples" +level=error ts=2019-12-10T11:38:06.198Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 130 samples" +level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 131 samples" +level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 132 samples" +level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many 
Requests: ingestion rate limit (200000) exceeded while adding 133 samples" +level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 134 samples" +level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 135 samples" level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=81 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples" -level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:06.088Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error 
ts=2019-12-10T11:38:06.087Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 136 samples" +level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 139 samples" +level=error ts=2019-12-10T11:38:06.088Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 137 samples" +level=error ts=2019-12-10T11:38:06.087Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 138 samples" +level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" -level=error ts=2019-12-10T11:38:05.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:05.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" level=info ts=2019-12-10T11:38:05.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="Remote storage 
resharding" from=1 to=2 -level=error ts=2019-12-10T11:38:04.977Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.976Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.977Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:04.976Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" +level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 
component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" -level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:04.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples" +level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" level=error ts=2019-12-10T11:38:04.719Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 98 samples" -level=error ts=2019-12-10T11:38:04.719Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.717Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.719Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" +level=error ts=2019-12-10T11:38:04.717Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" level=error ts=2019-12-10T11:38:04.716Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=87 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 87 samples" -level=error ts=2019-12-10T11:38:04.716Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error 
ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=99 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples" -level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.716Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 107 samples" +level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 108 samples" +level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=99 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 999 samples" +level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 109 samples" +level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 111 samples" +level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 112 samples" +level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 113 samples" level=error ts=2019-12-10T11:38:04.709Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=66 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples" level=error ts=2019-12-10T11:38:04.709Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=64 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 64 samples" -level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 114 samples" level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" -level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.627Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error 
ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.621Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.619Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.136Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.136Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.135Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.018Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.018Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.017Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.016Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.015Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.015Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many 
Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 115 samples" +level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 116 samples" +level=error 
ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 117 samples" +level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 118 samples" +level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 119 samples" +level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 120 samples" +level=error ts=2019-12-10T11:38:04.627Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 121 samples" +level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 122 samples" +level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 121 samples" +level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 1244 samples" +level=error ts=2019-12-10T11:38:04.621Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 123 samples" +level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 124 samples" +level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 125 samples" +level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 126 samples" +level=error ts=2019-12-10T11:38:04.619Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 127 samples" +level=error ts=2019-12-10T11:38:04.136Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 131 samples" +level=error ts=2019-12-10T11:38:04.136Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 132 samples" +level=error ts=2019-12-10T11:38:04.135Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 133 samples" +level=error ts=2019-12-10T11:38:04.018Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 134 samples" +level=error ts=2019-12-10T11:38:04.018Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 135 samples" +level=error ts=2019-12-10T11:38:04.017Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many 
Requests: ingestion rate limit (200000) exceeded while adding 141 samples" +level=error ts=2019-12-10T11:38:04.016Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 142 samples" +level=error ts=2019-12-10T11:38:04.015Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 153 samples" +level=error ts=2019-12-10T11:38:04.015Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 1223 samples" +level=error ts=2019-12-10T11:38:04.013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 164 samples" +level=error ts=2019-12-10T11:38:04.013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 161 samples" +level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 162 samples" +level=error 
ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 163 samples" +level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 169 samples" +level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 170 samples" level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=80 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples" level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.010Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.009Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.009Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.008Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.010Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:04.009Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:04.009Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:04.008Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=88 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples" -level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.005Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.005Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 105 samples" +level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" +level=error ts=2019-12-10T11:38:04.005Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 107 samples" +level=error ts=2019-12-10T11:38:04.005Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 108 samples" level=error ts=2019-12-10T11:38:04.004Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" -level=error ts=2019-12-10T11:38:04.003Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.002Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error 
ts=2019-12-10T11:38:04.002Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:04.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.923Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.003Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 109 samples" +level=error ts=2019-12-10T11:38:04.002Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 110 samples" +level=error ts=2019-12-10T11:38:04.002Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 112 samples" +level=error ts=2019-12-10T11:38:04.001Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 113 samples" +level=error ts=2019-12-10T11:38:03.923Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 114 samples" level=error ts=2019-12-10T11:38:03.923Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples" -level=error ts=2019-12-10T11:38:03.922Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.908Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.908Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.922Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 115 samples" +level=error ts=2019-12-10T11:38:03.908Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 116 samples" +level=error ts=2019-12-10T11:38:03.908Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 117 samples" level=error ts=2019-12-10T11:38:03.907Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" -level=error ts=2019-12-10T11:38:03.907Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.906Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.907Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 118 samples" +level=error ts=2019-12-10T11:38:03.906Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 119 samples" +level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 120 samples" level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" -level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.904Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 121 samples" +level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 122 samples" level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=71 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples" -level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.902Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 123 samples" +level=error ts=2019-12-10T11:38:03.902Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 124 samples" +level=error ts=2019-12-10T11:38:03.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 125 samples" +level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 126 samples" +level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 127 samples" level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=86 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples" level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" -level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 128 samples" +level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 129 samples" +level=error 
ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 130 samples" +level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 131 samples" +level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 132 samples" +level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 133 samples" +level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 134 samples" +level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 135 samples" level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=90 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples" -level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 136 samples" +level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable 
error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 137 samples" +level=error ts=2019-12-10T11:38:03.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 138 samples" +level=error ts=2019-12-10T11:38:03.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 139 samples" level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=80 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples" -level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 140 samples" +level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 141 samples" +level=error ts=2019-12-10T11:38:03.722Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 142 samples" +level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 143 samples" +level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 144 samples" +level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 145 samples" +level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 146 samples" level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=76 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 76 samples" -level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" +level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=95 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 95 samples" -level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error 
ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 101 samples" +level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 102 samples" +level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many 
Requests: ingestion rate limit (200000) exceeded while adding 103 samples" +level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 104 samples" +level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 105 samples" +level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 106 samples" +level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 107 samples" +level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 108 samples" +level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 109 samples" +level=error 
ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 110 samples" +level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 111 samples" +level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 112 samples" +level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 113 samples" +level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 114 samples" +level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 115 samples" +level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 116 samples" +level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 117 samples" +level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 118 samples" +level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 119 samples" +level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 120 samples" +level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 121 samples" +level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 122 samples" level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=88 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples" -level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" -level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 123 samples" +level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 134 samples" level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=78 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 78 samples" -level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 155 samples" `, "\n") diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go index 27b4eee99c..40bb8df14f 100644 --- a/pkg/ingester/flush_test.go +++ b/pkg/ingester/flush_test.go @@ -229,7 +229,11 @@ func (s *testStore) IsLocal() bool { return false } -func (s *testStore) LazyQuery(ctx context.Context, req logql.SelectParams) (iter.EntryIterator, error) { +func (s *testStore) SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) { + return nil, nil +} + +func (s *testStore) SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) { return nil, nil } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index b2bab21449..965166a8d6 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -122,7 +122,8 @@ type Ingester struct { // ChunkStore is the interface we need to store chunks. type ChunkStore interface { Put(ctx context.Context, chunks []chunk.Chunk) error - LazyQuery(ctx context.Context, req logql.SelectParams) (iter.EntryIterator, error) + SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) + SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) } // New makes a new Ingester. 
@@ -285,13 +286,21 @@ func (i *Ingester) Query(req *logproto.QueryRequest, queryServer logproto.Querie } instance := i.getOrCreateInstance(instanceID) - itrs, err := instance.Query(ctx, req) + itrs, err := instance.Query(ctx, logql.SelectLogParams{QueryRequest: req}) if err != nil { return err } - if storeReq := buildStoreRequest(i.cfg, req); storeReq != nil { - storeItr, err := i.store.LazyQuery(ctx, logql.SelectParams{QueryRequest: storeReq}) + if start, end, ok := buildStoreRequest(i.cfg, req.Start, req.End, time.Now()); ok { + storeReq := logql.SelectLogParams{QueryRequest: &logproto.QueryRequest{ + Selector: req.Selector, + Direction: req.Direction, + Start: start, + End: end, + Limit: req.Limit, + Shards: req.Shards, + }} + storeItr, err := i.store.SelectLogs(ctx, storeReq) if err != nil { return err } @@ -306,6 +315,45 @@ func (i *Ingester) Query(req *logproto.QueryRequest, queryServer logproto.Querie return sendBatches(queryServer.Context(), heapItr, queryServer, req.Limit) } +// QuerySample queries the ingesters for series from logs matching a set of matchers. +func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer logproto.Querier_QuerySampleServer) error { + // initialize stats collection for ingester queries and set grpc trailer with stats.
+ ctx := stats.NewContext(queryServer.Context()) + defer stats.SendAsTrailer(ctx, queryServer) + + instanceID, err := user.ExtractOrgID(ctx) + if err != nil { + return err + } + + instance := i.getOrCreateInstance(instanceID) + itrs, err := instance.QuerySample(ctx, logql.SelectSampleParams{SampleQueryRequest: req}) + if err != nil { + return err + } + + if start, end, ok := buildStoreRequest(i.cfg, req.Start, req.End, time.Now()); ok { + storeReq := logql.SelectSampleParams{SampleQueryRequest: &logproto.SampleQueryRequest{ + Start: start, + End: end, + Selector: req.Selector, + Shards: req.Shards, + }} + storeItr, err := i.store.SelectSamples(ctx, storeReq) + if err != nil { + return err + } + + itrs = append(itrs, storeItr) + } + + heapItr := iter.NewHeapSampleIterator(ctx, itrs) + + defer helpers.LogErrorWithContext(ctx, "closing iterator", heapItr.Close) + + return sendSampleBatches(queryServer.Context(), heapItr, queryServer) +} + // Label returns the set of labels for the stream this ingester knows about. func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) { instanceID, err := user.ExtractOrgID(ctx) @@ -336,7 +384,7 @@ func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logp return nil, err } // Adjust the start time based on QueryStoreMaxLookBackPeriod. - start := adjustQueryStartTime(i.cfg, *req.Start) + start := adjustQueryStartTime(i.cfg, *req.Start, time.Now()) if start.After(*req.End) { // The request is older than we are allowed to query the store, just return what we have. return resp, nil @@ -454,30 +502,23 @@ func (i *Ingester) TailersCount(ctx context.Context, in *logproto.TailersCountRe // buildStoreRequest returns a store request from an ingester request, returns nit if QueryStore is set to false in configuration. 
// The request may be truncated due to QueryStoreMaxLookBackPeriod which limits the range of request to make sure // we only query enough to not miss any data and not add too to many duplicates by covering the who time range in query. -func buildStoreRequest(cfg Config, req *logproto.QueryRequest) *logproto.QueryRequest { +func buildStoreRequest(cfg Config, start, end, now time.Time) (time.Time, time.Time, bool) { if !cfg.QueryStore { - return nil + return time.Time{}, time.Time{}, false } - start := req.Start - end := req.End - start = adjustQueryStartTime(cfg, start) + start = adjustQueryStartTime(cfg, start, now) if start.After(end) { - return nil + return time.Time{}, time.Time{}, false } - - newRequest := *req - newRequest.Start = start - newRequest.End = end - - return &newRequest + return start, end, true } -func adjustQueryStartTime(cfg Config, start time.Time) time.Time { +func adjustQueryStartTime(cfg Config, start, now time.Time) time.Time { if cfg.QueryStoreMaxLookBackPeriod > 0 { - oldestStartTime := time.Now().Add(-cfg.QueryStoreMaxLookBackPeriod) + oldestStartTime := now.Add(-cfg.QueryStoreMaxLookBackPeriod) if oldestStartTime.After(start) { - start = oldestStartTime + return oldestStartTime } } return start diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 58154dce78..6ed1201fbd 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -263,7 +263,11 @@ func (s *mockStore) Put(ctx context.Context, chunks []chunk.Chunk) error { return nil } -func (s *mockStore) LazyQuery(ctx context.Context, req logql.SelectParams) (iter.EntryIterator, error) { +func (s *mockStore) SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) { + return nil, nil +} + +func (s *mockStore) SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) { return nil, nil } @@ -291,75 +295,64 @@ func defaultLimitsTestConfig() validation.Limits { } func 
TestIngester_buildStoreRequest(t *testing.T) { - ingesterQueryRequest := logproto.QueryRequest{ - Selector: `{foo="bar"}`, - Limit: 100, - } - now := time.Now() - for _, tc := range []struct { - name string - queryStore bool - maxLookBackPeriod time.Duration - ingesterQueryRequest *logproto.QueryRequest - expectedStoreQueryRequest *logproto.QueryRequest + name string + queryStore bool + maxLookBackPeriod time.Duration + start, end time.Time + expectedStart, expectedEnd time.Time + shouldQuery bool }{ { - name: "do not query store", - queryStore: false, - ingesterQueryRequest: recreateRequestWithTime(ingesterQueryRequest, now.Add(-time.Minute), now), - expectedStoreQueryRequest: nil, + name: "do not query store", + queryStore: false, + start: now.Add(-time.Minute), + end: now, + shouldQuery: false, }, { - name: "query store with max look back covering whole request duration", - queryStore: true, - maxLookBackPeriod: time.Hour, - ingesterQueryRequest: recreateRequestWithTime(ingesterQueryRequest, now.Add(-10*time.Minute), now), - expectedStoreQueryRequest: recreateRequestWithTime(ingesterQueryRequest, now.Add(-10*time.Minute), now), + name: "query store with max look back covering whole request duration", + queryStore: true, + maxLookBackPeriod: time.Hour, + start: now.Add(-10 * time.Minute), + end: now, + expectedStart: now.Add(-10 * time.Minute), + expectedEnd: now, + shouldQuery: true, }, { - name: "query store with max look back covering partial request duration", - queryStore: true, - maxLookBackPeriod: time.Hour, - ingesterQueryRequest: recreateRequestWithTime(ingesterQueryRequest, now.Add(-2*time.Hour), now), - expectedStoreQueryRequest: recreateRequestWithTime(ingesterQueryRequest, now.Add(-time.Hour), now), + name: "query store with max look back covering partial request duration", + queryStore: true, + maxLookBackPeriod: time.Hour, + start: now.Add(-2 * time.Hour), + end: now, + expectedStart: now.Add(-time.Hour), + expectedEnd: now, + shouldQuery: true, }, 
{ - name: "query store with max look back not covering request duration at all", - queryStore: true, - maxLookBackPeriod: time.Hour, - ingesterQueryRequest: recreateRequestWithTime(ingesterQueryRequest, now.Add(-4*time.Hour), now.Add(-2*time.Hour)), - expectedStoreQueryRequest: nil, + name: "query store with max look back not covering request duration at all", + queryStore: true, + maxLookBackPeriod: time.Hour, + start: now.Add(-4 * time.Hour), + end: now.Add(-2 * time.Hour), + shouldQuery: false, }, } { t.Run(tc.name, func(t *testing.T) { ingesterConfig := defaultIngesterTestConfig(t) ingesterConfig.QueryStore = tc.queryStore ingesterConfig.QueryStoreMaxLookBackPeriod = tc.maxLookBackPeriod - storeRequest := buildStoreRequest(ingesterConfig, tc.ingesterQueryRequest) - if tc.expectedStoreQueryRequest == nil { - require.Nil(t, storeRequest) - return - } - // because start time of store could be changed and built based on time when function is called we can't predict expected start time. - // So allowing upto 1s difference between expected and actual start time of store query request. 
- require.Equal(t, tc.expectedStoreQueryRequest.Selector, storeRequest.Selector) - require.Equal(t, tc.expectedStoreQueryRequest.Limit, storeRequest.Limit) - require.Equal(t, tc.expectedStoreQueryRequest.End, storeRequest.End) + start, end, ok := buildStoreRequest(ingesterConfig, tc.start, tc.end, now) - if storeRequest.Start.Sub(tc.expectedStoreQueryRequest.Start) > time.Second { - t.Fatalf("expected upto 1s difference in expected and actual store request end time but got %d", storeRequest.End.Sub(tc.expectedStoreQueryRequest.End)) + if !tc.shouldQuery { + require.False(t, ok) + return } + require.Equal(t, tc.expectedEnd, end, "end") + require.Equal(t, tc.expectedStart, start, "start") }) } } - -func recreateRequestWithTime(req logproto.QueryRequest, start, end time.Time) *logproto.QueryRequest { - newReq := req - newReq.Start = start - newReq.End = end - - return &newReq -} diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go index ca5605174f..2903c2b7a9 100644 --- a/pkg/ingester/instance.go +++ b/pkg/ingester/instance.go @@ -28,7 +28,10 @@ import ( "github.com/grafana/loki/pkg/util/validation" ) -const queryBatchSize = 128 +const ( + queryBatchSize = 128 + queryBatchSampleSize = 512 +) // Errors returned on Query. 
var ( @@ -192,8 +195,8 @@ func (i *instance) getLabelsFromFingerprint(fp model.Fingerprint) labels.Labels return s.labels } -func (i *instance) Query(ctx context.Context, req *logproto.QueryRequest) ([]iter.EntryIterator, error) { - expr, err := (logql.SelectParams{QueryRequest: req}).LogSelector() +func (i *instance) Query(ctx context.Context, req logql.SelectLogParams) ([]iter.EntryIterator, error) { + expr, err := req.LogSelector() if err != nil { return nil, err } @@ -223,6 +226,40 @@ func (i *instance) Query(ctx context.Context, req *logproto.QueryRequest) ([]ite return iters, nil } +func (i *instance) QuerySample(ctx context.Context, req logql.SelectSampleParams) ([]iter.SampleIterator, error) { + expr, err := req.Expr() + if err != nil { + return nil, err + } + filter, err := expr.Selector().Filter() + if err != nil { + return nil, err + } + extractor, err := expr.Extractor() + if err != nil { + return nil, err + } + ingStats := stats.GetIngesterData(ctx) + var iters []iter.SampleIterator + err = i.forMatchingStreams( + expr.Selector().Matchers(), + func(stream *stream) error { + ingStats.TotalChunksMatched += int64(len(stream.chunks)) + iter, err := stream.SampleIterator(ctx, req.Start, req.End, filter, extractor) + if err != nil { + return err + } + iters = append(iters, iter) + return nil + }, + ) + if err != nil { + return nil, err + } + + return iters, nil +} + func (i *instance) Label(_ context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) { var labels []string if req.Values { @@ -466,6 +503,26 @@ func sendBatches(ctx context.Context, i iter.EntryIterator, queryServer logproto return nil } +func sendSampleBatches(ctx context.Context, it iter.SampleIterator, queryServer logproto.Querier_QuerySampleServer) error { + ingStats := stats.GetIngesterData(ctx) + for !isDone(ctx) { + batch, size, err := iter.ReadSampleBatch(it, queryBatchSampleSize) + if err != nil { + return err + } + if len(batch.Series) == 0 { + return nil + } + + if 
err := queryServer.Send(batch); err != nil { + return err + } + ingStats.TotalLinesSent += int64(size) + ingStats.TotalBatches++ + } + return nil +} + func shouldConsiderStream(stream *stream, req *logproto.SeriesRequest) bool { firstchunkFrom, _ := stream.chunks[0].chunk.Bounds() _, lastChunkTo := stream.chunks[len(stream.chunks)-1].chunk.Bounds() @@ -474,4 +531,4 @@ func shouldConsiderStream(stream *stream, req *logproto.SeriesRequest) bool { return true } return false -} +} \ No newline at end of file diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go index 296cb88cdf..8551d81915 100644 --- a/pkg/ingester/stream.go +++ b/pkg/ingester/stream.go @@ -277,6 +277,18 @@ func (s *stream) Iterator(ctx context.Context, from, through time.Time, directio return iter.NewNonOverlappingIterator(iterators, s.labelsString), nil } +// Returns an SampleIterator. +func (s *stream) SampleIterator(ctx context.Context, from, through time.Time, filter logql.LineFilter, extractor logql.SampleExtractor) (iter.SampleIterator, error) { + iterators := make([]iter.SampleIterator, 0, len(s.chunks)) + for _, c := range s.chunks { + if itr := c.chunk.SampleIterator(ctx, from, through, filter, extractor); itr != nil { + iterators = append(iterators, itr) + } + } + + return iter.NewNonOverlappingSampleIterator(iterators, s.labelsString), nil +} + func (s *stream) addTailer(t *tailer) { s.tailerMtx.Lock() defer s.tailerMtx.Unlock() diff --git a/pkg/iter/iterator.go b/pkg/iter/entry_iterator.go similarity index 97% rename from pkg/iter/iterator.go rename to pkg/iter/entry_iterator.go index 8a2f73b515..0357fbb339 100644 --- a/pkg/iter/iterator.go +++ b/pkg/iter/entry_iterator.go @@ -26,11 +26,12 @@ type noOpIterator struct{} var NoopIterator = noOpIterator{} -func (noOpIterator) Next() bool { return false } -func (noOpIterator) Error() error { return nil } -func (noOpIterator) Labels() string { return "" } -func (noOpIterator) Entry() logproto.Entry { return logproto.Entry{} } -func 
(noOpIterator) Close() error { return nil } +func (noOpIterator) Next() bool { return false } +func (noOpIterator) Error() error { return nil } +func (noOpIterator) Labels() string { return "" } +func (noOpIterator) Entry() logproto.Entry { return logproto.Entry{} } +func (noOpIterator) Sample() logproto.Sample { return logproto.Sample{} } +func (noOpIterator) Close() error { return nil } // streamIterator iterates over entries in a stream. type streamIterator struct { diff --git a/pkg/iter/iterator_test.go b/pkg/iter/entry_iterator_test.go similarity index 96% rename from pkg/iter/iterator_test.go rename to pkg/iter/entry_iterator_test.go index faa70fc648..4212918f42 100644 --- a/pkg/iter/iterator_test.go +++ b/pkg/iter/entry_iterator_test.go @@ -567,7 +567,7 @@ func Test_timeRangedIterator_Next(t *testing.T) { } for _, tt := range tests { t.Run(fmt.Sprintf("mint:%d maxt:%d", tt.mint.UnixNano(), tt.maxt.UnixNano()), func(t *testing.T) { - i := NewTimeRangedIterator( + it := NewTimeRangedIterator( NewStreamIterator( logproto.Stream{Entries: []logproto.Entry{ {Timestamp: time.Unix(0, 1)}, @@ -578,9 +578,25 @@ func Test_timeRangedIterator_Next(t *testing.T) { tt.maxt, ) for _, b := range tt.expect { - require.Equal(t, b, i.Next()) + require.Equal(t, b, it.Next()) } - require.NoError(t, i.Close()) + require.NoError(t, it.Close()) + }) + t.Run(fmt.Sprintf("mint:%d maxt:%d_sample", tt.mint.UnixNano(), tt.maxt.UnixNano()), func(t *testing.T) { + it := NewTimeRangedSampleIterator( + NewSeriesIterator( + logproto.Series{Samples: []logproto.Sample{ + sample(1), + sample(2), + sample(3), + }}), + tt.mint.UnixNano(), + tt.maxt.UnixNano(), + ) + for _, b := range tt.expect { + require.Equal(t, b, it.Next()) + } + require.NoError(t, it.Close()) }) } } diff --git a/pkg/iter/sample_iterator.go b/pkg/iter/sample_iterator.go new file mode 100644 index 0000000000..a2ab973c38 --- /dev/null +++ b/pkg/iter/sample_iterator.go @@ -0,0 +1,512 @@ +package iter + +import ( + 
"container/heap" + "context" + "fmt" + "io" + + "github.com/grafana/loki/pkg/helpers" + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/stats" +) + +// SampleIterator iterates over samples in time-order. +type SampleIterator interface { + Next() bool + // todo(ctovena) we should add `Seek(t int64) bool` + // This way we can skip when ranging over samples. + Sample() logproto.Sample + Labels() string + Error() error + Close() error +} + +// PeekingSampleIterator is a sample iterator that can peek sample without moving the current sample. +type PeekingSampleIterator interface { + SampleIterator + Peek() (string, logproto.Sample, bool) +} + +type peekingSampleIterator struct { + iter SampleIterator + + cache *sampleWithLabels + next *sampleWithLabels +} + +type sampleWithLabels struct { + logproto.Sample + labels string +} + +func NewPeekingSampleIterator(iter SampleIterator) PeekingSampleIterator { + // initialize the next entry so we can peek right from the start. + var cache *sampleWithLabels + next := &sampleWithLabels{} + if iter.Next() { + cache = &sampleWithLabels{ + Sample: iter.Sample(), + labels: iter.Labels(), + } + next.Sample = cache.Sample + next.labels = cache.labels + } + return &peekingSampleIterator{ + iter: iter, + cache: cache, + next: next, + } +} + +func (it *peekingSampleIterator) Close() error { + return it.iter.Close() +} + +func (it *peekingSampleIterator) Labels() string { + if it.next != nil { + return it.next.labels + } + return "" +} + +func (it *peekingSampleIterator) Next() bool { + if it.cache != nil { + it.next.Sample = it.cache.Sample + it.next.labels = it.cache.labels + it.cacheNext() + return true + } + return false +} + +// cacheNext caches the next element if it exists. 
+func (it *peekingSampleIterator) cacheNext() { + if it.iter.Next() { + it.cache.Sample = it.iter.Sample() + it.cache.labels = it.iter.Labels() + return + } + // nothing left removes the cached entry + it.cache = nil +} + +func (it *peekingSampleIterator) Sample() logproto.Sample { + if it.next != nil { + return it.next.Sample + } + return logproto.Sample{} +} + +func (it *peekingSampleIterator) Peek() (string, logproto.Sample, bool) { + if it.cache != nil { + return it.cache.labels, it.cache.Sample, true + } + return "", logproto.Sample{}, false +} + +func (it *peekingSampleIterator) Error() error { + return it.iter.Error() +} + +type sampleIteratorHeap []SampleIterator + +func (h sampleIteratorHeap) Len() int { return len(h) } +func (h sampleIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h sampleIteratorHeap) Peek() SampleIterator { return h[0] } +func (h *sampleIteratorHeap) Push(x interface{}) { + *h = append(*h, x.(SampleIterator)) +} + +func (h *sampleIteratorHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +func (h sampleIteratorHeap) Less(i, j int) bool { + s1, s2 := h[i].Sample(), h[j].Sample() + switch { + case s1.Timestamp < s2.Timestamp: + return true + case s1.Timestamp > s2.Timestamp: + return false + default: + return h[i].Labels() < h[j].Labels() + } +} + +// heapSampleIterator iterates over a heap of iterators. +type heapSampleIterator struct { + heap *sampleIteratorHeap + is []SampleIterator + prefetched bool + stats *stats.ChunkData + + tuples []sampletuple + curr logproto.Sample + currLabels string + errs []error +} + +// NewHeapSampleIterator returns a new iterator which uses a heap to merge together +// entries for multiple iterators. 
+func NewHeapSampleIterator(ctx context.Context, is []SampleIterator) SampleIterator { + + return &heapSampleIterator{ + stats: stats.GetChunkData(ctx), + is: is, + heap: &sampleIteratorHeap{}, + tuples: make([]sampletuple, 0, len(is)), + } +} + +// prefetch iterates over all inner iterators to merge together, calls Next() on +// each of them to prefetch the first entry and pushes of them - who are not +// empty - to the heap +func (i *heapSampleIterator) prefetch() { + if i.prefetched { + return + } + + i.prefetched = true + for _, it := range i.is { + i.requeue(it, false) + } + + // We can now clear the list of input iterators to merge, given they have all + // been processed and the non empty ones have been pushed to the heap + i.is = nil +} + +// requeue pushes the input ei EntryIterator to the heap, advancing it via an ei.Next() +// call unless the advanced input parameter is true. In this latter case it expects that +// the iterator has already been advanced before calling requeue(). +// +// If the iterator has no more entries or an error occur while advancing it, the iterator +// is not pushed to the heap and any possible error captured, so that can be get via Error(). +func (i *heapSampleIterator) requeue(ei SampleIterator, advanced bool) { + if advanced || ei.Next() { + heap.Push(i.heap, ei) + return + } + + if err := ei.Error(); err != nil { + i.errs = append(i.errs, err) + } + helpers.LogError("closing iterator", ei.Close) +} + +type sampletuple struct { + logproto.Sample + SampleIterator +} + +func (i *heapSampleIterator) Next() bool { + i.prefetch() + + if i.heap.Len() == 0 { + return false + } + + // We support multiple entries with the same timestamp, and we want to + // preserve their original order. We look at all the top entries in the + // heap with the same timestamp, and pop the ones whose common value + // occurs most often. 
+ for i.heap.Len() > 0 { + next := i.heap.Peek() + sample := next.Sample() + if len(i.tuples) > 0 && (i.tuples[0].Labels() != next.Labels() || i.tuples[0].Timestamp != sample.Timestamp) { + break + } + + heap.Pop(i.heap) + i.tuples = append(i.tuples, sampletuple{ + Sample: sample, + SampleIterator: next, + }) + } + + i.curr = i.tuples[0].Sample + i.currLabels = i.tuples[0].Labels() + t := i.tuples[0] + if len(i.tuples) == 1 { + i.requeue(i.tuples[0].SampleIterator, false) + i.tuples = i.tuples[:0] + return true + } + // Requeue the iterators, advancing them if they were consumed. + for j := range i.tuples { + if i.tuples[j].Hash != i.curr.Hash { + i.requeue(i.tuples[j].SampleIterator, true) + continue + } + // we count as duplicates only if the tuple is not the one (t) used to fill the current entry + if i.tuples[j] != t { + i.stats.TotalDuplicates++ + } + i.requeue(i.tuples[j].SampleIterator, false) + } + i.tuples = i.tuples[:0] + return true +} + +func (i *heapSampleIterator) Sample() logproto.Sample { + return i.curr +} + +func (i *heapSampleIterator) Labels() string { + return i.currLabels +} + +func (i *heapSampleIterator) Error() error { + switch len(i.errs) { + case 0: + return nil + case 1: + return i.errs[0] + default: + return fmt.Errorf("Multiple errors: %+v", i.errs) + } +} + +func (i *heapSampleIterator) Close() error { + for i.heap.Len() > 0 { + if err := i.heap.Pop().(SampleIterator).Close(); err != nil { + return err + } + } + i.tuples = nil + return nil +} + +type sampleQueryClientIterator struct { + client QuerySampleClient + err error + curr SampleIterator +} + +// QuerySampleClient is GRPC stream client with only method used by the SampleQueryClientIterator +type QuerySampleClient interface { + Recv() (*logproto.SampleQueryResponse, error) + Context() context.Context + CloseSend() error +} + +// NewSampleQueryClientIterator returns an iterator over a QuerySampleClient.
+func NewSampleQueryClientIterator(client QuerySampleClient) SampleIterator { + return &sampleQueryClientIterator{ + client: client, + } +} + +func (i *sampleQueryClientIterator) Next() bool { + for i.curr == nil || !i.curr.Next() { + batch, err := i.client.Recv() + if err == io.EOF { + return false + } else if err != nil { + i.err = err + return false + } + + i.curr = NewSampleQueryResponseIterator(i.client.Context(), batch) + } + + return true +} + +func (i *sampleQueryClientIterator) Sample() logproto.Sample { + return i.curr.Sample() +} + +func (i *sampleQueryClientIterator) Labels() string { + return i.curr.Labels() +} + +func (i *sampleQueryClientIterator) Error() error { + return i.err +} + +func (i *sampleQueryClientIterator) Close() error { + return i.client.CloseSend() +} + +// NewSampleQueryResponseIterator returns an iterator over a SampleQueryResponse. +func NewSampleQueryResponseIterator(ctx context.Context, resp *logproto.SampleQueryResponse) SampleIterator { + return NewMultiSeriesIterator(ctx, resp.Series) +} + +type seriesIterator struct { + i int + samples []logproto.Sample + labels string +} + +// NewMultiSeriesIterator returns an iterator over multiple logproto.Series +func NewMultiSeriesIterator(ctx context.Context, series []logproto.Series) SampleIterator { + is := make([]SampleIterator, 0, len(series)) + for i := range series { + is = append(is, NewSeriesIterator(series[i])) + } + return NewHeapSampleIterator(ctx, is) +} + +// NewSeriesIterator iterates over sample in a series. 
+func NewSeriesIterator(series logproto.Series) SampleIterator { + return &seriesIterator{ + i: -1, + samples: series.Samples, + labels: series.Labels, + } +} + +func (i *seriesIterator) Next() bool { + i.i++ + return i.i < len(i.samples) +} + +func (i *seriesIterator) Error() error { + return nil +} + +func (i *seriesIterator) Labels() string { + return i.labels +} + +func (i *seriesIterator) Sample() logproto.Sample { + return i.samples[i.i] +} + +func (i *seriesIterator) Close() error { + return nil +} + +type nonOverlappingSampleIterator struct { + labels string + i int + iterators []SampleIterator + curr SampleIterator +} + +// NewNonOverlappingSampleIterator gives a chained iterator over a list of iterators. +func NewNonOverlappingSampleIterator(iterators []SampleIterator, labels string) SampleIterator { + return &nonOverlappingSampleIterator{ + labels: labels, + iterators: iterators, + } +} + +func (i *nonOverlappingSampleIterator) Next() bool { + for i.curr == nil || !i.curr.Next() { + if len(i.iterators) == 0 { + if i.curr != nil { + i.curr.Close() + } + return false + } + if i.curr != nil { + i.curr.Close() + } + i.i++ + i.curr, i.iterators = i.iterators[0], i.iterators[1:] + } + + return true +} + +func (i *nonOverlappingSampleIterator) Sample() logproto.Sample { + return i.curr.Sample() +} + +func (i *nonOverlappingSampleIterator) Labels() string { + if i.labels != "" { + return i.labels + } + + return i.curr.Labels() +} + +func (i *nonOverlappingSampleIterator) Error() error { + if i.curr != nil { + return i.curr.Error() + } + return nil +} + +func (i *nonOverlappingSampleIterator) Close() error { + for _, iter := range i.iterators { + iter.Close() + } + i.iterators = nil + return nil +} + +type timeRangedSampleIterator struct { + SampleIterator + mint, maxt int64 +} + +// NewTimeRangedSampleIterator returns an iterator which filters entries by time range. 
+func NewTimeRangedSampleIterator(it SampleIterator, mint, maxt int64) SampleIterator { + return &timeRangedSampleIterator{ + SampleIterator: it, + mint: mint, + maxt: maxt, + } +} + +func (i *timeRangedSampleIterator) Next() bool { + ok := i.SampleIterator.Next() + if !ok { + i.SampleIterator.Close() + return ok + } + ts := i.SampleIterator.Sample().Timestamp + for ok && i.mint > ts { + ok = i.SampleIterator.Next() + if !ok { + continue + } + ts = i.SampleIterator.Sample().Timestamp + } + if ok { + if ts == i.mint { // The mint is inclusive + return true + } + if i.maxt < ts || i.maxt == ts { // The maxt is exclusive. + ok = false + } + } + if !ok { + i.SampleIterator.Close() + } + return ok +} + +// ReadBatch reads a set of entries off an iterator. +func ReadSampleBatch(i SampleIterator, size uint32) (*logproto.SampleQueryResponse, uint32, error) { + series := map[string]*logproto.Series{} + respSize := uint32(0) + for ; respSize < size && i.Next(); respSize++ { + labels, sample := i.Labels(), i.Sample() + s, ok := series[labels] + if !ok { + s = &logproto.Series{ + Labels: labels, + } + series[labels] = s + } + s.Samples = append(s.Samples, sample) + } + + result := logproto.SampleQueryResponse{ + Series: make([]logproto.Series, 0, len(series)), + } + for _, s := range series { + result.Series = append(result.Series, *s) + } + return &result, respSize, i.Error() +} diff --git a/pkg/iter/sample_iterator_test.go b/pkg/iter/sample_iterator_test.go new file mode 100644 index 0000000000..61eea8816f --- /dev/null +++ b/pkg/iter/sample_iterator_test.go @@ -0,0 +1,195 @@ +package iter + +import ( + "context" + "io" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/logproto" +) + +func TestNewPeekingSampleIterator(t *testing.T) { + iter := NewPeekingSampleIterator(NewSeriesIterator(logproto.Series{ + Samples: []logproto.Sample{ + { + Timestamp: time.Unix(0, 1).UnixNano(), + }, + { + Timestamp: time.Unix(0, 2).UnixNano(), + }, 
+ { + Timestamp: time.Unix(0, 3).UnixNano(), + }, + }, + })) + _, peek, ok := iter.Peek() + if peek.Timestamp != 1 { + t.Fatal("wrong peeked time.") + } + if !ok { + t.Fatal("should be ok.") + } + hasNext := iter.Next() + if !hasNext { + t.Fatal("should have next.") + } + if iter.Sample().Timestamp != 1 { + t.Fatal("wrong peeked time.") + } + + _, peek, ok = iter.Peek() + if peek.Timestamp != 2 { + t.Fatal("wrong peeked time.") + } + if !ok { + t.Fatal("should be ok.") + } + hasNext = iter.Next() + if !hasNext { + t.Fatal("should have next.") + } + if iter.Sample().Timestamp != 2 { + t.Fatal("wrong peeked time.") + } + _, peek, ok = iter.Peek() + if peek.Timestamp != 3 { + t.Fatal("wrong peeked time.") + } + if !ok { + t.Fatal("should be ok.") + } + hasNext = iter.Next() + if !hasNext { + t.Fatal("should have next.") + } + if iter.Sample().Timestamp != 3 { + t.Fatal("wrong peeked time.") + } + _, _, ok = iter.Peek() + if ok { + t.Fatal("should not be ok.") + } + require.NoError(t, iter.Close()) + require.NoError(t, iter.Error()) +} + +func sample(i int) logproto.Sample { + return logproto.Sample{ + Timestamp: int64(i), + Hash: uint64(i), + Value: float64(1), + } +} + +var varSeries = logproto.Series{ + Labels: `{foo="var"}`, + Samples: []logproto.Sample{ + sample(1), sample(2), sample(3), + }, +} +var carSeries = logproto.Series{ + Labels: `{foo="car"}`, + Samples: []logproto.Sample{ + sample(1), sample(2), sample(3), + }, +} + +func TestNewHeapSampleIterator(t *testing.T) { + it := NewHeapSampleIterator(context.Background(), + []SampleIterator{ + NewSeriesIterator(varSeries), + NewSeriesIterator(carSeries), + NewSeriesIterator(carSeries), + NewSeriesIterator(varSeries), + NewSeriesIterator(carSeries), + NewSeriesIterator(varSeries), + NewSeriesIterator(carSeries), + }) + + for i := 1; i < 4; i++ { + require.True(t, it.Next(), i) + require.Equal(t, `{foo="car"}`, it.Labels(), i) + require.Equal(t, sample(i), it.Sample(), i) + require.True(t, it.Next(), i) + 
require.Equal(t, `{foo="var"}`, it.Labels(), i) + require.Equal(t, sample(i), it.Sample(), i) + } + require.False(t, it.Next()) + require.NoError(t, it.Error()) + require.NoError(t, it.Close()) +} + +type fakeSampleClient struct { + series [][]logproto.Series + curr int +} + +func (f *fakeSampleClient) Recv() (*logproto.SampleQueryResponse, error) { + if f.curr >= len(f.series) { + return nil, io.EOF + } + res := &logproto.SampleQueryResponse{ + Series: f.series[f.curr], + } + f.curr++ + return res, nil +} + +func (fakeSampleClient) Context() context.Context { return context.Background() } +func (fakeSampleClient) CloseSend() error { return nil } +func TestNewSampleQueryClientIterator(t *testing.T) { + + it := NewSampleQueryClientIterator(&fakeSampleClient{ + series: [][]logproto.Series{ + {varSeries}, + {carSeries}, + }, + }) + for i := 1; i < 4; i++ { + require.True(t, it.Next(), i) + require.Equal(t, `{foo="var"}`, it.Labels(), i) + require.Equal(t, sample(i), it.Sample(), i) + } + for i := 1; i < 4; i++ { + require.True(t, it.Next(), i) + require.Equal(t, `{foo="car"}`, it.Labels(), i) + require.Equal(t, sample(i), it.Sample(), i) + } + require.False(t, it.Next()) + require.NoError(t, it.Error()) + require.NoError(t, it.Close()) +} + +func TestNewNonOverlappingSampleIterator(t *testing.T) { + it := NewNonOverlappingSampleIterator([]SampleIterator{ + NewSeriesIterator(varSeries), + NewSeriesIterator(logproto.Series{ + Labels: varSeries.Labels, + Samples: []logproto.Sample{sample(4), sample(5)}, + }), + }, varSeries.Labels) + + for i := 1; i < 6; i++ { + require.True(t, it.Next(), i) + require.Equal(t, `{foo="var"}`, it.Labels(), i) + require.Equal(t, sample(i), it.Sample(), i) + } + require.False(t, it.Next()) + require.NoError(t, it.Error()) + require.NoError(t, it.Close()) +} + +func TestReadSampleBatch(t *testing.T) { + res, size, err := ReadSampleBatch(NewSeriesIterator(carSeries), 1) + require.Equal(t, &logproto.SampleQueryResponse{Series: 
[]logproto.Series{{Labels: carSeries.Labels, Samples: []logproto.Sample{sample(1)}}}}, res) + require.Equal(t, uint32(1), size) + require.NoError(t, err) + + res, size, err = ReadSampleBatch(NewMultiSeriesIterator(context.Background(), []logproto.Series{carSeries, varSeries}), 100) + require.ElementsMatch(t, []logproto.Series{carSeries, varSeries}, res.Series) + require.Equal(t, uint32(6), size) + require.NoError(t, err) +} diff --git a/pkg/logcli/query/query.go b/pkg/logcli/query/query.go index e0ddafcc96..54f5b94643 100644 --- a/pkg/logcli/query/query.go +++ b/pkg/logcli/query/query.go @@ -18,7 +18,6 @@ import ( "github.com/weaveworks/common/user" "github.com/grafana/loki/pkg/cfg" - "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logcli/client" "github.com/grafana/loki/pkg/logcli/output" "github.com/grafana/loki/pkg/loghttp" @@ -117,7 +116,12 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string return err } - querier, err := localStore(conf) + limits, err := validation.NewOverrides(conf.LimitsConfig, nil) + if err != nil { + return err + } + + querier, err := storage.NewStore(conf.StorageConfig, conf.ChunkStoreConfig, conf.SchemaConfig, limits, prometheus.DefaultRegisterer) if err != nil { return err } @@ -169,20 +173,6 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string return nil } -func localStore(conf loki.Config) (logql.Querier, error) { - limits, err := validation.NewOverrides(conf.LimitsConfig, nil) - if err != nil { - return nil, err - } - s, err := storage.NewStore(conf.StorageConfig, conf.ChunkStoreConfig, conf.SchemaConfig, limits, prometheus.DefaultRegisterer) - if err != nil { - return nil, err - } - return logql.QuerierFunc(func(ctx context.Context, params logql.SelectParams) (iter.EntryIterator, error) { - return s.LazyQuery(ctx, params) - }), nil -} - // SetInstant makes the Query an instant type func (q *Query) SetInstant(time time.Time) { q.Start = time diff --git 
a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index 41fdcfde95..b07257fc9e 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -6,6 +6,7 @@ package logproto import ( bytes "bytes" context "context" + encoding_binary "encoding/binary" fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" @@ -208,6 +209,109 @@ func (m *QueryRequest) GetShards() []string { return nil } +type SampleQueryRequest struct { + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"` + Shards []string `protobuf:"bytes,4,rep,name=shards,proto3" json:"shards,omitempty"` +} + +func (m *SampleQueryRequest) Reset() { *m = SampleQueryRequest{} } +func (*SampleQueryRequest) ProtoMessage() {} +func (*SampleQueryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{3} +} +func (m *SampleQueryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SampleQueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SampleQueryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SampleQueryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SampleQueryRequest.Merge(m, src) +} +func (m *SampleQueryRequest) XXX_Size() int { + return m.Size() +} +func (m *SampleQueryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SampleQueryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SampleQueryRequest proto.InternalMessageInfo + +func (m *SampleQueryRequest) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *SampleQueryRequest) GetStart() time.Time { + if m != nil { + return m.Start + 
} + return time.Time{} +} + +func (m *SampleQueryRequest) GetEnd() time.Time { + if m != nil { + return m.End + } + return time.Time{} +} + +func (m *SampleQueryRequest) GetShards() []string { + if m != nil { + return m.Shards + } + return nil +} + +type SampleQueryResponse struct { + Series []Series `protobuf:"bytes,1,rep,name=series,proto3,customtype=Series" json:"series,omitempty"` +} + +func (m *SampleQueryResponse) Reset() { *m = SampleQueryResponse{} } +func (*SampleQueryResponse) ProtoMessage() {} +func (*SampleQueryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{4} +} +func (m *SampleQueryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SampleQueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SampleQueryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SampleQueryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SampleQueryResponse.Merge(m, src) +} +func (m *SampleQueryResponse) XXX_Size() int { + return m.Size() +} +func (m *SampleQueryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SampleQueryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SampleQueryResponse proto.InternalMessageInfo + type QueryResponse struct { Streams []Stream `protobuf:"bytes,1,rep,name=streams,proto3,customtype=Stream" json:"streams,omitempty"` } @@ -215,7 +319,7 @@ type QueryResponse struct { func (m *QueryResponse) Reset() { *m = QueryResponse{} } func (*QueryResponse) ProtoMessage() {} func (*QueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{3} + return fileDescriptor_c28a5f14f1f4c79a, []int{5} } func (m *QueryResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -254,7 +358,7 @@ type LabelRequest struct { func (m *LabelRequest) Reset() { *m = 
LabelRequest{} } func (*LabelRequest) ProtoMessage() {} func (*LabelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{4} + return fileDescriptor_c28a5f14f1f4c79a, []int{6} } func (m *LabelRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -318,7 +422,7 @@ type LabelResponse struct { func (m *LabelResponse) Reset() { *m = LabelResponse{} } func (*LabelResponse) ProtoMessage() {} func (*LabelResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{5} + return fileDescriptor_c28a5f14f1f4c79a, []int{7} } func (m *LabelResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -362,7 +466,7 @@ type StreamAdapter struct { func (m *StreamAdapter) Reset() { *m = StreamAdapter{} } func (*StreamAdapter) ProtoMessage() {} func (*StreamAdapter) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{6} + return fileDescriptor_c28a5f14f1f4c79a, []int{8} } func (m *StreamAdapter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +517,7 @@ type EntryAdapter struct { func (m *EntryAdapter) Reset() { *m = EntryAdapter{} } func (*EntryAdapter) ProtoMessage() {} func (*EntryAdapter) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{7} + return fileDescriptor_c28a5f14f1f4c79a, []int{9} } func (m *EntryAdapter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -456,6 +560,116 @@ func (m *EntryAdapter) GetLine() string { return "" } +type Sample struct { + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"ts"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value"` + Hash uint64 `protobuf:"varint,3,opt,name=hash,proto3" json:"hash"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{10} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) 
+} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Sample) GetHash() uint64 { + if m != nil { + return m.Hash + } + return 0 +} + +type Series struct { + Labels string `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` +} + +func (m *Series) Reset() { *m = Series{} } +func (*Series) ProtoMessage() {} +func (*Series) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{11} +} +func (m *Series) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Series) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Series.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Series) XXX_Merge(src proto.Message) { + xxx_messageInfo_Series.Merge(m, src) +} +func (m *Series) XXX_Size() int { + return m.Size() +} +func (m *Series) XXX_DiscardUnknown() { + xxx_messageInfo_Series.DiscardUnknown(m) +} + +var xxx_messageInfo_Series proto.InternalMessageInfo + +func (m *Series) GetLabels() string { + if m != nil { + return m.Labels + } + return "" +} + +func (m *Series) GetSamples() 
[]Sample { + if m != nil { + return m.Samples + } + return nil +} + type TailRequest struct { Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` DelayFor uint32 `protobuf:"varint,3,opt,name=delayFor,proto3" json:"delayFor,omitempty"` @@ -466,7 +680,7 @@ type TailRequest struct { func (m *TailRequest) Reset() { *m = TailRequest{} } func (*TailRequest) ProtoMessage() {} func (*TailRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{8} + return fileDescriptor_c28a5f14f1f4c79a, []int{12} } func (m *TailRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -531,7 +745,7 @@ type TailResponse struct { func (m *TailResponse) Reset() { *m = TailResponse{} } func (*TailResponse) ProtoMessage() {} func (*TailResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{9} + return fileDescriptor_c28a5f14f1f4c79a, []int{13} } func (m *TailResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -576,7 +790,7 @@ type SeriesRequest struct { func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } func (*SeriesRequest) ProtoMessage() {} func (*SeriesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{10} + return fileDescriptor_c28a5f14f1f4c79a, []int{14} } func (m *SeriesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -633,7 +847,7 @@ type SeriesResponse struct { func (m *SeriesResponse) Reset() { *m = SeriesResponse{} } func (*SeriesResponse) ProtoMessage() {} func (*SeriesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{11} + return fileDescriptor_c28a5f14f1f4c79a, []int{15} } func (m *SeriesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -676,7 +890,7 @@ type SeriesIdentifier struct { func (m *SeriesIdentifier) Reset() { *m = SeriesIdentifier{} } func (*SeriesIdentifier) ProtoMessage() {} func (*SeriesIdentifier) Descriptor() ([]byte, []int) { - return 
fileDescriptor_c28a5f14f1f4c79a, []int{12} + return fileDescriptor_c28a5f14f1f4c79a, []int{16} } func (m *SeriesIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -721,7 +935,7 @@ type DroppedStream struct { func (m *DroppedStream) Reset() { *m = DroppedStream{} } func (*DroppedStream) ProtoMessage() {} func (*DroppedStream) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{13} + return fileDescriptor_c28a5f14f1f4c79a, []int{17} } func (m *DroppedStream) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -781,7 +995,7 @@ type TimeSeriesChunk struct { func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } func (*TimeSeriesChunk) ProtoMessage() {} func (*TimeSeriesChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{14} + return fileDescriptor_c28a5f14f1f4c79a, []int{18} } func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -846,7 +1060,7 @@ type LabelPair struct { func (m *LabelPair) Reset() { *m = LabelPair{} } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{15} + return fileDescriptor_c28a5f14f1f4c79a, []int{19} } func (m *LabelPair) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -896,7 +1110,7 @@ type Chunk struct { func (m *Chunk) Reset() { *m = Chunk{} } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{16} + return fileDescriptor_c28a5f14f1f4c79a, []int{20} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -938,7 +1152,7 @@ type TransferChunksResponse struct { func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } func (*TransferChunksResponse) ProtoMessage() {} func (*TransferChunksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{17} + return fileDescriptor_c28a5f14f1f4c79a, []int{21} } 
func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -973,7 +1187,7 @@ type TailersCountRequest struct { func (m *TailersCountRequest) Reset() { *m = TailersCountRequest{} } func (*TailersCountRequest) ProtoMessage() {} func (*TailersCountRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{18} + return fileDescriptor_c28a5f14f1f4c79a, []int{22} } func (m *TailersCountRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1009,7 +1223,7 @@ type TailersCountResponse struct { func (m *TailersCountResponse) Reset() { *m = TailersCountResponse{} } func (*TailersCountResponse) ProtoMessage() {} func (*TailersCountResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{19} + return fileDescriptor_c28a5f14f1f4c79a, []int{23} } func (m *TailersCountResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1050,11 +1264,15 @@ func init() { proto.RegisterType((*PushRequest)(nil), "logproto.PushRequest") proto.RegisterType((*PushResponse)(nil), "logproto.PushResponse") proto.RegisterType((*QueryRequest)(nil), "logproto.QueryRequest") + proto.RegisterType((*SampleQueryRequest)(nil), "logproto.SampleQueryRequest") + proto.RegisterType((*SampleQueryResponse)(nil), "logproto.SampleQueryResponse") proto.RegisterType((*QueryResponse)(nil), "logproto.QueryResponse") proto.RegisterType((*LabelRequest)(nil), "logproto.LabelRequest") proto.RegisterType((*LabelResponse)(nil), "logproto.LabelResponse") proto.RegisterType((*StreamAdapter)(nil), "logproto.StreamAdapter") proto.RegisterType((*EntryAdapter)(nil), "logproto.EntryAdapter") + proto.RegisterType((*Sample)(nil), "logproto.Sample") + proto.RegisterType((*Series)(nil), "logproto.Series") proto.RegisterType((*TailRequest)(nil), "logproto.TailRequest") proto.RegisterType((*TailResponse)(nil), "logproto.TailResponse") proto.RegisterType((*SeriesRequest)(nil), "logproto.SeriesRequest") @@ -1073,81 +1291,89 @@ 
func init() { func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) } var fileDescriptor_c28a5f14f1f4c79a = []byte{ - // 1169 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0xd6, 0x4a, 0x14, 0x25, 0x8d, 0x7e, 0x22, 0x6c, 0x1c, 0x5b, 0x55, 0x5a, 0x4a, 0x20, 0x8a, - 0x44, 0x68, 0x5d, 0xa9, 0x75, 0xff, 0x9c, 0xf4, 0x0f, 0x96, 0xdd, 0x20, 0x76, 0x0b, 0x24, 0xa1, - 0x0d, 0x04, 0x08, 0x50, 0x04, 0xb4, 0xb9, 0x96, 0x09, 0x4b, 0xa4, 0x42, 0xae, 0x0c, 0xf8, 0xd6, - 0x07, 0x68, 0x81, 0xdc, 0x7a, 0xc8, 0x0b, 0x14, 0x3d, 0xf4, 0x39, 0x7c, 0x2a, 0x7c, 0x0c, 0x7a, - 0x50, 0x6b, 0xf9, 0x52, 0xf8, 0x94, 0x47, 0x28, 0xf6, 0x87, 0xe4, 0x4a, 0xb1, 0x8b, 0x2a, 0x17, - 0x69, 0x67, 0x76, 0x66, 0x67, 0xe6, 0x9b, 0x6f, 0x76, 0x09, 0x37, 0x87, 0x87, 0xbd, 0x4e, 0xdf, - 0xef, 0x0d, 0x03, 0x9f, 0xfa, 0xf1, 0xa2, 0xcd, 0x7f, 0x71, 0x3e, 0x92, 0xeb, 0x8d, 0x9e, 0xef, - 0xf7, 0xfa, 0xa4, 0xc3, 0xa5, 0xdd, 0xd1, 0x7e, 0x87, 0xba, 0x03, 0x12, 0x52, 0x7b, 0x30, 0x14, - 0xa6, 0xf5, 0x0f, 0x7a, 0x2e, 0x3d, 0x18, 0xed, 0xb6, 0xf7, 0xfc, 0x41, 0xa7, 0xe7, 0xf7, 0xfc, - 0xc4, 0x92, 0x49, 0xe2, 0x74, 0xb6, 0x12, 0xe6, 0xe6, 0x63, 0x28, 0x3e, 0x1c, 0x85, 0x07, 0x16, - 0x79, 0x36, 0x22, 0x21, 0xc5, 0xf7, 0x21, 0x17, 0xd2, 0x80, 0xd8, 0x83, 0xb0, 0x86, 0x9a, 0x99, - 0x56, 0x71, 0x65, 0xa9, 0x1d, 0xa7, 0xb2, 0xcd, 0x37, 0xd6, 0x1c, 0x7b, 0x48, 0x49, 0xd0, 0xbd, - 0xf1, 0xe7, 0xb8, 0xa1, 0x0b, 0xd5, 0xc5, 0xb8, 0x11, 0x79, 0x59, 0xd1, 0xc2, 0xac, 0x40, 0x49, - 0x1c, 0x1c, 0x0e, 0x7d, 0x2f, 0x24, 0xe6, 0x8b, 0x34, 0x94, 0x1e, 0x8d, 0x48, 0x70, 0x1c, 0x85, - 0xaa, 0x43, 0x3e, 0x24, 0x7d, 0xb2, 0x47, 0xfd, 0xa0, 0x86, 0x9a, 0xa8, 0x55, 0xb0, 0x62, 0x19, - 0x2f, 0x40, 0xb6, 0xef, 0x0e, 0x5c, 0x5a, 0x4b, 0x37, 0x51, 0xab, 0x6c, 0x09, 0x01, 0xdf, 0x85, - 0x6c, 0x48, 0xed, 0x80, 0xd6, 0x32, 0x4d, 0xd4, 0x2a, 0xae, 0xd4, 0xdb, 0x02, 0x8b, 0x76, 0x54, - 0x61, 0x7b, 0x27, 0xc2, 
0xa2, 0x9b, 0x3f, 0x19, 0x37, 0x52, 0xcf, 0xff, 0x6a, 0x20, 0x4b, 0xb8, - 0xe0, 0xcf, 0x20, 0x43, 0x3c, 0xa7, 0xa6, 0xcd, 0xe1, 0xc9, 0x1c, 0xf0, 0x47, 0x50, 0x70, 0xdc, - 0x80, 0xec, 0x51, 0xd7, 0xf7, 0x6a, 0xd9, 0x26, 0x6a, 0x55, 0x56, 0xae, 0x27, 0x90, 0x6c, 0x44, - 0x5b, 0x56, 0x62, 0x85, 0x97, 0x41, 0x0f, 0x0f, 0xec, 0xc0, 0x09, 0x6b, 0xb9, 0x66, 0xa6, 0x55, - 0xe8, 0x2e, 0x5c, 0x8c, 0x1b, 0x55, 0xa1, 0x59, 0xf6, 0x07, 0x2e, 0x25, 0x83, 0x21, 0x3d, 0xb6, - 0xa4, 0xcd, 0x96, 0x96, 0xd7, 0xab, 0x39, 0xd3, 0x82, 0xb2, 0x04, 0x47, 0xc0, 0x85, 0xd7, 0xfe, - 0x77, 0x23, 0x2a, 0x27, 0xe3, 0x06, 0x4a, 0x9a, 0x91, 0x74, 0xe0, 0x77, 0x04, 0xa5, 0xef, 0xed, - 0x5d, 0xd2, 0x8f, 0x10, 0xc7, 0xa0, 0x79, 0xf6, 0x80, 0x48, 0xb4, 0xf9, 0x1a, 0x2f, 0x82, 0x7e, - 0x64, 0xf7, 0x47, 0x24, 0xe4, 0x50, 0xe7, 0x2d, 0x29, 0xcd, 0x8b, 0x35, 0x7a, 0x63, 0xac, 0x51, - 0x8c, 0xb5, 0x79, 0x1b, 0xca, 0x32, 0x5f, 0x09, 0x42, 0x92, 0x1c, 0xc3, 0xa0, 0x10, 0x25, 0x67, - 0x1e, 0x41, 0x79, 0x0a, 0x03, 0x6c, 0x82, 0xde, 0x67, 0x9e, 0xa1, 0xa8, 0xad, 0x0b, 0x17, 0xe3, - 0x86, 0xd4, 0x58, 0xf2, 0x9f, 0x21, 0x4a, 0x3c, 0x1a, 0xb8, 0xbc, 0x54, 0x86, 0xe8, 0x62, 0x82, - 0xe8, 0xb7, 0x1e, 0x0d, 0x8e, 0x23, 0x40, 0xaf, 0x31, 0x06, 0x30, 0x4e, 0x4b, 0x73, 0x2b, 0x5a, - 0x98, 0x47, 0x50, 0x52, 0x2d, 0xf1, 0x7d, 0x28, 0xc4, 0xe3, 0xc7, 0x23, 0xff, 0x77, 0xb9, 0x15, - 0x79, 0x70, 0x9a, 0x86, 0xbc, 0xe8, 0xc4, 0x19, 0xbf, 0x0d, 0x5a, 0xdf, 0xf5, 0x08, 0x6f, 0x42, - 0xa1, 0x9b, 0xbf, 0x18, 0x37, 0xb8, 0x6c, 0xf1, 0x5f, 0xf3, 0x17, 0x04, 0xc5, 0x1d, 0xdb, 0x8d, - 0x1b, 0xb9, 0x00, 0xd9, 0x67, 0x8c, 0x2d, 0xb2, 0x93, 0x42, 0x60, 0x03, 0xe5, 0x90, 0xbe, 0x7d, - 0x7c, 0xcf, 0x0f, 0x78, 0xd7, 0xca, 0x56, 0x2c, 0x27, 0x03, 0xa5, 0x5d, 0x3a, 0x50, 0xd9, 0xb9, - 0x07, 0x6a, 0x4b, 0xcb, 0xa7, 0xab, 0x19, 0xf3, 0x27, 0x04, 0x25, 0x91, 0x99, 0x6c, 0xd9, 0x17, - 0xa0, 0x0b, 0xfe, 0x49, 0x3c, 0xae, 0xa4, 0x2d, 0x28, 0x94, 0x95, 0x2e, 0xf8, 0x1b, 0xa8, 0x38, - 0x81, 0x3f, 0x1c, 0x12, 0x67, 0x5b, 0x72, 0x3f, 0x3d, 0xcb, 
0xfd, 0x0d, 0x75, 0xdf, 0x9a, 0x31, - 0x37, 0x5f, 0x20, 0x28, 0x6f, 0x13, 0xde, 0x34, 0x09, 0x55, 0x5c, 0x22, 0x7a, 0xe3, 0x3b, 0x23, - 0x3d, 0xef, 0x9d, 0xb1, 0x08, 0x7a, 0x2f, 0xf0, 0x47, 0xc3, 0xb0, 0x96, 0x11, 0xb4, 0x15, 0x92, - 0xb9, 0x05, 0x95, 0x28, 0x39, 0x89, 0xd6, 0x2a, 0xe8, 0x21, 0xd7, 0xc8, 0x21, 0xaf, 0x2b, 0x68, - 0x71, 0xfd, 0xa6, 0x43, 0x3c, 0xea, 0xee, 0xbb, 0x24, 0xe8, 0x6a, 0x2c, 0x88, 0x25, 0xed, 0xcd, - 0x9f, 0x11, 0x54, 0x67, 0x4d, 0xf0, 0xd7, 0xca, 0x18, 0xb0, 0xe3, 0x6e, 0x5d, 0x7d, 0x5c, 0x9b, - 0x4f, 0x5a, 0xc8, 0xe9, 0x1c, 0x8d, 0x48, 0xfd, 0x0e, 0x14, 0x15, 0x35, 0xae, 0x42, 0xe6, 0x90, - 0x44, 0x24, 0x63, 0x4b, 0x46, 0x23, 0x3e, 0x82, 0x82, 0xa7, 0x96, 0x10, 0xee, 0xa6, 0x57, 0x11, - 0xa3, 0x68, 0x79, 0xaa, 0x37, 0x78, 0x15, 0xb4, 0xfd, 0xc0, 0x1f, 0xcc, 0x05, 0x3c, 0xf7, 0xc0, - 0x9f, 0x40, 0x9a, 0xfa, 0x73, 0xc1, 0x9e, 0xa6, 0x3e, 0x43, 0x5d, 0x16, 0x9f, 0xe1, 0xc9, 0x49, - 0xc9, 0xfc, 0x0d, 0xc1, 0x35, 0xe6, 0x23, 0x10, 0x58, 0x3f, 0x18, 0x79, 0x87, 0xb8, 0x05, 0x55, - 0x16, 0xe9, 0xa9, 0xeb, 0xf5, 0x48, 0x48, 0x49, 0xf0, 0xd4, 0x75, 0x64, 0x99, 0x15, 0xa6, 0xdf, - 0x94, 0xea, 0x4d, 0x07, 0x2f, 0x41, 0x6e, 0x14, 0x0a, 0x03, 0x51, 0xb3, 0xce, 0xc4, 0x4d, 0x07, - 0xbf, 0xaf, 0x84, 0x63, 0x58, 0x2b, 0xaf, 0x02, 0xc7, 0xf0, 0xa1, 0xed, 0x06, 0xf1, 0xdd, 0x73, - 0x1b, 0xf4, 0x3d, 0x16, 0x38, 0xac, 0x69, 0xdc, 0xf8, 0x5a, 0x62, 0xcc, 0x13, 0xb2, 0xe4, 0xb6, - 0xf9, 0x29, 0x14, 0x62, 0xef, 0x4b, 0xef, 0xeb, 0x4b, 0x3b, 0x60, 0xde, 0x84, 0xac, 0x28, 0x0c, - 0x83, 0xe6, 0xd8, 0xd4, 0xe6, 0x2e, 0x25, 0x8b, 0xaf, 0xcd, 0x1a, 0x2c, 0xee, 0x04, 0xb6, 0x17, - 0xee, 0x93, 0x80, 0x1b, 0xc5, 0xf4, 0x33, 0x6f, 0xc0, 0x75, 0x36, 0xbc, 0x24, 0x08, 0xd7, 0xfd, - 0x91, 0x47, 0xe5, 0xcc, 0x98, 0xcb, 0xb0, 0x30, 0xad, 0x96, 0x6c, 0x5d, 0x80, 0xec, 0x1e, 0x53, - 0xf0, 0xd3, 0xcb, 0x96, 0x10, 0xde, 0xbb, 0x05, 0x85, 0xf8, 0x19, 0xc4, 0x45, 0xc8, 0xdd, 0x7b, - 0x60, 0x3d, 0x5e, 0xb3, 0x36, 0xaa, 0x29, 0x5c, 0x82, 0x7c, 0x77, 0x6d, 0xfd, 0x3b, 0x2e, 0xa1, - 
0x95, 0x35, 0xd0, 0xd9, 0x07, 0x01, 0x09, 0xf0, 0xe7, 0xa0, 0xb1, 0x15, 0xbe, 0x91, 0xa0, 0xa0, - 0x7c, 0x83, 0xd4, 0x17, 0x67, 0xd5, 0x32, 0xdb, 0xd4, 0xca, 0x1f, 0x69, 0xc8, 0xb1, 0x67, 0x92, - 0x71, 0xfd, 0x4b, 0xc8, 0xf2, 0x17, 0x13, 0x2b, 0xe6, 0xea, 0xf7, 0x45, 0x7d, 0xe9, 0x35, 0x7d, - 0x74, 0xce, 0x87, 0x88, 0x5d, 0x0b, 0x1c, 0x67, 0xd5, 0x5b, 0x7d, 0x2b, 0x55, 0xef, 0xa9, 0x37, - 0xc9, 0x4c, 0xe1, 0x3b, 0xa0, 0x31, 0x78, 0xd4, 0xf4, 0x95, 0xcb, 0x59, 0x4d, 0x5f, 0xbd, 0x19, - 0x79, 0xd8, 0xaf, 0x40, 0x17, 0x34, 0xc4, 0x4b, 0xb3, 0xa3, 0x19, 0xb9, 0xd7, 0x5e, 0xdf, 0x88, - 0x23, 0x3f, 0x10, 0x97, 0x6d, 0xd4, 0x18, 0xfc, 0xce, 0x74, 0xa8, 0x99, 0x3e, 0xd6, 0x8d, 0xab, - 0xb6, 0x63, 0x40, 0x7f, 0x80, 0x7c, 0xc4, 0x75, 0xfc, 0x08, 0x2a, 0xd3, 0x34, 0xc1, 0x6f, 0x29, - 0xfe, 0xd3, 0x03, 0x54, 0x6f, 0x2a, 0x5b, 0x97, 0x73, 0x2b, 0xd5, 0x42, 0xdd, 0x27, 0xa7, 0x67, - 0x46, 0xea, 0xe5, 0x99, 0x91, 0x7a, 0x75, 0x66, 0xa0, 0x1f, 0x27, 0x06, 0xfa, 0x75, 0x62, 0xa0, - 0x93, 0x89, 0x81, 0x4e, 0x27, 0x06, 0xfa, 0x7b, 0x62, 0xa0, 0x7f, 0x26, 0x46, 0xea, 0xd5, 0xc4, - 0x40, 0xcf, 0xcf, 0x8d, 0xd4, 0xe9, 0xb9, 0x91, 0x7a, 0x79, 0x6e, 0xa4, 0x9e, 0xbc, 0xab, 0x7e, - 0xc1, 0x06, 0xf6, 0xbe, 0xed, 0xd9, 0x9d, 0xbe, 0x7f, 0xe8, 0x76, 0xd4, 0x2f, 0xe4, 0x5d, 0x9d, - 0xff, 0x7d, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x02, 0x6d, 0x76, 0x3a, 0x38, 0x0b, 0x00, - 0x00, + // 1302 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x4b, 0x6f, 0xdb, 0x46, + 0x10, 0xd6, 0x4a, 0x14, 0x25, 0x8d, 0x1e, 0x11, 0x36, 0x8e, 0xad, 0x2a, 0x09, 0x25, 0x10, 0x41, + 0x22, 0xb4, 0xa9, 0xd4, 0xaa, 0xaf, 0x3c, 0xfa, 0x80, 0x15, 0x37, 0x88, 0xdd, 0xa2, 0x4e, 0x68, + 0x03, 0x01, 0x02, 0x14, 0x01, 0x6d, 0xae, 0x25, 0xc2, 0x12, 0xa9, 0x90, 0x94, 0x01, 0xdf, 0xfa, + 0x03, 0x5a, 0x20, 0xb7, 0x1e, 0x02, 0xf4, 0x5c, 0xf4, 0xd0, 0xdf, 0xe1, 0xa3, 0xd1, 0x53, 0xd0, + 0x83, 0x5a, 0xcb, 0x97, 0xc2, 0xa7, 0xfc, 0x84, 0x62, 0x1f, 0x24, 0x57, 0x8a, 
0xdd, 0x5a, 0xbe, + 0x88, 0x3b, 0xb3, 0x33, 0xb3, 0xb3, 0xdf, 0x7e, 0x33, 0xbb, 0x82, 0xab, 0xc3, 0xdd, 0x6e, 0xab, + 0xef, 0x76, 0x87, 0x9e, 0x1b, 0xb8, 0xd1, 0xa0, 0xc9, 0x7e, 0x71, 0x36, 0x94, 0xab, 0xb5, 0xae, + 0xeb, 0x76, 0xfb, 0xa4, 0xc5, 0xa4, 0xad, 0xd1, 0x4e, 0x2b, 0xb0, 0x07, 0xc4, 0x0f, 0xcc, 0xc1, + 0x90, 0x9b, 0x56, 0xdf, 0xef, 0xda, 0x41, 0x6f, 0xb4, 0xd5, 0xdc, 0x76, 0x07, 0xad, 0xae, 0xdb, + 0x75, 0x63, 0x4b, 0x2a, 0xf1, 0xe8, 0x74, 0xc4, 0xcd, 0xf5, 0xa7, 0x90, 0x7f, 0x3c, 0xf2, 0x7b, + 0x06, 0x79, 0x31, 0x22, 0x7e, 0x80, 0x1f, 0x41, 0xc6, 0x0f, 0x3c, 0x62, 0x0e, 0xfc, 0x0a, 0xaa, + 0xa7, 0x1a, 0xf9, 0xf6, 0x52, 0x33, 0x4a, 0x65, 0x83, 0x4d, 0x2c, 0x5b, 0xe6, 0x30, 0x20, 0x5e, + 0xe7, 0xca, 0x9f, 0xe3, 0x9a, 0xca, 0x55, 0x27, 0xe3, 0x5a, 0xe8, 0x65, 0x84, 0x03, 0xbd, 0x04, + 0x05, 0x1e, 0xd8, 0x1f, 0xba, 0x8e, 0x4f, 0xf4, 0x57, 0x49, 0x28, 0x3c, 0x19, 0x11, 0x6f, 0x3f, + 0x5c, 0xaa, 0x0a, 0x59, 0x9f, 0xf4, 0xc9, 0x76, 0xe0, 0x7a, 0x15, 0x54, 0x47, 0x8d, 0x9c, 0x11, + 0xc9, 0x78, 0x01, 0xd2, 0x7d, 0x7b, 0x60, 0x07, 0x95, 0x64, 0x1d, 0x35, 0x8a, 0x06, 0x17, 0xf0, + 0x3d, 0x48, 0xfb, 0x81, 0xe9, 0x05, 0x95, 0x54, 0x1d, 0x35, 0xf2, 0xed, 0x6a, 0x93, 0x63, 0xd1, + 0x0c, 0x77, 0xd8, 0xdc, 0x0c, 0xb1, 0xe8, 0x64, 0x0f, 0xc6, 0xb5, 0xc4, 0xcb, 0xbf, 0x6a, 0xc8, + 0xe0, 0x2e, 0xf8, 0x53, 0x48, 0x11, 0xc7, 0xaa, 0x28, 0x73, 0x78, 0x52, 0x07, 0xfc, 0x21, 0xe4, + 0x2c, 0xdb, 0x23, 0xdb, 0x81, 0xed, 0x3a, 0x95, 0x74, 0x1d, 0x35, 0x4a, 0xed, 0xcb, 0x31, 0x24, + 0x2b, 0xe1, 0x94, 0x11, 0x5b, 0xe1, 0xdb, 0xa0, 0xfa, 0x3d, 0xd3, 0xb3, 0xfc, 0x4a, 0xa6, 0x9e, + 0x6a, 0xe4, 0x3a, 0x0b, 0x27, 0xe3, 0x5a, 0x99, 0x6b, 0x6e, 0xbb, 0x03, 0x3b, 0x20, 0x83, 0x61, + 0xb0, 0x6f, 0x08, 0x9b, 0x35, 0x25, 0xab, 0x96, 0x33, 0xfa, 0x1f, 0x08, 0xf0, 0x86, 0x39, 0x18, + 0xf6, 0xc9, 0xb9, 0x31, 0x8a, 0xd0, 0x48, 0x5e, 0x18, 0x8d, 0xd4, 0xbc, 0x68, 0xc4, 0x5b, 0x53, + 0xfe, 0x7f, 0x6b, 0xfa, 0x3a, 0x5c, 0x9e, 0xda, 0x13, 0x67, 0x02, 0xbe, 0x03, 0xaa, 0x4f, 0x3c, + 0x9b, 0x84, 0x14, 
0x2b, 0x4b, 0x14, 0x63, 0xfa, 0x4e, 0xe9, 0x60, 0x5c, 0x43, 0x8c, 0x5f, 0x4c, + 0x36, 0x84, 0xbd, 0x6e, 0x40, 0x71, 0x3a, 0xd4, 0xf2, 0xb9, 0xe9, 0x1a, 0x87, 0x64, 0xea, 0x98, + 0xa7, 0xbf, 0x23, 0x28, 0x7c, 0x6b, 0x6e, 0x91, 0x7e, 0x88, 0x39, 0x06, 0xc5, 0x31, 0x07, 0x44, + 0xe0, 0xcd, 0xc6, 0x78, 0x11, 0xd4, 0x3d, 0xb3, 0x3f, 0x22, 0x3e, 0x03, 0x3b, 0x6b, 0x08, 0x69, + 0x5e, 0x46, 0xa2, 0x0b, 0x33, 0x12, 0x45, 0x67, 0xa0, 0xdf, 0x82, 0xa2, 0xc8, 0x57, 0x80, 0x10, + 0x27, 0x47, 0x31, 0xc8, 0x85, 0xc9, 0xe9, 0x7b, 0x50, 0x9c, 0xc2, 0x00, 0xeb, 0xa0, 0xf6, 0xa9, + 0xa7, 0xcf, 0xf7, 0xd6, 0x81, 0x93, 0x71, 0x4d, 0x68, 0x0c, 0xf1, 0xa5, 0x88, 0x12, 0x27, 0x60, + 0xa7, 0x93, 0x64, 0x88, 0x2e, 0xc6, 0x88, 0x7e, 0xed, 0x04, 0xde, 0x7e, 0x08, 0xe8, 0x25, 0xca, + 0x0c, 0x5a, 0xf9, 0xc2, 0xdc, 0x08, 0x07, 0xfa, 0x1e, 0x14, 0x64, 0x4b, 0xfc, 0x08, 0x72, 0x51, + 0x93, 0x62, 0x2b, 0xff, 0xf7, 0x76, 0x4b, 0x22, 0x70, 0x32, 0xf0, 0xd9, 0xa6, 0x63, 0x67, 0x7c, + 0x0d, 0x94, 0xbe, 0xed, 0x10, 0x76, 0x08, 0xb9, 0x4e, 0xf6, 0x64, 0x5c, 0x63, 0xb2, 0xc1, 0x7e, + 0xf5, 0x01, 0xa8, 0x9c, 0x6e, 0xf8, 0xc6, 0xec, 0x8a, 0xa9, 0x8e, 0xca, 0x23, 0xca, 0xd1, 0x6a, + 0x90, 0x66, 0x48, 0xb1, 0x70, 0xa8, 0x93, 0x3b, 0x19, 0xd7, 0xb8, 0xc2, 0xe0, 0x1f, 0xba, 0x5c, + 0xcf, 0xf4, 0x7b, 0xec, 0x70, 0x15, 0xbe, 0x1c, 0x95, 0x0d, 0xf6, 0xab, 0xdb, 0x20, 0xe8, 0x79, + 0x2e, 0x5c, 0xef, 0x43, 0xc6, 0x67, 0xc9, 0x85, 0xb8, 0xca, 0xac, 0x67, 0x13, 0x31, 0xa2, 0xc2, + 0xd0, 0x08, 0x07, 0xfa, 0xcf, 0x08, 0xf2, 0x9b, 0xa6, 0x1d, 0x51, 0x74, 0x01, 0xd2, 0x2f, 0x68, + 0x1d, 0x08, 0x8e, 0x72, 0x81, 0x36, 0x0b, 0x8b, 0xf4, 0xcd, 0xfd, 0x87, 0xae, 0xc7, 0x52, 0x2e, + 0x1a, 0x91, 0x1c, 0x37, 0x54, 0xe5, 0xd4, 0x86, 0x9a, 0x9e, 0xbb, 0x85, 0xac, 0x29, 0xd9, 0x64, + 0x39, 0xa5, 0xff, 0x88, 0xa0, 0xc0, 0x33, 0x13, 0x64, 0xbc, 0x0f, 0x2a, 0xaf, 0x2c, 0x71, 0xd2, + 0x67, 0x16, 0x24, 0x48, 0xc5, 0x28, 0x5c, 0xf0, 0x57, 0x50, 0xb2, 0x3c, 0x77, 0x38, 0x24, 0xd6, + 0x86, 0xa8, 0xea, 0xe4, 0x6c, 0x55, 0xaf, 0xc8, 0xf3, 
0xc6, 0x8c, 0xb9, 0xfe, 0x0a, 0x41, 0x51, + 0xf4, 0x0c, 0x01, 0x55, 0xb4, 0x45, 0x74, 0xe1, 0x2e, 0x99, 0x9c, 0xb7, 0x4b, 0x2e, 0x82, 0xda, + 0xf5, 0xdc, 0xd1, 0xd0, 0xaf, 0xa4, 0x78, 0x41, 0x72, 0x49, 0x5f, 0x83, 0x52, 0x98, 0xdc, 0x19, + 0xad, 0xb0, 0x3a, 0xdb, 0x0a, 0x57, 0x2d, 0xe2, 0x04, 0xf6, 0x8e, 0x4d, 0xbc, 0x8e, 0x42, 0x17, + 0x89, 0x5a, 0xe1, 0x4f, 0x08, 0xca, 0xb3, 0x26, 0xf8, 0x4b, 0x89, 0x88, 0x34, 0xdc, 0xcd, 0xb3, + 0xc3, 0x35, 0x59, 0x0f, 0xf1, 0x59, 0xa1, 0x86, 0x24, 0xad, 0xde, 0x85, 0xbc, 0xa4, 0xc6, 0x65, + 0x48, 0xed, 0x92, 0x90, 0x64, 0x74, 0x48, 0x69, 0x14, 0x97, 0x4c, 0x4e, 0xd4, 0xc9, 0xbd, 0xe4, + 0x1d, 0x44, 0x29, 0x5a, 0x9c, 0x3a, 0x1b, 0x7c, 0x07, 0x94, 0x1d, 0xcf, 0x1d, 0xcc, 0x05, 0x3c, + 0xf3, 0xc0, 0x1f, 0x43, 0x32, 0x70, 0xe7, 0x82, 0x3d, 0x19, 0xb8, 0x14, 0x75, 0xb1, 0xf9, 0x14, + 0x4b, 0x4e, 0x48, 0xfa, 0x6f, 0x08, 0x2e, 0x51, 0x1f, 0x8e, 0xc0, 0x83, 0xde, 0xc8, 0xd9, 0xc5, + 0x0d, 0x28, 0xd3, 0x95, 0x9e, 0xdb, 0x4e, 0x97, 0xf8, 0x01, 0xf1, 0x9e, 0xdb, 0x96, 0xd8, 0x66, + 0x89, 0xea, 0x57, 0x85, 0x7a, 0xd5, 0xc2, 0x4b, 0x90, 0x19, 0xf9, 0xdc, 0x80, 0xef, 0x59, 0xa5, + 0xe2, 0xaa, 0x85, 0xdf, 0x93, 0x96, 0xa3, 0x58, 0x4b, 0xaf, 0x02, 0x86, 0xe1, 0x63, 0xd3, 0xf6, + 0xa2, 0xea, 0xbf, 0x05, 0xea, 0x36, 0x5d, 0x98, 0xdf, 0x9b, 0xf9, 0xf6, 0xa5, 0xd8, 0x98, 0x25, + 0x64, 0x88, 0x69, 0xfd, 0x13, 0xc8, 0x45, 0xde, 0xa7, 0xde, 0x44, 0xa7, 0x9e, 0x80, 0x7e, 0x15, + 0xd2, 0x7c, 0x63, 0x18, 0x14, 0xcb, 0x0c, 0x4c, 0xe6, 0x52, 0x30, 0xd8, 0x58, 0xaf, 0xc0, 0xe2, + 0xa6, 0x67, 0x3a, 0xfe, 0x0e, 0xf1, 0x98, 0x51, 0x44, 0x3f, 0xfd, 0x0a, 0x5c, 0xa6, 0xc5, 0x4b, + 0x3c, 0xff, 0x81, 0x3b, 0x72, 0x02, 0x51, 0x33, 0xfa, 0x6d, 0x58, 0x98, 0x56, 0x0b, 0xb6, 0x2e, + 0x40, 0x7a, 0x9b, 0x2a, 0x58, 0xf4, 0xa2, 0xc1, 0x85, 0x77, 0x6f, 0x42, 0x2e, 0x7a, 0x06, 0xe1, + 0x3c, 0x64, 0x1e, 0xae, 0x1b, 0x4f, 0x97, 0x8d, 0x95, 0x72, 0x02, 0x17, 0x20, 0xdb, 0x59, 0x7e, + 0xf0, 0x0d, 0x93, 0x50, 0x7b, 0x19, 0x54, 0xfa, 0x20, 0x24, 0x1e, 0xfe, 0x0c, 0x14, 0x3a, 
0xc2, + 0x57, 0x62, 0x14, 0xa4, 0x37, 0x68, 0x75, 0x71, 0x56, 0x2d, 0xb2, 0x4d, 0xb4, 0x7f, 0x49, 0x41, + 0x86, 0x3e, 0x00, 0x28, 0xd7, 0x3f, 0x87, 0x34, 0x7b, 0x0b, 0x60, 0xc9, 0x5c, 0x7e, 0x3b, 0x55, + 0x97, 0xde, 0xd2, 0x87, 0x71, 0x3e, 0x40, 0xf8, 0x3b, 0xc8, 0x33, 0xa5, 0xb8, 0x30, 0xae, 0xcd, + 0x36, 0xe3, 0xa9, 0x48, 0xd7, 0xcf, 0x98, 0x95, 0xe2, 0xdd, 0x83, 0x34, 0x3b, 0x37, 0x39, 0x1b, + 0xf9, 0x55, 0x21, 0x67, 0x33, 0x75, 0x7b, 0xeb, 0x09, 0x7c, 0x17, 0x14, 0x0a, 0xb7, 0x0c, 0x87, + 0xd4, 0xec, 0x65, 0x38, 0xe4, 0x4e, 0xcb, 0x96, 0xfd, 0x22, 0xba, 0x83, 0x96, 0x66, 0x4b, 0x3d, + 0x74, 0xaf, 0xbc, 0x3d, 0x11, 0xad, 0xbc, 0xce, 0x9b, 0x77, 0x78, 0xd0, 0xf8, 0xfa, 0xf4, 0x52, + 0x33, 0xbc, 0xa8, 0x6a, 0x67, 0x4d, 0x47, 0x07, 0xf4, 0x3d, 0x64, 0xc3, 0xda, 0xc1, 0x4f, 0xa0, + 0x34, 0x4d, 0x3b, 0xfc, 0x8e, 0xe4, 0x3f, 0x5d, 0x90, 0xd5, 0xba, 0x34, 0x75, 0x3a, 0x57, 0x13, + 0x0d, 0xd4, 0x79, 0x76, 0x78, 0xa4, 0x25, 0x5e, 0x1f, 0x69, 0x89, 0x37, 0x47, 0x1a, 0xfa, 0x61, + 0xa2, 0xa1, 0x5f, 0x27, 0x1a, 0x3a, 0x98, 0x68, 0xe8, 0x70, 0xa2, 0xa1, 0xbf, 0x27, 0x1a, 0xfa, + 0x67, 0xa2, 0x25, 0xde, 0x4c, 0x34, 0xf4, 0xf2, 0x58, 0x4b, 0x1c, 0x1e, 0x6b, 0x89, 0xd7, 0xc7, + 0x5a, 0xe2, 0xd9, 0x0d, 0xf9, 0x1f, 0x91, 0x67, 0xee, 0x98, 0x8e, 0xd9, 0xea, 0xbb, 0xbb, 0x76, + 0x4b, 0xfe, 0xc7, 0xb5, 0xa5, 0xb2, 0xcf, 0x47, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x38, 0x69, + 0xf4, 0xf4, 0x88, 0x0d, 0x00, 0x00, } func (x Direction) String() string { @@ -1251,6 +1477,73 @@ func (this *QueryRequest) Equal(that interface{}) bool { } return true } +func (this *SampleQueryRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SampleQueryRequest) + if !ok { + that2, ok := that.(SampleQueryRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Selector != that1.Selector { + return false + } + if !this.Start.Equal(that1.Start) { + 
return false + } + if !this.End.Equal(that1.End) { + return false + } + if len(this.Shards) != len(that1.Shards) { + return false + } + for i := range this.Shards { + if this.Shards[i] != that1.Shards[i] { + return false + } + } + return true +} +func (this *SampleQueryResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SampleQueryResponse) + if !ok { + that2, ok := that.(SampleQueryResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Series) != len(that1.Series) { + return false + } + for i := range this.Series { + if !this.Series[i].Equal(that1.Series[i]) { + return false + } + } + return true +} func (this *QueryResponse) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1409,14 +1702,14 @@ func (this *EntryAdapter) Equal(that interface{}) bool { } return true } -func (this *TailRequest) Equal(that interface{}) bool { +func (this *Sample) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*TailRequest) + that1, ok := that.(*Sample) if !ok { - that2, ok := that.(TailRequest) + that2, ok := that.(Sample) if ok { that1 = &that2 } else { @@ -1428,28 +1721,25 @@ func (this *TailRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if this.Query != that1.Query { - return false - } - if this.DelayFor != that1.DelayFor { + if this.Timestamp != that1.Timestamp { return false } - if this.Limit != that1.Limit { + if this.Value != that1.Value { return false } - if !this.Start.Equal(that1.Start) { + if this.Hash != that1.Hash { return false } return true } -func (this *TailResponse) Equal(that interface{}) bool { +func (this *Series) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*TailResponse) + that1, ok := that.(*Series) if !ok { - that2, ok := that.(TailResponse) + that2, 
ok := that.(Series) if ok { that1 = &that2 } else { @@ -1461,29 +1751,94 @@ func (this *TailResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if that1.Stream == nil { - if this.Stream != nil { - return false - } - } else if !this.Stream.Equal(*that1.Stream) { + if this.Labels != that1.Labels { return false } - if len(this.DroppedStreams) != len(that1.DroppedStreams) { + if len(this.Samples) != len(that1.Samples) { return false } - for i := range this.DroppedStreams { - if !this.DroppedStreams[i].Equal(that1.DroppedStreams[i]) { + for i := range this.Samples { + if !this.Samples[i].Equal(&that1.Samples[i]) { return false } } return true } -func (this *SeriesRequest) Equal(that interface{}) bool { +func (this *TailRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*SeriesRequest) + that1, ok := that.(*TailRequest) + if !ok { + that2, ok := that.(TailRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Query != that1.Query { + return false + } + if this.DelayFor != that1.DelayFor { + return false + } + if this.Limit != that1.Limit { + return false + } + if !this.Start.Equal(that1.Start) { + return false + } + return true +} +func (this *TailResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TailResponse) + if !ok { + that2, ok := that.(TailResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Stream == nil { + if this.Stream != nil { + return false + } + } else if !this.Stream.Equal(*that1.Stream) { + return false + } + if len(this.DroppedStreams) != len(that1.DroppedStreams) { + return false + } + for i := range this.DroppedStreams { + if !this.DroppedStreams[i].Equal(that1.DroppedStreams[i]) { + return false 
+ } + } + return true +} +func (this *SeriesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesRequest) if !ok { that2, ok := that.(SeriesRequest) if ok { @@ -1795,6 +2150,29 @@ func (this *QueryRequest) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *SampleQueryRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&logproto.SampleQueryRequest{") + s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "Shards: "+fmt.Sprintf("%#v", this.Shards)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SampleQueryResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.SampleQueryResponse{") + s = append(s, "Series: "+fmt.Sprintf("%#v", this.Series)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *QueryResponse) GoString() string { if this == nil { return "nil" @@ -1856,6 +2234,35 @@ func (this *EntryAdapter) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *Sample) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&logproto.Sample{") + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "Hash: "+fmt.Sprintf("%#v", this.Hash)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Series) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.Series{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + if this.Samples != nil { + vs := make([]*Sample, len(this.Samples)) + for i := range vs { + 
vs[i] = &this.Samples[i] + } + s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *TailRequest) GoString() string { if this == nil { return "nil" @@ -2096,6 +2503,7 @@ var _Pusher_serviceDesc = grpc.ServiceDesc{ // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type QuerierClient interface { Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Querier_QueryClient, error) + QuerySample(ctx context.Context, in *SampleQueryRequest, opts ...grpc.CallOption) (Querier_QuerySampleClient, error) Label(ctx context.Context, in *LabelRequest, opts ...grpc.CallOption) (*LabelResponse, error) Tail(ctx context.Context, in *TailRequest, opts ...grpc.CallOption) (Querier_TailClient, error) Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (*SeriesResponse, error) @@ -2142,6 +2550,38 @@ func (x *querierQueryClient) Recv() (*QueryResponse, error) { return m, nil } +func (c *querierClient) QuerySample(ctx context.Context, in *SampleQueryRequest, opts ...grpc.CallOption) (Querier_QuerySampleClient, error) { + stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[1], "/logproto.Querier/QuerySample", opts...) 
+ if err != nil { + return nil, err + } + x := &querierQuerySampleClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Querier_QuerySampleClient interface { + Recv() (*SampleQueryResponse, error) + grpc.ClientStream +} + +type querierQuerySampleClient struct { + grpc.ClientStream +} + +func (x *querierQuerySampleClient) Recv() (*SampleQueryResponse, error) { + m := new(SampleQueryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *querierClient) Label(ctx context.Context, in *LabelRequest, opts ...grpc.CallOption) (*LabelResponse, error) { out := new(LabelResponse) err := c.cc.Invoke(ctx, "/logproto.Querier/Label", in, out, opts...) @@ -2152,7 +2592,7 @@ func (c *querierClient) Label(ctx context.Context, in *LabelRequest, opts ...grp } func (c *querierClient) Tail(ctx context.Context, in *TailRequest, opts ...grpc.CallOption) (Querier_TailClient, error) { - stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[1], "/logproto.Querier/Tail", opts...) + stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[2], "/logproto.Querier/Tail", opts...) if err != nil { return nil, err } @@ -2204,6 +2644,7 @@ func (c *querierClient) TailersCount(ctx context.Context, in *TailersCountReques // QuerierServer is the server API for Querier service. 
type QuerierServer interface { Query(*QueryRequest, Querier_QueryServer) error + QuerySample(*SampleQueryRequest, Querier_QuerySampleServer) error Label(context.Context, *LabelRequest) (*LabelResponse, error) Tail(*TailRequest, Querier_TailServer) error Series(context.Context, *SeriesRequest) (*SeriesResponse, error) @@ -2235,6 +2676,27 @@ func (x *querierQueryServer) Send(m *QueryResponse) error { return x.ServerStream.SendMsg(m) } +func _Querier_QuerySample_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SampleQueryRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QuerierServer).QuerySample(m, &querierQuerySampleServer{stream}) +} + +type Querier_QuerySampleServer interface { + Send(*SampleQueryResponse) error + grpc.ServerStream +} + +type querierQuerySampleServer struct { + grpc.ServerStream +} + +func (x *querierQuerySampleServer) Send(m *SampleQueryResponse) error { + return x.ServerStream.SendMsg(m) +} + func _Querier_Label_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(LabelRequest) if err := dec(in); err != nil { @@ -2333,6 +2795,11 @@ var _Querier_serviceDesc = grpc.ServiceDesc{ Handler: _Querier_Query_Handler, ServerStreams: true, }, + { + StreamName: "QuerySample", + Handler: _Querier_QuerySample_Handler, + ServerStreams: true, + }, { StreamName: "Tail", Handler: _Querier_Tail_Handler, @@ -2553,6 +3020,91 @@ func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *SampleQueryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SampleQueryRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Selector) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, 
uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End))) + n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Shards) > 0 { + for _, s := range m.Shards { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *SampleQueryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SampleQueryResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Series) > 0 { + for _, msg := range m.Series { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *QueryResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2618,21 +3170,21 @@ func (m *LabelRequest) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start))) - n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i:]) + n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n5 } if m.End != nil { dAtA[i] = 0x22 i++ i = encodeVarintLogproto(dAtA, i, 
uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.End))) - n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i:]) + n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n6 } return i, nil } @@ -2724,11 +3276,11 @@ func (m *EntryAdapter) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp))) - n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n7 if len(m.Line) > 0 { dAtA[i] = 0x12 i++ @@ -2738,7 +3290,7 @@ func (m *EntryAdapter) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *TailRequest) Marshal() (dAtA []byte, err error) { +func (m *Sample) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2748,39 +3300,31 @@ func (m *TailRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TailRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Query) > 0 { - dAtA[i] = 0xa + if m.Timestamp != 0 { + dAtA[i] = 0x8 i++ - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Query))) - i += copy(dAtA[i:], m.Query) + i = encodeVarintLogproto(dAtA, i, uint64(m.Timestamp)) } - if m.DelayFor != 0 { - dAtA[i] = 0x18 + if m.Value != 0 { + dAtA[i] = 0x11 i++ - i = encodeVarintLogproto(dAtA, i, uint64(m.DelayFor)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i += 8 } - if m.Limit != 0 { - dAtA[i] = 0x20 + if m.Hash != 0 { + dAtA[i] = 0x18 i++ - i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) - } - dAtA[i] = 0x2a - i++ - i = encodeVarintLogproto(dAtA, i, 
uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) - n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintLogproto(dAtA, i, uint64(m.Hash)) } - i += n6 return i, nil } -func (m *TailResponse) Marshal() (dAtA []byte, err error) { +func (m *Series) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2790,22 +3334,100 @@ func (m *TailResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TailResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *Series) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.Stream != nil { + if len(m.Labels) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintLogproto(dAtA, i, uint64(m.Stream.Size())) - n7, err := m.Stream.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels))) + i += copy(dAtA[i:], m.Labels) } - if len(m.DroppedStreams) > 0 { + if len(m.Samples) > 0 { + for _, msg := range m.Samples { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TailRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TailRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Query) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Query))) + i += copy(dAtA[i:], m.Query) + } + if m.DelayFor != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.DelayFor)) + } + if m.Limit != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) + } + dAtA[i] = 0x2a + i++ + i = 
encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *TailResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TailResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Stream != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Stream.Size())) + n9, err := m.Stream.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if len(m.DroppedStreams) > 0 { for _, msg := range m.DroppedStreams { dAtA[i] = 0x12 i++ @@ -2838,19 +3460,19 @@ func (m *SeriesRequest) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) - n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n10 dAtA[i] = 0x12 i++ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End))) - n9, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:]) + n11, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n11 if len(m.Groups) > 0 { for _, s := range m.Groups { dAtA[i] = 0x1a @@ -2952,19 +3574,19 @@ func (m *DroppedStream) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.From))) - n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i:]) + n12, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, 
dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n12 dAtA[i] = 0x12 i++ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.To))) - n11, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i:]) + n13, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n13 if len(m.Labels) > 0 { dAtA[i] = 0x1a i++ @@ -3203,6 +3825,44 @@ func (m *QueryRequest) Size() (n int) { return n } +func (m *SampleQueryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Selector) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovLogproto(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End) + n += 1 + l + sovLogproto(uint64(l)) + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *SampleQueryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Series) > 0 { + for _, e := range m.Series { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + func (m *QueryResponse) Size() (n int) { if m == nil { return 0 @@ -3291,6 +3951,43 @@ func (m *EntryAdapter) Size() (n int) { return n } +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sovLogproto(uint64(m.Timestamp)) + } + if m.Value != 0 { + n += 9 + } + if m.Hash != 0 { + n += 1 + sovLogproto(uint64(m.Hash)) + } + return n +} + +func (m *Series) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Labels) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + func (m *TailRequest) Size() (n int) { if m == nil { return 0 
@@ -3535,6 +4232,29 @@ func (this *QueryRequest) String() string { }, "") return s } +func (this *SampleQueryRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SampleQueryRequest{`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Shards:` + fmt.Sprintf("%v", this.Shards) + `,`, + `}`, + }, "") + return s +} +func (this *SampleQueryResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SampleQueryResponse{`, + `Series:` + fmt.Sprintf("%v", this.Series) + `,`, + `}`, + }, "") + return s +} func (this *QueryResponse) String() string { if this == nil { return "nil" @@ -3590,6 +4310,29 @@ func (this *EntryAdapter) String() string { }, "") return s } +func (this *Sample) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sample{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Hash:` + fmt.Sprintf("%v", this.Hash) + `,`, + `}`, + }, "") + return s +} +func (this *Series) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Series{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Samples), "Sample", "Sample", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *TailRequest) String() string { if this == nil { return "nil" @@ -4099,7 +4842,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryResponse) Unmarshal(dAtA []byte) error { +func (m *SampleQueryRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4122,15 +4865,47 @@ func (m *QueryResponse) 
Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SampleQueryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SampleQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4157,30 +4932,268 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Streams = append(m.Streams, Stream{}) - if err := m.Streams[len(m.Streams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogproto(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field End", wireType) } - if skippy < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthLogproto } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthLogproto } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SampleQueryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SampleQueryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SampleQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Series = append(m.Series, Series{}) + if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Streams = append(m.Streams, Stream{}) + if err := m.Streams[len(m.Streams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if iNdEx > l { return io.ErrUnexpectedEOF } @@ -4685,6 +5698,227 @@ func (m *EntryAdapter) Unmarshal(dAtA []byte) error { } return nil } +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Series) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Series: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Series: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TailRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto index d23c773739..20bff77e5e 100644 --- a/pkg/logproto/logproto.proto +++ b/pkg/logproto/logproto.proto @@ -13,6 +13,7 @@ service Pusher { service Querier { rpc Query(QueryRequest) returns (stream QueryResponse) {}; + rpc QuerySample(SampleQueryRequest) returns (stream SampleQueryResponse) {}; rpc Label(LabelRequest) returns (LabelResponse) {}; rpc Tail(TailRequest) returns (stream TailResponse) {}; rpc Series(SeriesRequest) returns (SeriesResponse) {}; @@ -38,7 +39,17 @@ message QueryRequest { Direction direction = 5; reserved 6; repeated string shards = 7 [(gogoproto.jsontag) = "shards,omitempty"]; +} +message SampleQueryRequest { + string selector = 1; + google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"]; +} + +message SampleQueryResponse { + repeated Series series = 1 [(gogoproto.customtype) = "Series", (gogoproto.nullable) = true]; } enum Direction { @@ -71,6 +82,17 @@ message EntryAdapter { string line = 2 [(gogoproto.jsontag) = "line"]; } +message Sample { + int64 timestamp = 1 [(gogoproto.jsontag) = "ts"]; + double value = 2 [(gogoproto.jsontag) = "value"]; + uint64 hash = 3 [(gogoproto.jsontag) = "hash"]; +} + +message Series { + string labels = 1 [(gogoproto.jsontag) = "labels"]; + repeated Sample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "samples"]; +} + message TailRequest { string query = 1; reserved 2; diff --git a/pkg/logproto/types.go b/pkg/logproto/types.go index 20c5a0a591..fbca7e70a9 100644 --- a/pkg/logproto/types.go +++ b/pkg/logproto/types.go @@ -1,9 +1,9 @@ package logproto import ( 
- fmt "fmt" - io "io" - time "time" + "fmt" + "io" + "time" ) // Stream contains a unique labels set as a string and a set of entries for it. diff --git a/pkg/logql/ast.go b/pkg/logql/ast.go index 57911354ab..17dab24264 100644 --- a/pkg/logql/ast.go +++ b/pkg/logql/ast.go @@ -21,29 +21,49 @@ type Expr interface { fmt.Stringer } +type QueryParams interface { + LogSelector() (LogSelectorExpr, error) + GetStart() time.Time + GetEnd() time.Time + GetShards() []string +} + // SelectParams specifies parameters passed to data selections. -type SelectParams struct { +type SelectLogParams struct { *logproto.QueryRequest } // LogSelector returns the LogSelectorExpr from the SelectParams. // The `LogSelectorExpr` can then returns all matchers and filters to use for that request. -func (s SelectParams) LogSelector() (LogSelectorExpr, error) { +func (s SelectLogParams) LogSelector() (LogSelectorExpr, error) { return ParseLogSelector(s.Selector) } -// QuerierFunc implements Querier. -type QuerierFunc func(context.Context, SelectParams) (iter.EntryIterator, error) +type SelectSampleParams struct { + *logproto.SampleQueryRequest +} -// Select implements Querier. -func (q QuerierFunc) Select(ctx context.Context, p SelectParams) (iter.EntryIterator, error) { - return q(ctx, p) +// Expr returns the SampleExpr from the SelectSampleParams. +// The `LogSelectorExpr` can then returns all matchers and filters to use for that request. +func (s SelectSampleParams) Expr() (SampleExpr, error) { + return ParseSampleExpr(s.Selector) +} + +// LogSelector returns the LogSelectorExpr from the SelectParams. +// The `LogSelectorExpr` can then returns all matchers and filters to use for that request. 
+func (s SelectSampleParams) LogSelector() (LogSelectorExpr, error) { + expr, err := ParseSampleExpr(s.Selector) + if err != nil { + return nil, err + } + return expr.Selector(), nil } // Querier allows a LogQL expression to fetch an EntryIterator for a // set of matchers and filters type Querier interface { - Select(context.Context, SelectParams) (iter.EntryIterator, error) + SelectLogs(context.Context, SelectLogParams) (iter.EntryIterator, error) + SelectSamples(context.Context, SelectSampleParams) (iter.SampleIterator, error) } // LogSelectorExpr is a LogQL expression filtering and returning logs. @@ -162,9 +182,7 @@ type logRange struct { // impls Stringer func (r logRange) String() string { var sb strings.Builder - sb.WriteString("(") sb.WriteString(r.left.String()) - sb.WriteString(")") sb.WriteString(fmt.Sprintf("[%v]", model.Duration(r.interval))) return sb.String() } @@ -248,6 +266,7 @@ func IsLogicalBinOp(op string) bool { type SampleExpr interface { // Selector is the LogQL selector to apply when retrieving logs. Selector() LogSelectorExpr + Extractor() (SampleExtractor, error) // Operations returns the list of operations used in this SampleExpr Operations() []string Expr @@ -345,6 +364,10 @@ func (e *vectorAggregationExpr) Selector() LogSelectorExpr { return e.left.Selector() } +func (e *vectorAggregationExpr) Extractor() (SampleExtractor, error) { + return e.left.Extractor() +} + // impl Expr func (e *vectorAggregationExpr) logQLExpr() {} @@ -482,10 +505,11 @@ func (e *literalExpr) String() string { // literlExpr impls SampleExpr & LogSelectorExpr mainly to reduce the need for more complicated typings // to facilitate sum types. We'll be type switching when evaluating them anyways // and they will only be present in binary operation legs. 
-func (e *literalExpr) Selector() LogSelectorExpr { return e } -func (e *literalExpr) Operations() []string { return nil } -func (e *literalExpr) Filter() (LineFilter, error) { return nil, nil } -func (e *literalExpr) Matchers() []*labels.Matcher { return nil } +func (e *literalExpr) Selector() LogSelectorExpr { return e } +func (e *literalExpr) Operations() []string { return nil } +func (e *literalExpr) Filter() (LineFilter, error) { return nil, nil } +func (e *literalExpr) Matchers() []*labels.Matcher { return nil } +func (e *literalExpr) Extractor() (SampleExtractor, error) { return nil, nil } // helper used to impl Stringer for vector and range aggregations // nolint:interfacer diff --git a/pkg/logql/ast_test.go b/pkg/logql/ast_test.go index 064a0236f5..bda46afce6 100644 --- a/pkg/logql/ast_test.go +++ b/pkg/logql/ast_test.go @@ -204,16 +204,16 @@ func TestStringer(t *testing.T) { }, { in: `1 > bool 1 > count_over_time({foo="bar"}[1m])`, - out: `0.000000 > count_over_time(({foo="bar"})[1m])`, + out: `0.000000 > count_over_time({foo="bar"}[1m])`, }, { in: `1 > bool 1 > bool count_over_time({foo="bar"}[1m])`, - out: `0.000000 > bool count_over_time(({foo="bar"})[1m])`, + out: `0.000000 > bool count_over_time({foo="bar"}[1m])`, }, { - in: `0.000000 > count_over_time(({foo="bar"})[1m])`, - out: `0.000000 > count_over_time(({foo="bar"})[1m])`, + in: `0.000000 > count_over_time({foo="bar"}[1m])`, + out: `0.000000 > count_over_time({foo="bar"}[1m])`, }, } { t.Run(tc.in, func(t *testing.T) { diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go index 20339f6f96..3904ca4570 100644 --- a/pkg/logql/engine_test.go +++ b/pkg/logql/engine_test.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" "math" + + // "math" "testing" "time" @@ -26,7 +28,7 @@ var ( ErrMockMultiple = errors.New("Multiple errors: [mock error mock error]") ) -func TestEngine_InstantQuery(t *testing.T) { +func TestEngine_LogsInstantQuery(t *testing.T) { t.Parallel() for _, test := range []struct { qs 
string @@ -34,10 +36,10 @@ func TestEngine_InstantQuery(t *testing.T) { direction logproto.Direction limit uint32 - // an array of streams per SelectParams will be returned by the querier. + // an array of data per params will be returned by the querier. // This is to cover logql that requires multiple queries. - streams [][]logproto.Stream - params []SelectParams + data interface{} + params interface{} expected parser.Value }{ @@ -46,7 +48,7 @@ func TestEngine_InstantQuery(t *testing.T) { [][]logproto.Stream{ {newStream(testSize, identity, `{app="foo"}`)}, }, - []SelectParams{ + []SelectLogParams{ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(30, 0), Limit: 10, Selector: `{app="foo"}`}}, }, Streams([]logproto.Stream{newStream(10, identity, `{app="foo"}`)}), @@ -56,59 +58,62 @@ func TestEngine_InstantQuery(t *testing.T) { [][]logproto.Stream{ {newStream(testSize, identity, `{app="bar"}`)}, }, - []SelectParams{ + []SelectLogParams{ {&logproto.QueryRequest{Direction: logproto.BACKWARD, Start: time.Unix(0, 0), End: time.Unix(30, 0), Limit: 30, Selector: `{app="bar"}|="foo"|~".+bar"`}}, }, Streams([]logproto.Stream{newStream(30, identity, `{app="bar"}`)}), }, { `rate({app="foo"} |~".+bar" [1m])`, time.Unix(60, 0), logproto.BACKWARD, 10, - [][]logproto.Stream{ - {newStream(testSize, identity, `{app="foo"}`)}, + [][]logproto.Series{ + {newSeries(testSize, identity, `{app="foo"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app="foo"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app="foo"}|~".+bar"[1m])`}}, }, promql.Vector{promql.Sample{Point: promql.Point{T: 60 * 1000, V: 1}, Metric: labels.Labels{labels.Label{Name: "app", Value: "foo"}}}}, }, { `rate({app="foo"}[30s])`, time.Unix(60, 0), logproto.FORWARD, 10, - [][]logproto.Stream{ + 
[][]logproto.Series{ // 30s range the lower bound of the range is not inclusive only 15 samples will make it 60 included - {newStream(testSize, offset(46, identity), `{app="foo"}`)}, + {newSeries(testSize, offset(46, identity), `{app="foo"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(30, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app="foo"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(60, 0), Selector: `rate({app="foo"}[30s])`}}, }, promql.Vector{promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.5}, Metric: labels.Labels{labels.Label{Name: "app", Value: "foo"}}}}, }, { `count_over_time({app="foo"} |~".+bar" [1m])`, time.Unix(60, 0), logproto.BACKWARD, 10, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`)}, // 10 , 20 , 30 .. 60 = 6 total + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`)}, // 10 , 20 , 30 .. 60 = 6 total }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app="foo"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app="foo"}|~".+bar"[1m])`}}, }, promql.Vector{promql.Sample{Point: promql.Point{T: 60 * 1000, V: 6}, Metric: labels.Labels{labels.Label{Name: "app", Value: "foo"}}}}, }, { `count_over_time(({app="foo"} |~".+bar")[5m])`, time.Unix(5*60, 0), logproto.BACKWARD, 10, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`)}, // 10 , 20 , 30 .. 300 = 30 total + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`)}, // 10 , 20 , 30 .. 
300 = 30 total }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(5*60, 0), Limit: 0, Selector: `{app="foo"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(5*60, 0), Selector: `count_over_time({app="foo"}|~".+bar"[5m])`}}, }, promql.Vector{promql.Sample{Point: promql.Point{T: 5 * 60 * 1000, V: 30}, Metric: labels.Labels{labels.Label{Name: "app", Value: "foo"}}}}, }, { `avg(count_over_time({app=~"foo|bar"} |~".+bar" [1m]))`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(10, identity), `{app="bar"}`)}, + [][]logproto.Series{ + { + newSeries(testSize, factor(10, identity), `{app="foo"}`), + newSeries(testSize, factor(10, identity), `{app="bar"}`), + }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 6}, Metric: labels.Labels{}}, @@ -116,11 +121,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `min(rate({app=~"foo|bar"} |~".+bar" [1m]))`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(10, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(10, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + 
{&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.1}, Metric: labels.Labels{}}, @@ -128,11 +133,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `max by (app) (rate({app=~"foo|bar"} |~".+bar" [1m]))`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.2}, Metric: labels.Labels{labels.Label{Name: "app", Value: "bar"}}}, @@ -141,11 +146,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `max(rate({app=~"foo|bar"} |~".+bar" [1m]))`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.2}, 
Metric: labels.Labels{}}, @@ -153,11 +158,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `sum(rate({app=~"foo|bar"} |~".+bar" [1m]))`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(5, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(5, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.4}, Metric: labels.Labels{}}, @@ -165,11 +170,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `sum(count_over_time({app=~"foo|bar"} |~".+bar" [1m])) by (app)`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(10, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(10, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 6}, Metric: labels.Labels{labels.Label{Name: "app", Value: "bar"}}}, @@ -178,11 +183,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `count(count_over_time({app=~"foo|bar"} |~".+bar" [1m])) without (app)`, time.Unix(60, 
0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(10, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(10, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 2}, Metric: labels.Labels{}}, @@ -190,11 +195,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `stdvar without (app) (count_over_time(({app=~"foo|bar"} |~".+bar")[1m])) `, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 9}, Metric: labels.Labels{}}, @@ -202,11 +207,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `stddev(count_over_time(({app=~"foo|bar"} |~".+bar")[1m])) `, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(2, identity), `{app="bar"}`)}, + 
[][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(2, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 12}, Metric: labels.Labels{}}, @@ -214,11 +219,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `rate(({app=~"foo|bar"} |~".+bar")[1m])`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, offset(46, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, offset(46, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.25}, Metric: labels.Labels{labels.Label{Name: "app", Value: "bar"}}}, @@ -227,11 +232,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `topk(2,rate(({app=~"foo|bar"} |~".+bar")[1m]))`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, offset(46, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, offset(46, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: 
logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.25}, Metric: labels.Labels{labels.Label{Name: "app", Value: "bar"}}}, @@ -240,11 +245,11 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `topk(1,rate(({app=~"foo|bar"} |~".+bar")[1m]))`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, offset(46, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, offset(46, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.25}, Metric: labels.Labels{labels.Label{Name: "app", Value: "bar"}}}, @@ -252,12 +257,12 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `topk(1,rate(({app=~"foo|bar"} |~".+bar")[1m])) by (app)`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, offset(46, identity), `{app="bar"}`), - newStream(testSize, factor(5, identity), `{app="fuzz"}`), newStream(testSize, identity, `{app="buzz"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, offset(46, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="fuzz"}`), newSeries(testSize, identity, 
`{app="buzz"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.25}, Metric: labels.Labels{labels.Label{Name: "app", Value: "bar"}}}, @@ -268,12 +273,12 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `bottomk(2,rate(({app=~"foo|bar"} |~".+bar")[1m]))`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, offset(46, identity), `{app="bar"}`), - newStream(testSize, factor(5, identity), `{app="fuzz"}`), newStream(testSize, identity, `{app="buzz"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, offset(46, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="fuzz"}`), newSeries(testSize, identity, `{app="buzz"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.1}, Metric: labels.Labels{labels.Label{Name: "app", Value: "foo"}}}, @@ -282,12 +287,12 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `bottomk(3,rate(({app=~"foo|bar"} |~".+bar")[1m])) without (app)`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, offset(46, identity), `{app="bar"}`), - newStream(testSize, factor(5, identity), 
`{app="fuzz"}`), newStream(testSize, identity, `{app="buzz"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, offset(46, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="fuzz"}`), newSeries(testSize, identity, `{app="buzz"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0.25}, Metric: labels.Labels{labels.Label{Name: "app", Value: "bar"}}}, @@ -297,12 +302,12 @@ func TestEngine_InstantQuery(t *testing.T) { }, { `bottomk(3,rate(({app=~"foo|bar"} |~".+bar")[1m])) without (app) + 1`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, offset(46, identity), `{app="bar"}`), - newStream(testSize, factor(5, identity), `{app="fuzz"}`), newStream(testSize, identity, `{app="buzz"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, offset(46, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="fuzz"}`), newSeries(testSize, identity, `{app="buzz"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 1.25}, Metric: labels.Labels{labels.Label{Name: "app", Value: "bar"}}}, @@ -313,32 +318,32 @@ func TestEngine_InstantQuery(t 
*testing.T) { { // healthcheck `1+1`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{}, - []SelectParams{}, + nil, + nil, promql.Scalar{T: 60 * 1000, V: 2}, }, { // single literal `2`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{}, - []SelectParams{}, + nil, + nil, promql.Scalar{T: 60 * 1000, V: 2}, }, { // single comparison `1 == 1`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{}, - []SelectParams{}, + nil, + nil, promql.Scalar{T: 60 * 1000, V: 1}, }, { // single comparison, reduce away bool modifier between scalars `1 == bool 1`, time.Unix(60, 0), logproto.FORWARD, 100, - [][]logproto.Stream{}, - []SelectParams{}, + nil, + nil, promql.Scalar{T: 60 * 1000, V: 1}, }, { @@ -346,11 +351,11 @@ func TestEngine_InstantQuery(t *testing.T) { time.Unix(60, 0), logproto.FORWARD, 0, - [][]logproto.Stream{ - {newStream(testSize, identity, `{app="foo"}`)}, + [][]logproto.Series{ + {newSeries(testSize, identity, `{app="foo"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `{app="foo"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app="foo"}[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 60}, Metric: labels.Labels{labels.Label{Name: "app", Value: "foo"}}}, @@ -361,13 +366,13 @@ func TestEngine_InstantQuery(t *testing.T) { time.Unix(60, 0), logproto.FORWARD, 0, - [][]logproto.Stream{ - {newStream(testSize, identity, `{app="foo"}`)}, + [][]logproto.Series{ + {newSeries(testSize, identity, `{app="foo"}`)}, {}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `{app="foo"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `{app="bar"}`}}, + []SelectSampleParams{ + 
{&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app="foo"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app="bar"}[1m])`}}, }, promql.Vector{}, }, @@ -376,13 +381,13 @@ func TestEngine_InstantQuery(t *testing.T) { time.Unix(60, 0), logproto.FORWARD, 0, - [][]logproto.Stream{ - {newStream(testSize, identity, `{app="foo"}`)}, + [][]logproto.Series{ + {newSeries(testSize, identity, `{app="foo"}`)}, {}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `{app="foo"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app="foo"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app="bar"}[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{labels.Label{Name: "app", Value: "foo"}}}, @@ -393,13 +398,13 @@ func TestEngine_InstantQuery(t *testing.T) { time.Unix(60, 0), logproto.FORWARD, 0, - [][]logproto.Stream{ - {newStream(testSize, identity, `{app="foo"}`)}, + [][]logproto.Series{ + {newSeries(testSize, identity, `{app="foo"}`)}, {}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `{app="foo"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app="foo"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), 
Selector: `count_over_time({app="bar"}[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{}}, @@ -410,13 +415,13 @@ func TestEngine_InstantQuery(t *testing.T) { time.Unix(60, 0), logproto.FORWARD, 0, - [][]logproto.Stream{ - {newStream(testSize, identity, `{app="foo"}`)}, - {newStream(testSize, identity, `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, identity, `{app="foo"}`)}, + {newSeries(testSize, identity, `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `{app="foo"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app="foo"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app="bar"}[1m])`}}, }, promql.Vector{ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 60}, Metric: labels.Labels{}}, @@ -427,7 +432,7 @@ func TestEngine_InstantQuery(t *testing.T) { t.Run(fmt.Sprintf("%s %s", test.qs, test.direction), func(t *testing.T) { t.Parallel() - eng := NewEngine(EngineOpts{}, newQuerierRecorder(test.streams, test.params)) + eng := NewEngine(EngineOpts{}, newQuerierRecorder(t, test.data, test.params)) q := eng.Query(LiteralParams{ qs: test.qs, start: test.ts, @@ -457,8 +462,8 @@ func TestEngine_RangeQuery(t *testing.T) { // an array of streams per SelectParams will be returned by the querier. // This is to cover logql that requires multiple queries. 
- streams [][]logproto.Stream - params []SelectParams + data interface{} + params interface{} expected parser.Value }{ @@ -467,7 +472,7 @@ func TestEngine_RangeQuery(t *testing.T) { [][]logproto.Stream{ {newStream(testSize, identity, `{app="foo"}`)}, }, - []SelectParams{ + []SelectLogParams{ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(30, 0), Limit: 10, Selector: `{app="foo"}`}}, }, Streams([]logproto.Stream{newStream(10, identity, `{app="foo"}`)}), @@ -477,7 +482,7 @@ func TestEngine_RangeQuery(t *testing.T) { [][]logproto.Stream{ {newStream(testSize, identity, `{app="food"}`)}, }, - []SelectParams{ + []SelectLogParams{ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(30, 0), Limit: 10, Selector: `{app="food"}`}}, }, Streams([]logproto.Stream{newIntervalStream(10, 2*time.Second, identity, `{app="food"}`)}), @@ -487,7 +492,7 @@ func TestEngine_RangeQuery(t *testing.T) { [][]logproto.Stream{ {newBackwardStream(testSize, identity, `{app="fed"}`)}, }, - []SelectParams{ + []SelectLogParams{ {&logproto.QueryRequest{Direction: logproto.BACKWARD, Start: time.Unix(0, 0), End: time.Unix(30, 0), Limit: 10, Selector: `{app="fed"}`}}, }, Streams([]logproto.Stream{newBackwardIntervalStream(testSize, 10, 2*time.Second, identity, `{app="fed"}`)}), @@ -497,7 +502,7 @@ func TestEngine_RangeQuery(t *testing.T) { [][]logproto.Stream{ {newStream(testSize, identity, `{app="bar"}`)}, }, - []SelectParams{ + []SelectLogParams{ {&logproto.QueryRequest{Direction: logproto.BACKWARD, Start: time.Unix(0, 0), End: time.Unix(30, 0), Limit: 30, Selector: `{app="bar"}|="foo"|~".+bar"`}}, }, Streams([]logproto.Stream{newStream(30, identity, `{app="bar"}`)}), @@ -507,18 +512,18 @@ func TestEngine_RangeQuery(t *testing.T) { [][]logproto.Stream{ {newBackwardStream(testSize, identity, `{app="barf"}`)}, }, - []SelectParams{ + []SelectLogParams{ {&logproto.QueryRequest{Direction: logproto.BACKWARD, Start: 
time.Unix(0, 0), End: time.Unix(30, 0), Limit: 30, Selector: `{app="barf"}|="foo"|~".+bar"`}}, }, Streams([]logproto.Stream{newBackwardIntervalStream(testSize, 30, 3*time.Second, identity, `{app="barf"}`)}), }, { `rate({app="foo"} |~".+bar" [1m])`, time.Unix(60, 0), time.Unix(120, 0), time.Minute, 0, logproto.BACKWARD, 10, - [][]logproto.Stream{ - {newStream(testSize, identity, `{app="foo"}`)}, + [][]logproto.Series{ + {newSeries(testSize, identity, `{app="foo"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(120, 0), Limit: 0, Selector: `{app="foo"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(120, 0), Selector: `rate({app="foo"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -529,11 +534,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `rate({app="foo"}[30s])`, time.Unix(60, 0), time.Unix(120, 0), 15 * time.Second, 0, logproto.FORWARD, 10, - [][]logproto.Stream{ - {newStream(testSize, factor(2, identity), `{app="foo"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(2, identity), `{app="foo"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(30, 0), End: time.Unix(120, 0), Limit: 0, Selector: `{app="foo"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(120, 0), Selector: `rate({app="foo"}[30s])`}}, }, promql.Matrix{ promql.Series{ @@ -544,11 +549,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `count_over_time({app="foo"} |~".+bar" [1m])`, time.Unix(60, 0), time.Unix(120, 0), 30 * time.Second, 0, logproto.BACKWARD, 10, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`)}, // 10 , 20 , 30 .. 60 = 6 total + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`)}, // 10 , 20 , 30 .. 
60 = 6 total }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(120, 0), Limit: 0, Selector: `{app="foo"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(120, 0), Selector: `count_over_time({app="foo"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -559,11 +564,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `count_over_time(({app="foo"} |~".+bar")[5m])`, time.Unix(5*60, 0), time.Unix(5*120, 0), 30 * time.Second, 0, logproto.BACKWARD, 10, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`)}, // 10 , 20 , 30 .. 300 = 30 total + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`)}, // 10 , 20 , 30 .. 300 = 30 total }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(5*120, 0), Limit: 0, Selector: `{app="foo"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(5*120, 0), Selector: `count_over_time({app="foo"}|~".+bar"[5m])`}}, }, promql.Matrix{ promql.Series{ @@ -586,11 +591,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `avg(count_over_time({app=~"foo|bar"} |~".+bar" [1m]))`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(10, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(10, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: 
`count_over_time({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -601,11 +606,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `min(rate({app=~"foo|bar"} |~".+bar" [1m]))`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(10, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(10, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -616,11 +621,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `max by (app) (rate({app=~"foo|bar"} |~".+bar" [1m]))`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -635,11 +640,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `max(rate({app=~"foo|bar"} |~".+bar" [1m]))`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - 
[][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -650,11 +655,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `sum(rate({app=~"foo|bar"} |~".+bar" [1m]))`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(5, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(5, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -665,11 +670,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `sum(count_over_time({app=~"foo|bar"} |~".+bar" [1m])) by (app)`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - 
[]SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -684,11 +689,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `count(count_over_time({app=~"foo|bar"} |~".+bar" [1m])) without (app)`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(10, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(10, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -699,11 +704,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `stdvar without (app) (count_over_time(({app=~"foo|bar"} |~".+bar")[1m])) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + 
{&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -714,11 +719,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `stddev(count_over_time(({app=~"foo|bar"} |~".+bar")[1m])) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(2, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(2, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -729,11 +734,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `rate(({app=~"foo|bar"} |~".+bar")[1m])`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -748,11 +753,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `topk(2,rate(({app=~"foo|bar"} 
|~".+bar")[1m]))`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`), newStream(testSize, factor(15, identity), `{app="boo"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`), newSeries(testSize, factor(15, identity), `{app="boo"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -767,11 +772,11 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `topk(1,rate(({app=~"foo|bar"} |~".+bar")[1m]))`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(5, identity), `{app="bar"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(5, identity), `{app="bar"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -782,12 +787,12 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `topk(1,rate(({app=~"foo|bar"} |~".+bar")[1m])) by (app)`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), 
`{app="foo"}`), newStream(testSize, factor(15, identity), `{app="fuzz"}`), - newStream(testSize, factor(5, identity), `{app="fuzz"}`), newStream(testSize, identity, `{app="buzz"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(15, identity), `{app="fuzz"}`), + newSeries(testSize, factor(5, identity), `{app="fuzz"}`), newSeries(testSize, identity, `{app="buzz"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -806,12 +811,12 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `bottomk(2,rate(({app=~"foo|bar"} |~".+bar")[1m]))`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ - {newStream(testSize, factor(10, identity), `{app="foo"}`), newStream(testSize, factor(20, identity), `{app="bar"}`), - newStream(testSize, factor(5, identity), `{app="fuzz"}`), newStream(testSize, identity, `{app="buzz"}`)}, + [][]logproto.Series{ + {newSeries(testSize, factor(10, identity), `{app="foo"}`), newSeries(testSize, factor(20, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="fuzz"}`), newSeries(testSize, identity, `{app="buzz"}`)}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -826,16 +831,16 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `bottomk(3,rate(({app=~"foo|bar|fuzz|buzz"} |~".+bar")[1m])) 
without (app)`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(10, identity), `{app="foo"}`), - newStream(testSize, factor(20, identity), `{app="bar"}`), - newStream(testSize, factor(5, identity), `{app="fuzz"}`), - newStream(testSize, identity, `{app="buzz"}`), + newSeries(testSize, factor(10, identity), `{app="foo"}`), + newSeries(testSize, factor(20, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="fuzz"}`), + newSeries(testSize, identity, `{app="buzz"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar|fuzz|buzz"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar|fuzz|buzz"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -856,17 +861,17 @@ func TestEngine_RangeQuery(t *testing.T) { { `rate({app="foo"}[1m]) or rate({app="bar"}[1m])`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), }, { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="foo"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app="foo"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 
0), Selector: `rate({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -885,18 +890,18 @@ func TestEngine_RangeQuery(t *testing.T) { rate({app="bar"}[1m]) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -911,18 +916,18 @@ func TestEngine_RangeQuery(t *testing.T) { rate({app="bar"}[1m]) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: 
`{app=~"foo|bar"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -937,18 +942,18 @@ func TestEngine_RangeQuery(t *testing.T) { rate({app="bar"}[1m]) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -963,18 +968,18 @@ func TestEngine_RangeQuery(t *testing.T) { rate({app="bar"}[1m]) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, 
factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -989,18 +994,18 @@ func TestEngine_RangeQuery(t *testing.T) { count_over_time({app="bar"}[1m]) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app=~"foo|bar"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: 
`count_over_time({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1015,18 +1020,18 @@ func TestEngine_RangeQuery(t *testing.T) { count_over_time({app="bar"}[1m]) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app=~"foo|bar"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1041,18 +1046,18 @@ func TestEngine_RangeQuery(t *testing.T) { count_over_time({app="bar"}[1m]) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: 
time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app=~"foo|bar"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1067,18 +1072,18 @@ func TestEngine_RangeQuery(t *testing.T) { count_over_time({app="bar"}[1m]) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}`}}, - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app=~"foo|bar"}[1m])`}}, + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1095,14 +1100,14 @@ func TestEngine_RangeQuery(t *testing.T) { sum by (app) (rate({app=~"foo|bar"} |~".+bar" [1m])) `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - 
newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1123,14 +1128,14 @@ func TestEngine_RangeQuery(t *testing.T) { ) * 2 `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1151,14 +1156,14 @@ func TestEngine_RangeQuery(t *testing.T) { ) + 1 `, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="foo"}`), - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="foo"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: 
`{app=~"foo|bar"}|~".+bar"`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app=~"foo|bar"}|~".+bar"[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1170,8 +1175,8 @@ func TestEngine_RangeQuery(t *testing.T) { { `1+1--1`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{}, - []SelectParams{}, + nil, + nil, promql.Matrix{ promql.Series{ Points: []promql.Point{{T: 60000, V: 3}, {T: 90000, V: 3}, {T: 120000, V: 3}, {T: 150000, V: 3}, {T: 180000, V: 3}}, @@ -1181,13 +1186,13 @@ func TestEngine_RangeQuery(t *testing.T) { { `rate({app="bar"}[1m]) - 1`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1199,13 +1204,13 @@ func TestEngine_RangeQuery(t *testing.T) { { `1 - rate({app="bar"}[1m])`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1217,13 +1222,13 @@ func 
TestEngine_RangeQuery(t *testing.T) { { `rate({app="bar"}[1m]) - 1 / 2`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `rate({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1235,13 +1240,13 @@ func TestEngine_RangeQuery(t *testing.T) { { `count_over_time({app="bar"}[1m]) ^ count_over_time({app="bar"}[1m])`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{ + [][]logproto.Series{ { - newStream(testSize, factor(5, identity), `{app="bar"}`), + newSeries(testSize, factor(5, identity), `{app="bar"}`), }, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app="bar"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(180, 0), Selector: `count_over_time({app="bar"}[1m])`}}, }, promql.Matrix{ promql.Series{ @@ -1253,8 +1258,8 @@ func TestEngine_RangeQuery(t *testing.T) { { `2`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, 0, logproto.FORWARD, 100, - [][]logproto.Stream{}, - []SelectParams{}, + nil, + nil, promql.Matrix{ promql.Series{ Points: []promql.Point{{T: 60 * 1000, V: 2}, {T: 90 * 1000, V: 2}, {T: 120 * 1000, V: 2}, {T: 150 * 1000, V: 2}, {T: 180 * 1000, V: 2}}, @@ -1263,20 +1268,20 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `bytes_rate({app="foo"}[30s])`, time.Unix(60, 0), time.Unix(120, 0), 15 * time.Second, 0, logproto.FORWARD, 10, - [][]logproto.Stream{ - {logproto.Stream{ 
+ [][]logproto.Series{ + {logproto.Series{ Labels: `{app="foo"}`, - Entries: []logproto.Entry{ - {Timestamp: time.Unix(45, 0), Line: "0123456789"}, // 10 bytes / 30s for the first point. - {Timestamp: time.Unix(60, 0), Line: ""}, - {Timestamp: time.Unix(75, 0), Line: ""}, - {Timestamp: time.Unix(90, 0), Line: ""}, - {Timestamp: time.Unix(105, 0), Line: ""}, + Samples: []logproto.Sample{ + {Timestamp: time.Unix(45, 0).UnixNano(), Hash: 1, Value: 10.}, // 10 bytes / 30s for the first point. + {Timestamp: time.Unix(60, 0).UnixNano(), Hash: 2, Value: 0.}, + {Timestamp: time.Unix(75, 0).UnixNano(), Hash: 3, Value: 0.}, + {Timestamp: time.Unix(90, 0).UnixNano(), Hash: 4, Value: 0.}, + {Timestamp: time.Unix(105, 0).UnixNano(), Hash: 5, Value: 0.}, }, }}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(30, 0), End: time.Unix(120, 0), Limit: 0, Selector: `{app="foo"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(120, 0), Selector: `bytes_rate({app="foo"}[30s])`}}, }, promql.Matrix{ promql.Series{ @@ -1287,20 +1292,20 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `bytes_over_time({app="foo"}[30s])`, time.Unix(60, 0), time.Unix(120, 0), 15 * time.Second, 0, logproto.FORWARD, 10, - [][]logproto.Stream{ - {logproto.Stream{ + [][]logproto.Series{ + {logproto.Series{ Labels: `{app="foo"}`, - Entries: []logproto.Entry{ - {Timestamp: time.Unix(45, 0), Line: "01234"}, // 5 bytes - {Timestamp: time.Unix(60, 0), Line: ""}, - {Timestamp: time.Unix(75, 0), Line: ""}, - {Timestamp: time.Unix(90, 0), Line: ""}, - {Timestamp: time.Unix(105, 0), Line: "0123"}, // 4 bytes + Samples: []logproto.Sample{ + {Timestamp: time.Unix(45, 0).UnixNano(), Hash: 1, Value: 5.}, // 5 bytes + {Timestamp: time.Unix(60, 0).UnixNano(), Hash: 2, Value: 0.}, + {Timestamp: time.Unix(75, 0).UnixNano(), Hash: 3, Value: 0.}, + {Timestamp: time.Unix(90, 0).UnixNano(), Hash: 4, Value: 0.}, + {Timestamp: 
time.Unix(105, 0).UnixNano(), Hash: 5, Value: 4.}, // 4 bytes }, }}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(30, 0), End: time.Unix(120, 0), Limit: 0, Selector: `{app="foo"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(120, 0), Selector: `bytes_over_time({app="foo"}[30s])`}}, }, promql.Matrix{ promql.Series{ @@ -1311,20 +1316,20 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `bytes_over_time({app="foo"}[30s]) > bool 1`, time.Unix(60, 0), time.Unix(120, 0), 15 * time.Second, 0, logproto.FORWARD, 10, - [][]logproto.Stream{ - {logproto.Stream{ + [][]logproto.Series{ + {logproto.Series{ Labels: `{app="foo"}`, - Entries: []logproto.Entry{ - {Timestamp: time.Unix(45, 0), Line: "01234"}, // 5 bytes - {Timestamp: time.Unix(60, 0), Line: ""}, - {Timestamp: time.Unix(75, 0), Line: ""}, - {Timestamp: time.Unix(90, 0), Line: ""}, - {Timestamp: time.Unix(105, 0), Line: "0123"}, // 4 bytes + Samples: []logproto.Sample{ + {Timestamp: time.Unix(45, 0).UnixNano(), Hash: 1, Value: 5.}, // 5 bytes + {Timestamp: time.Unix(60, 0).UnixNano(), Hash: 2, Value: 0.}, + {Timestamp: time.Unix(75, 0).UnixNano(), Hash: 3, Value: 0.}, + {Timestamp: time.Unix(90, 0).UnixNano(), Hash: 4, Value: 0.}, + {Timestamp: time.Unix(105, 0).UnixNano(), Hash: 5, Value: 4.}, // 4 bytes }, }}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(30, 0), End: time.Unix(120, 0), Limit: 0, Selector: `{app="foo"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(120, 0), Selector: `bytes_over_time({app="foo"}[30s])`}}, }, promql.Matrix{ promql.Series{ @@ -1335,20 +1340,20 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `bytes_over_time({app="foo"}[30s]) > 1`, time.Unix(60, 0), time.Unix(120, 0), 15 * time.Second, 0, logproto.FORWARD, 10, - [][]logproto.Stream{ - {logproto.Stream{ + [][]logproto.Series{ + 
{logproto.Series{ Labels: `{app="foo"}`, - Entries: []logproto.Entry{ - {Timestamp: time.Unix(45, 0), Line: "01234"}, // 5 bytes - {Timestamp: time.Unix(60, 0), Line: ""}, - {Timestamp: time.Unix(75, 0), Line: ""}, - {Timestamp: time.Unix(90, 0), Line: ""}, - {Timestamp: time.Unix(105, 0), Line: "0123"}, // 4 bytes + Samples: []logproto.Sample{ + {Timestamp: time.Unix(45, 0).UnixNano(), Hash: 1, Value: 5.}, // 5 bytes + {Timestamp: time.Unix(60, 0).UnixNano(), Hash: 2, Value: 0.}, + {Timestamp: time.Unix(75, 0).UnixNano(), Hash: 3, Value: 0.}, + {Timestamp: time.Unix(90, 0).UnixNano(), Hash: 4, Value: 0.}, + {Timestamp: time.Unix(105, 0).UnixNano(), Hash: 5, Value: 4.}, // 4 bytes }, }}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(30, 0), End: time.Unix(120, 0), Limit: 0, Selector: `{app="foo"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(120, 0), Selector: `bytes_over_time({app="foo"}[30s])`}}, }, promql.Matrix{ promql.Series{ @@ -1359,20 +1364,20 @@ func TestEngine_RangeQuery(t *testing.T) { }, { `bytes_over_time({app="foo"}[30s]) > bool 1`, time.Unix(60, 0), time.Unix(120, 0), 15 * time.Second, 0, logproto.FORWARD, 10, - [][]logproto.Stream{ - {logproto.Stream{ + [][]logproto.Series{ + {logproto.Series{ Labels: `{app="foo"}`, - Entries: []logproto.Entry{ - {Timestamp: time.Unix(45, 0), Line: "01234"}, // 5 bytes - {Timestamp: time.Unix(60, 0), Line: ""}, - {Timestamp: time.Unix(75, 0), Line: ""}, - {Timestamp: time.Unix(90, 0), Line: ""}, - {Timestamp: time.Unix(105, 0), Line: "0123"}, // 4 bytes + Samples: []logproto.Sample{ + {Timestamp: time.Unix(45, 0).UnixNano(), Hash: 1, Value: 5.}, // 5 bytes + {Timestamp: time.Unix(60, 0).UnixNano(), Hash: 2, Value: 0.}, + {Timestamp: time.Unix(75, 0).UnixNano(), Hash: 3, Value: 0.}, + {Timestamp: time.Unix(90, 0).UnixNano(), Hash: 4, Value: 0.}, + {Timestamp: time.Unix(105, 0).UnixNano(), Hash: 5, Value: 4.}, // 4 
bytes }, }}, }, - []SelectParams{ - {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(30, 0), End: time.Unix(120, 0), Limit: 0, Selector: `{app="foo"}`}}, + []SelectSampleParams{ + {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(120, 0), Selector: `bytes_over_time({app="foo"}[30s])`}}, }, promql.Matrix{ promql.Series{ @@ -1392,7 +1397,7 @@ func TestEngine_RangeQuery(t *testing.T) { t.Run(fmt.Sprintf("%s %s", test.qs, test.direction), func(t *testing.T) { t.Parallel() - eng := NewEngine(EngineOpts{}, newQuerierRecorder(test.streams, test.params)) + eng := NewEngine(EngineOpts{}, newQuerierRecorder(t, test.data, test.params)) q := eng.Query(LiteralParams{ qs: test.qs, @@ -1412,12 +1417,23 @@ func TestEngine_RangeQuery(t *testing.T) { } } +type statsQuerier struct{} + +func (statsQuerier) SelectLogs(ctx context.Context, p SelectLogParams) (iter.EntryIterator, error) { + st := stats.GetChunkData(ctx) + st.DecompressedBytes++ + return iter.NoopIterator, nil +} + +func (statsQuerier) SelectSamples(ctx context.Context, p SelectSampleParams) (iter.SampleIterator, error) { + st := stats.GetChunkData(ctx) + st.DecompressedBytes++ + return iter.NoopIterator, nil +} + func TestEngine_Stats(t *testing.T) { - eng := NewEngine(EngineOpts{}, QuerierFunc(func(ctx context.Context, sp SelectParams) (iter.EntryIterator, error) { - st := stats.GetChunkData(ctx) - st.DecompressedBytes++ - return iter.NoopIterator, nil - })) + + eng := NewEngine(EngineOpts{}, &statsQuerier{}) q := eng.Query(LiteralParams{ qs: `{foo="bar"}`, @@ -1431,46 +1447,74 @@ func TestEngine_Stats(t *testing.T) { require.Equal(t, int64(1), r.Statistics.Store.DecompressedBytes) } +type errorIteratorQuerier struct { + samples []iter.SampleIterator + entries []iter.EntryIterator +} + +func (e errorIteratorQuerier) SelectLogs(ctx context.Context, p SelectLogParams) (iter.EntryIterator, error) { + return iter.NewHeapIterator(ctx, e.entries, p.Direction), nil +} +func (e 
errorIteratorQuerier) SelectSamples(ctx context.Context, p SelectSampleParams) (iter.SampleIterator, error) { + return iter.NewHeapSampleIterator(ctx, e.samples), nil +} + func TestStepEvaluator_Error(t *testing.T) { tests := []struct { - name string - qs string - iters []iter.EntryIterator - err error + name string + qs string + querier Querier + err error }{ { "rangeAggEvaluator", `count_over_time({app="foo"}[1m])`, - []iter.EntryIterator{ - iter.NewStreamIterator(newStream(testSize, identity, `{app="foo"}`)), - NewMockStreamIterator(newStream(testSize, identity, `{app="foo"}`)), + &errorIteratorQuerier{ + samples: []iter.SampleIterator{ + iter.NewSeriesIterator(newSeries(testSize, identity, `{app="foo"}`)), + NewErrorSampleIterator(), + }, + }, + ErrMock, + }, + { + "stream", + `{app="foo"}`, + &errorIteratorQuerier{ + entries: []iter.EntryIterator{ + iter.NewStreamIterator(newStream(testSize, identity, `{app="foo"}`)), + NewErrorEntryIterator(), + }, }, ErrMock, }, { "binOpStepEvaluator", `count_over_time({app="foo"}[1m]) / count_over_time({app="foo"}[1m])`, - []iter.EntryIterator{ - iter.NewStreamIterator(newStream(testSize, identity, `{app="foo"}`)), - NewMockStreamIterator(newStream(testSize, identity, `{app="foo"}`)), + &errorIteratorQuerier{ + samples: []iter.SampleIterator{ + iter.NewSeriesIterator(newSeries(testSize, identity, `{app="foo"}`)), + NewErrorSampleIterator(), + }, }, ErrMockMultiple, }, } for _, tc := range tests { - queryfunc := QuerierFunc(func(ctx context.Context, p SelectParams) (iter.EntryIterator, error) { - return iter.NewHeapIterator(ctx, tc.iters, p.Direction), nil - }) - eng := NewEngine(EngineOpts{}, queryfunc) - q := eng.Query(LiteralParams{ - qs: tc.qs, - start: time.Unix(0, 0), - end: time.Unix(180, 0), - step: 1 * time.Second, + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + tc := tc + eng := NewEngine(EngineOpts{}, tc.querier) + q := eng.Query(LiteralParams{ + qs: tc.qs, + start: time.Unix(0, 0), + end: time.Unix(180, 
0), + step: 1 * time.Second, + }) + _, err := q.Exec(context.Background()) + require.Equal(t, tc.err, err) }) - _, err := q.Exec(context.Background()) - require.Equal(t, tc.err, err) } } @@ -1545,45 +1589,87 @@ func benchmarkRangeQuery(testsize int64, b *testing.B) { } func getLocalQuerier(size int64) Querier { - iters := []iter.EntryIterator{ - iter.NewStreamIterator(newStream(size, identity, `{app="foo"}`)), - iter.NewStreamIterator(newStream(size, identity, `{app="foo",bar="foo"}`)), - iter.NewStreamIterator(newStream(size, identity, `{app="foo",bar="bazz"}`)), - iter.NewStreamIterator(newStream(size, identity, `{app="foo",bar="fuzz"}`)), - iter.NewStreamIterator(newStream(size, identity, `{app="bar"}`)), - iter.NewStreamIterator(newStream(size, identity, `{app="bar",bar="foo"}`)), - iter.NewStreamIterator(newStream(size, identity, `{app="bar",bar="bazz"}`)), - iter.NewStreamIterator(newStream(size, identity, `{app="bar",bar="fuzz"}`)), - // some duplicates - iter.NewStreamIterator(newStream(size, identity, `{app="foo"}`)), - iter.NewStreamIterator(newStream(size, identity, `{app="bar"}`)), - iter.NewStreamIterator(newStream(size, identity, `{app="bar",bar="bazz"}`)), - iter.NewStreamIterator(newStream(size, identity, `{app="bar"}`)), + + return &querierRecorder{ + series: map[string][]logproto.Series{ + "": { + newSeries(size, identity, `{app="foo"}`), + newSeries(size, identity, `{app="foo",bar="foo"}`), + newSeries(size, identity, `{app="foo",bar="bazz"}`), + newSeries(size, identity, `{app="foo",bar="fuzz"}`), + newSeries(size, identity, `{app="bar"}`), + newSeries(size, identity, `{app="bar",bar="foo"}`), + newSeries(size, identity, `{app="bar",bar="bazz"}`), + newSeries(size, identity, `{app="bar",bar="fuzz"}`), + // some duplicates + newSeries(size, identity, `{app="foo"}`), + newSeries(size, identity, `{app="bar"}`), + newSeries(size, identity, `{app="bar",bar="bazz"}`), + newSeries(size, identity, `{app="bar"}`), + }, + }, + streams: 
map[string][]logproto.Stream{ + "": { + newStream(size, identity, `{app="foo"}`), + newStream(size, identity, `{app="foo",bar="foo"}`), + newStream(size, identity, `{app="foo",bar="bazz"}`), + newStream(size, identity, `{app="foo",bar="fuzz"}`), + newStream(size, identity, `{app="bar"}`), + newStream(size, identity, `{app="bar",bar="foo"}`), + newStream(size, identity, `{app="bar",bar="bazz"}`), + newStream(size, identity, `{app="bar",bar="fuzz"}`), + // some duplicates + newStream(size, identity, `{app="foo"}`), + newStream(size, identity, `{app="bar"}`), + newStream(size, identity, `{app="bar",bar="bazz"}`), + newStream(size, identity, `{app="bar"}`), + }, + }, } - return QuerierFunc(func(ctx context.Context, p SelectParams) (iter.EntryIterator, error) { - return iter.NewHeapIterator(ctx, iters, p.Direction), nil - }) } type querierRecorder struct { - source map[string][]logproto.Stream + streams map[string][]logproto.Stream + series map[string][]logproto.Series + match bool } -func newQuerierRecorder(streams [][]logproto.Stream, params []SelectParams) *querierRecorder { - source := map[string][]logproto.Stream{} - for i, p := range params { - source[paramsID(p)] = streams[i] +func newQuerierRecorder(t *testing.T, data interface{}, params interface{}) *querierRecorder { + t.Helper() + streams := map[string][]logproto.Stream{} + if streamsIn, ok := data.([][]logproto.Stream); ok { + if paramsIn, ok2 := params.([]SelectLogParams); ok2 { + for i, p := range paramsIn { + streams[paramsID(p)] = streamsIn[i] + } + } + } + + series := map[string][]logproto.Series{} + if seriesIn, ok := data.([][]logproto.Series); ok { + if paramsIn, ok2 := params.([]SelectSampleParams); ok2 { + for i, p := range paramsIn { + series[paramsID(p)] = seriesIn[i] + } + } } return &querierRecorder{ - source: source, + streams: streams, + series: series, + match: true, } } -func (q *querierRecorder) Select(ctx context.Context, p SelectParams) (iter.EntryIterator, error) { +func (q 
*querierRecorder) SelectLogs(ctx context.Context, p SelectLogParams) (iter.EntryIterator, error) { + if !q.match { + for _, s := range q.streams { + return iter.NewStreamsIterator(ctx, s, p.Direction), nil + } + } recordID := paramsID(p) - streams, ok := q.source[recordID] + streams, ok := q.streams[recordID] if !ok { - return nil, fmt.Errorf("no streams found for id: %s has: %+v", recordID, q.source) + return nil, fmt.Errorf("no streams found for id: %s has: %+v", recordID, q.streams) } iters := make([]iter.EntryIterator, 0, len(streams)) for _, s := range streams { @@ -1592,7 +1678,25 @@ func (q *querierRecorder) Select(ctx context.Context, p SelectParams) (iter.Entr return iter.NewHeapIterator(ctx, iters, p.Direction), nil } -func paramsID(p SelectParams) string { +func (q *querierRecorder) SelectSamples(ctx context.Context, p SelectSampleParams) (iter.SampleIterator, error) { + if !q.match { + for _, s := range q.series { + return iter.NewMultiSeriesIterator(ctx, s), nil + } + } + recordID := paramsID(p) + series, ok := q.series[recordID] + if !ok { + return nil, fmt.Errorf("no series found for id: %s has: %+v", recordID, q.series) + } + iters := make([]iter.SampleIterator, 0, len(series)) + for _, s := range series { + iters = append(iters, iter.NewSeriesIterator(s)) + } + return iter.NewHeapSampleIterator(ctx, iters), nil +} + +func paramsID(p interface{}) string { b, err := json.Marshal(p) if err != nil { panic(err) @@ -1600,12 +1704,18 @@ func paramsID(p SelectParams) string { return string(b) } -type generator func(i int64) logproto.Entry +type logData struct { + logproto.Entry + // nolint + logproto.Sample +} + +type generator func(i int64) logData func newStream(n int64, f generator, labels string) logproto.Stream { entries := []logproto.Entry{} for i := int64(0); i < n; i++ { - entries = append(entries, f(i)) + entries = append(entries, f(i).Entry) } return logproto.Stream{ Entries: entries, @@ -1613,12 +1723,23 @@ func newStream(n int64, f generator, 
labels string) logproto.Stream { } } +func newSeries(n int64, f generator, labels string) logproto.Series { + samples := []logproto.Sample{} + for i := int64(0); i < n; i++ { + samples = append(samples, f(i).Sample) + } + return logproto.Series{ + Samples: samples, + Labels: labels, + } +} + func newIntervalStream(n int64, step time.Duration, f generator, labels string) logproto.Stream { entries := []logproto.Entry{} lastEntry := int64(-100) // Start with a really small value (negative) so we always output the first item for i := int64(0); int64(len(entries)) < n; i++ { if float64(lastEntry)+step.Seconds() <= float64(i) { - entries = append(entries, f(i)) + entries = append(entries, f(i).Entry) lastEntry = i } } @@ -1631,7 +1752,7 @@ func newIntervalStream(n int64, step time.Duration, f generator, labels string) func newBackwardStream(n int64, f generator, labels string) logproto.Stream { entries := []logproto.Entry{} for i := n - 1; i > 0; i-- { - entries = append(entries, f(i)) + entries = append(entries, f(i).Entry) } return logproto.Stream{ Entries: entries, @@ -1644,7 +1765,7 @@ func newBackwardIntervalStream(n, expectedResults int64, step time.Duration, f g lastEntry := int64(100000) //Start with some really big value so that we always output the first item for i := n - 1; int64(len(entries)) < expectedResults; i-- { if float64(lastEntry)-step.Seconds() >= float64(i) { - entries = append(entries, f(i)) + entries = append(entries, f(i).Entry) lastEntry = i } } @@ -1654,78 +1775,79 @@ func newBackwardIntervalStream(n, expectedResults int64, step time.Duration, f g } } -func identity(i int64) logproto.Entry { - return logproto.Entry{ - Timestamp: time.Unix(i, 0), - Line: fmt.Sprintf("%d", i), +func identity(i int64) logData { + return logData{ + Entry: logproto.Entry{ + Timestamp: time.Unix(i, 0), + Line: fmt.Sprintf("%d", i), + }, + Sample: logproto.Sample{ + Timestamp: time.Unix(i, 0).UnixNano(), + Value: 1., + Hash: uint64(i), + }, } } // nolint func factor(j 
int64, g generator) generator { - return func(i int64) logproto.Entry { + return func(i int64) logData { return g(i * j) } } // nolint func offset(j int64, g generator) generator { - return func(i int64) logproto.Entry { + return func(i int64) logData { return g(i + j) } } // nolint func constant(t int64) generator { - return func(i int64) logproto.Entry { - return logproto.Entry{ - Timestamp: time.Unix(t, 0), - Line: fmt.Sprintf("%d", i), + return func(i int64) logData { + return logData{ + Entry: logproto.Entry{ + Timestamp: time.Unix(t, 0), + Line: fmt.Sprintf("%d", i), + }, + Sample: logproto.Sample{ + Timestamp: time.Unix(t, 0).UnixNano(), + Hash: uint64(i), + Value: 1., + }, } } } // nolint func inverse(g generator) generator { - return func(i int64) logproto.Entry { + return func(i int64) logData { return g(-i) } } -// mockstreamIterator mocks error in iterator -type mockStreamIterator struct { - i int - entries []logproto.Entry - labels string - err error -} +// errorIterator +type errorIterator struct{} -// NewMockStreamIterator mocks error in iterator -func NewMockStreamIterator(stream logproto.Stream) iter.EntryIterator { - return &mockStreamIterator{ - i: -1, - entries: stream.Entries, - labels: stream.Labels, - } +// NewErrorSampleIterator return an sample iterator that errors out +func NewErrorSampleIterator() iter.SampleIterator { + return &errorIterator{} } -func (i *mockStreamIterator) Next() bool { - i.err = ErrMock - return false +// NewErrorEntryIterator return an entry iterator that errors out +func NewErrorEntryIterator() iter.EntryIterator { + return &errorIterator{} } -func (i *mockStreamIterator) Error() error { - return i.err -} +func (errorIterator) Next() bool { return false } -func (i *mockStreamIterator) Labels() string { - return i.labels -} +func (errorIterator) Error() error { return ErrMock } -func (i *mockStreamIterator) Entry() logproto.Entry { - return i.entries[i.i] -} +func (errorIterator) Labels() string { return "" } -func 
(i *mockStreamIterator) Close() error { - return nil -} +func (errorIterator) Entry() logproto.Entry { return logproto.Entry{} } + +func (errorIterator) Sample() logproto.Sample { return logproto.Sample{} } + +func (errorIterator) Close() error { return nil } diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go index d942889f6c..5142661794 100644 --- a/pkg/logql/evaluator.go +++ b/pkg/logql/evaluator.go @@ -129,7 +129,7 @@ func NewDefaultEvaluator(querier Querier, maxLookBackPeriod time.Duration) *Defa } func (ev *DefaultEvaluator) Iterator(ctx context.Context, expr LogSelectorExpr, q Params) (iter.EntryIterator, error) { - params := SelectParams{ + params := SelectLogParams{ QueryRequest: &logproto.QueryRequest{ Start: q.Start(), End: q.End(), @@ -144,7 +144,7 @@ func (ev *DefaultEvaluator) Iterator(ctx context.Context, expr LogSelectorExpr, params.Start = params.Start.Add(-ev.maxLookBackPeriod) } - return ev.querier.Select(ctx, params) + return ev.querier.SelectLogs(ctx, params) } @@ -158,20 +158,18 @@ func (ev *DefaultEvaluator) StepEvaluator( case *vectorAggregationExpr: return vectorAggEvaluator(ctx, nextEv, e, q) case *rangeAggregationExpr: - entryIter, err := ev.querier.Select(ctx, SelectParams{ - &logproto.QueryRequest{ - Start: q.Start().Add(-e.left.interval), - End: q.End(), - Limit: 0, - Direction: logproto.FORWARD, - Selector: expr.Selector().String(), - Shards: q.Shards(), + it, err := ev.querier.SelectSamples(ctx, SelectSampleParams{ + &logproto.SampleQueryRequest{ + Start: q.Start().Add(-e.left.interval), + End: q.End(), + Selector: expr.String(), + Shards: q.Shards(), }, }) if err != nil { return nil, err } - return rangeAggEvaluator(entryIter, e, q) + return rangeAggEvaluator(iter.NewPeekingSampleIterator(it), e, q) case *binOpExpr: return binOpStepEvaluator(ctx, nextEv, e, q) default: @@ -377,7 +375,7 @@ func vectorAggEvaluator( } func rangeAggEvaluator( - entryIter iter.EntryIterator, + it iter.PeekingSampleIterator, expr 
*rangeAggregationExpr, q Params, ) (StepEvaluator, error) { @@ -385,13 +383,9 @@ func rangeAggEvaluator( if err != nil { return nil, err } - extractor, err := expr.extractor() - if err != nil { - return nil, err - } return rangeVectorEvaluator{ iter: newRangeVectorIterator( - newSeriesIterator(entryIter, extractor), + it, expr.left.interval.Nanoseconds(), q.Step().Nanoseconds(), q.Start().UnixNano(), q.End().UnixNano(), diff --git a/pkg/logql/functions.go b/pkg/logql/functions.go index e2c3e2bcd1..852352c9eb 100644 --- a/pkg/logql/functions.go +++ b/pkg/logql/functions.go @@ -9,12 +9,12 @@ import ( const unsupportedErr = "unsupported range vector aggregation operation: %s" -func (r rangeAggregationExpr) extractor() (SampleExtractor, error) { +func (r rangeAggregationExpr) Extractor() (SampleExtractor, error) { switch r.operation { case OpRangeTypeRate, OpRangeTypeCount: - return extractCount, nil + return ExtractCount, nil case OpRangeTypeBytes, OpRangeTypeBytesRate: - return extractBytes, nil + return ExtractBytes, nil default: return nil, fmt.Errorf(unsupportedErr, r.operation) } diff --git a/pkg/logql/parser.go b/pkg/logql/parser.go index 9293ae3b20..164a6e3a09 100644 --- a/pkg/logql/parser.go +++ b/pkg/logql/parser.go @@ -59,6 +59,19 @@ func ParseMatchers(input string) ([]*labels.Matcher, error) { return matcherExpr.matchers, nil } +// ParseSampleExpr parses a string and returns the sampleExpr +func ParseSampleExpr(input string) (SampleExpr, error) { + expr, err := ParseExpr(input) + if err != nil { + return nil, err + } + sampleExpr, ok := expr.(SampleExpr) + if !ok { + return nil, errors.New("only sample expression supported") + } + return sampleExpr, nil +} + // ParseLogSelector parses a log selector expression `{app="foo"} |= "filter"` func ParseLogSelector(input string) (LogSelectorExpr, error) { expr, err := ParseExpr(input) diff --git a/pkg/logql/range_vector.go b/pkg/logql/range_vector.go index f999e47c6c..2f25e3a162 100644 --- 
a/pkg/logql/range_vector.go +++ b/pkg/logql/range_vector.go @@ -6,6 +6,8 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + + "github.com/grafana/loki/pkg/iter" ) // RangeVectorAggregator aggregates samples for a given range of samples. @@ -23,7 +25,7 @@ type RangeVectorIterator interface { } type rangeVectorIterator struct { - iter SeriesIterator + iter iter.PeekingSampleIterator selRange, step, end, current int64 window map[string]*promql.Series metrics map[string]labels.Labels @@ -31,7 +33,7 @@ type rangeVectorIterator struct { } func newRangeVectorIterator( - it SeriesIterator, + it iter.PeekingSampleIterator, selRange, step, start, end int64) *rangeVectorIterator { // forces at least one step. if step == 0 { @@ -97,37 +99,37 @@ func (r *rangeVectorIterator) popBack(newStart int64) { // load the next sample range window. func (r *rangeVectorIterator) load(start, end int64) { - for sample, hasNext := r.iter.Peek(); hasNext; sample, hasNext = r.iter.Peek() { - if sample.TimestampNano > end { + for lbs, sample, hasNext := r.iter.Peek(); hasNext; lbs, sample, hasNext = r.iter.Peek() { + if sample.Timestamp > end { // not consuming the iterator as this belong to another range. return } // the lower bound of the range is not inclusive - if sample.TimestampNano <= start { + if sample.Timestamp <= start { _ = r.iter.Next() continue } // adds the sample. 
var series *promql.Series var ok bool - series, ok = r.window[sample.Labels] + series, ok = r.window[lbs] if !ok { var metric labels.Labels - if metric, ok = r.metrics[sample.Labels]; !ok { + if metric, ok = r.metrics[lbs]; !ok { var err error - metric, err = parser.ParseMetric(sample.Labels) + metric, err = parser.ParseMetric(lbs) if err != nil { continue } - r.metrics[sample.Labels] = metric + r.metrics[lbs] = metric } series = getSeries() series.Metric = metric - r.window[sample.Labels] = series + r.window[lbs] = series } p := promql.Point{ - T: sample.TimestampNano, + T: sample.Timestamp, V: sample.Value, } series.Points = append(series.Points, p) diff --git a/pkg/logql/range_vector_test.go b/pkg/logql/range_vector_test.go index 363f167b22..18e166ae8b 100644 --- a/pkg/logql/range_vector_test.go +++ b/pkg/logql/range_vector_test.go @@ -14,41 +14,38 @@ import ( "github.com/grafana/loki/pkg/logproto" ) -var entries = []logproto.Entry{ - {Timestamp: time.Unix(2, 0)}, - {Timestamp: time.Unix(5, 0)}, - {Timestamp: time.Unix(6, 0)}, - {Timestamp: time.Unix(10, 0)}, - {Timestamp: time.Unix(10, 1)}, - {Timestamp: time.Unix(11, 0)}, - {Timestamp: time.Unix(35, 0)}, - {Timestamp: time.Unix(35, 1)}, - {Timestamp: time.Unix(40, 0)}, - {Timestamp: time.Unix(100, 0)}, - {Timestamp: time.Unix(100, 1)}, +var samples = []logproto.Sample{ + {Timestamp: time.Unix(2, 0).UnixNano(), Hash: 1, Value: 1.}, + {Timestamp: time.Unix(5, 0).UnixNano(), Hash: 2, Value: 1.}, + {Timestamp: time.Unix(6, 0).UnixNano(), Hash: 3, Value: 1.}, + {Timestamp: time.Unix(10, 0).UnixNano(), Hash: 4, Value: 1.}, + {Timestamp: time.Unix(10, 1).UnixNano(), Hash: 5, Value: 1.}, + {Timestamp: time.Unix(11, 0).UnixNano(), Hash: 6, Value: 1.}, + {Timestamp: time.Unix(35, 0).UnixNano(), Hash: 7, Value: 1.}, + {Timestamp: time.Unix(35, 1).UnixNano(), Hash: 8, Value: 1.}, + {Timestamp: time.Unix(40, 0).UnixNano(), Hash: 9, Value: 1.}, + {Timestamp: time.Unix(100, 0).UnixNano(), Hash: 10, Value: 1.}, + {Timestamp: 
time.Unix(100, 1).UnixNano(), Hash: 11, Value: 1.}, } var labelFoo, _ = parser.ParseMetric("{app=\"foo\"}") var labelBar, _ = parser.ParseMetric("{app=\"bar\"}") -func newEntryIterator() iter.EntryIterator { - return iter.NewHeapIterator(context.Background(), []iter.EntryIterator{ - iter.NewStreamIterator(logproto.Stream{ +func newSampleIterator() iter.SampleIterator { + return iter.NewHeapSampleIterator(context.Background(), []iter.SampleIterator{ + iter.NewSeriesIterator(logproto.Series{ Labels: labelFoo.String(), - Entries: entries, + Samples: samples, }), - iter.NewStreamIterator(logproto.Stream{ + iter.NewSeriesIterator(logproto.Series{ Labels: labelBar.String(), - Entries: entries, + Samples: samples, }), - }, logproto.FORWARD) + }) } -func newfakeSeriesIterator() SeriesIterator { - return &seriesIterator{ - iter: iter.NewPeekingIterator(newEntryIterator()), - sampler: extractCount, - } +func newfakePeekingSampleIterator() iter.PeekingSampleIterator { + return iter.NewPeekingSampleIterator(newSampleIterator()) } func newPoint(t time.Time, v float64) promql.Point { @@ -151,7 +148,7 @@ func Test_RangeVectorIterator(t *testing.T) { t.Run( fmt.Sprintf("logs[%s] - step: %s", time.Duration(tt.selRange), time.Duration(tt.step)), func(t *testing.T) { - it := newRangeVectorIterator(newfakeSeriesIterator(), tt.selRange, + it := newRangeVectorIterator(newfakePeekingSampleIterator(), tt.selRange, tt.step, tt.start.UnixNano(), tt.end.UnixNano()) i := 0 diff --git a/pkg/logql/series_extractor.go b/pkg/logql/series_extractor.go index 5b6ed85c0c..6159828714 100644 --- a/pkg/logql/series_extractor.go +++ b/pkg/logql/series_extractor.go @@ -1,104 +1,24 @@ package logql -import ( - "github.com/grafana/loki/pkg/iter" - "github.com/grafana/loki/pkg/logproto" -) - var ( - extractBytes = bytesSampleExtractor{} - extractCount = countSampleExtractor{} + ExtractBytes = bytesSampleExtractor{} + ExtractCount = countSampleExtractor{} ) -// SeriesIterator is an iterator that iterate over 
a stream of logs and returns sample. -type SeriesIterator interface { - Close() error - Next() bool - Peek() (Sample, bool) - Error() error -} - -// Sample is a series sample -type Sample struct { - Labels string - Value float64 - TimestampNano int64 -} - -type seriesIterator struct { - iter iter.PeekingEntryIterator - sampler SampleExtractor - - updated bool - cur Sample -} - -func newSeriesIterator(it iter.EntryIterator, sampler SampleExtractor) SeriesIterator { - return &seriesIterator{ - iter: iter.NewPeekingIterator(it), - sampler: sampler, - } -} - -func (e *seriesIterator) Close() error { - return e.iter.Close() -} - -func (e *seriesIterator) Next() bool { - e.updated = false - return e.iter.Next() -} - -func (e *seriesIterator) Peek() (Sample, bool) { - if e.updated { - return e.cur, true - } - - for { - lbs, entry, ok := e.iter.Peek() - if !ok { - return Sample{}, false - } - - // transform - e.cur, ok = e.sampler.From(lbs, entry) - if ok { - break - } - if !e.iter.Next() { - return Sample{}, false - } - } - e.updated = true - return e.cur, true -} - -func (e *seriesIterator) Error() error { - return e.iter.Error() -} - // SampleExtractor transforms a log entry into a sample. // In case of failure the second return value will be false. 
type SampleExtractor interface { - From(labels string, e logproto.Entry) (Sample, bool) + Extract(line []byte) (float64, bool) } type countSampleExtractor struct{} -func (countSampleExtractor) From(lbs string, entry logproto.Entry) (Sample, bool) { - return Sample{ - Labels: lbs, - TimestampNano: entry.Timestamp.UnixNano(), - Value: 1., - }, true +func (countSampleExtractor) Extract(line []byte) (float64, bool) { + return 1., true } type bytesSampleExtractor struct{} -func (bytesSampleExtractor) From(lbs string, entry logproto.Entry) (Sample, bool) { - return Sample{ - Labels: lbs, - TimestampNano: entry.Timestamp.UnixNano(), - Value: float64(len(entry.Line)), - }, true +func (bytesSampleExtractor) Extract(line []byte) (float64, bool) { + return float64(len(line)), true } diff --git a/pkg/logql/series_extractor_test.go b/pkg/logql/series_extractor_test.go deleted file mode 100644 index 8ef572d2a2..0000000000 --- a/pkg/logql/series_extractor_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package logql - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/grafana/loki/pkg/iter" - "github.com/grafana/loki/pkg/logproto" -) - -func Test_seriesIterator_Peek(t *testing.T) { - type expectation struct { - ok bool - sample Sample - } - for _, test := range []struct { - name string - it SeriesIterator - expectations []expectation - }{ - { - "count", - newSeriesIterator(iter.NewStreamIterator(newStream(5, identity, `{app="foo"}`)), extractCount), - []expectation{ - {true, Sample{Labels: `{app="foo"}`, TimestampNano: 0, Value: 1}}, - {true, Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(1, 0).UnixNano(), Value: 1}}, - {true, Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(2, 0).UnixNano(), Value: 1}}, - {true, Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(3, 0).UnixNano(), Value: 1}}, - {true, Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(4, 0).UnixNano(), Value: 1}}, - {false, Sample{}}, - }, - }, - { 
- "bytes empty", - newSeriesIterator( - iter.NewStreamIterator( - newStream( - 3, - func(i int64) logproto.Entry { - return logproto.Entry{ - Timestamp: time.Unix(i, 0), - } - }, - `{app="foo"}`, - ), - ), - extractBytes, - ), - []expectation{ - {true, Sample{Labels: `{app="foo"}`, TimestampNano: 0, Value: 0}}, - {true, Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(1, 0).UnixNano(), Value: 0}}, - {true, Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(2, 0).UnixNano(), Value: 0}}, - {false, Sample{}}, - }, - }, - { - "bytes", - newSeriesIterator( - iter.NewStreamIterator( - newStream( - 3, - func(i int64) logproto.Entry { - return logproto.Entry{ - Timestamp: time.Unix(i, 0), - Line: "foo", - } - }, - `{app="foo"}`, - ), - ), - extractBytes, - ), - []expectation{ - {true, Sample{Labels: `{app="foo"}`, TimestampNano: 0, Value: 3}}, - {true, Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(1, 0).UnixNano(), Value: 3}}, - {true, Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(2, 0).UnixNano(), Value: 3}}, - {false, Sample{}}, - }, - }, - { - "bytes backward", - newSeriesIterator( - iter.NewStreamsIterator(context.Background(), - []logproto.Stream{ - newStream( - 3, - func(i int64) logproto.Entry { - return logproto.Entry{ - Timestamp: time.Unix(i, 0), - Line: "foo", - } - }, - `{app="foo"}`, - ), - newStream( - 3, - func(i int64) logproto.Entry { - return logproto.Entry{ - Timestamp: time.Unix(i, 0), - Line: "barr", - } - }, - `{app="barr"}`, - ), - }, - logproto.BACKWARD, - ), - extractBytes, - ), - []expectation{ - {true, Sample{Labels: `{app="barr"}`, TimestampNano: 0, Value: 4}}, - {true, Sample{Labels: `{app="barr"}`, TimestampNano: time.Unix(1, 0).UnixNano(), Value: 4}}, - {true, Sample{Labels: `{app="barr"}`, TimestampNano: time.Unix(2, 0).UnixNano(), Value: 4}}, - {true, Sample{Labels: `{app="foo"}`, TimestampNano: 0, Value: 3}}, - {true, Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(1, 0).UnixNano(), Value: 3}}, - {true, 
Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(2, 0).UnixNano(), Value: 3}}, - {false, Sample{}}, - }, - }, - { - "skip first", - newSeriesIterator(iter.NewStreamIterator(newStream(2, identity, `{app="foo"}`)), fakeSampler{}), - []expectation{ - {true, Sample{Labels: `{app="foo"}`, TimestampNano: time.Unix(1, 0).UnixNano(), Value: 10}}, - {false, Sample{}}, - }, - }, - } { - t.Run(test.name, func(t *testing.T) { - for _, e := range test.expectations { - sample, ok := test.it.Peek() - require.Equal(t, e.ok, ok) - if !e.ok { - continue - } - require.Equal(t, e.sample, sample) - test.it.Next() - } - require.NoError(t, test.it.Close()) - }) - } -} - -// fakeSampler is a Sampler that returns no value for 0 timestamp otherwise always 10 -type fakeSampler struct{} - -func (fakeSampler) From(lbs string, entry logproto.Entry) (Sample, bool) { - if entry.Timestamp.UnixNano() == 0 { - return Sample{}, false - } - return Sample{ - Labels: lbs, - TimestampNano: entry.Timestamp.UnixNano(), - Value: 10, - }, true -} diff --git a/pkg/logql/sharding.go b/pkg/logql/sharding.go index 405cecf440..6fadb1c682 100644 --- a/pkg/logql/sharding.go +++ b/pkg/logql/sharding.go @@ -166,7 +166,7 @@ type Downstreamer interface { // DownstreamEvaluator is an evaluator which handles shard aware AST nodes type DownstreamEvaluator struct { Downstreamer - defaultEvaluator *DefaultEvaluator + defaultEvaluator Evaluator } // Downstream runs queries and collects stats from the embedded Downstreamer @@ -186,16 +186,19 @@ func (ev DownstreamEvaluator) Downstream(ctx context.Context, queries []Downstre } +type errorQuerier struct{} + +func (errorQuerier) SelectLogs(ctx context.Context, p SelectLogParams) (iter.EntryIterator, error) { + return nil, errors.New("Unimplemented") +} +func (errorQuerier) SelectSamples(ctx context.Context, p SelectSampleParams) (iter.SampleIterator, error) { + return nil, errors.New("Unimplemented") +} + func NewDownstreamEvaluator(downstreamer Downstreamer) 
*DownstreamEvaluator { return &DownstreamEvaluator{ - Downstreamer: downstreamer, - defaultEvaluator: NewDefaultEvaluator( - QuerierFunc(func(_ context.Context, p SelectParams) (iter.EntryIterator, error) { - // TODO(owen-d): add metric here, this should never happen. - return nil, errors.New("Unimplemented") - }), - 0, - ), + Downstreamer: downstreamer, + defaultEvaluator: NewDefaultEvaluator(&errorQuerier{}, 0), } } diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go index a042bc8aa8..941e94b015 100644 --- a/pkg/logql/shardmapper_test.go +++ b/pkg/logql/shardmapper_test.go @@ -132,19 +132,19 @@ func TestMappingStrings(t *testing.T) { }, { in: `sum(rate({foo="bar"}[1m]))`, - out: `sum(downstream ++ downstream)`, + out: `sum(downstream ++ downstream)`, }, { in: `max(count(rate({foo="bar"}[5m]))) / 2`, - out: `max(sum(downstream ++ downstream)) / 2.000000`, + out: `max(sum(downstream ++ downstream)) / 2.000000`, }, { in: `topk(3, rate({foo="bar"}[5m]))`, - out: `topk(3,downstream ++ downstream)`, + out: `topk(3,downstream ++ downstream)`, }, { in: `sum(max(rate({foo="bar"}[5m])))`, - out: `sum(max(downstream ++ downstream))`, + out: `sum(max(downstream ++ downstream))`, }, { in: `{foo="bar"} |= "id=123"`, @@ -152,7 +152,7 @@ func TestMappingStrings(t *testing.T) { }, { in: `sum by (cluster) (rate({foo="bar"} |= "id=123" [5m]))`, - out: `sum by(cluster)(downstream ++ downstream)`, + out: `sum by(cluster)(downstream ++ downstream)`, }, } { t.Run(tc.in, func(t *testing.T) { diff --git a/pkg/logql/stats/grpc_test.go b/pkg/logql/stats/grpc_test.go index d44538f81c..00b13ff819 100644 --- a/pkg/logql/stats/grpc_test.go +++ b/pkg/logql/stats/grpc_test.go @@ -35,7 +35,7 @@ func TestCollectTrailer(t *testing.T) { t.Fatalf("Failed to dial bufnet: %v", err) } defer conn.Close() - ing := ingesterFn(func(req *logproto.QueryRequest, s logproto.Querier_QueryServer) error { + ing := ingesterFn(func(s grpc.ServerStream) error { ingCtx := NewContext(s.Context()) 
defer SendAsTrailer(ingCtx, s) GetIngesterData(ingCtx).TotalChunksMatched++ @@ -60,7 +60,7 @@ func TestCollectTrailer(t *testing.T) { ctx = NewContext(ctx) - // query the ingester twice. + // query the ingester twice once for logs , once for samples. clientStream, err := ingClient.Query(ctx, &logproto.QueryRequest{}, CollectTrailer(ctx)) if err != nil { t.Fatal(err) @@ -69,15 +69,15 @@ func TestCollectTrailer(t *testing.T) { if err != nil && err != io.EOF { t.Fatal(err) } - clientStream, err = ingClient.Query(ctx, &logproto.QueryRequest{}, CollectTrailer(ctx)) + clientSamples, err := ingClient.QuerySample(ctx, &logproto.SampleQueryRequest{}, CollectTrailer(ctx)) if err != nil { t.Fatal(err) } - _, err = clientStream.Recv() + _, err = clientSamples.Recv() if err != nil && err != io.EOF { t.Fatal(err) } - err = clientStream.CloseSend() + err = clientSamples.CloseSend() if err != nil { t.Fatal(err) } @@ -94,10 +94,14 @@ func TestCollectTrailer(t *testing.T) { require.Equal(t, int64(2), res.Ingester.TotalDuplicates) } -type ingesterFn func(*logproto.QueryRequest, logproto.Querier_QueryServer) error +type ingesterFn func(grpc.ServerStream) error + +func (i ingesterFn) Query(_ *logproto.QueryRequest, s logproto.Querier_QueryServer) error { + return i(s) +} -func (i ingesterFn) Query(req *logproto.QueryRequest, s logproto.Querier_QueryServer) error { - return i(req, s) +func (i ingesterFn) QuerySample(_ *logproto.SampleQueryRequest, s logproto.Querier_QuerySampleServer) error { + return i(s) } func (ingesterFn) Label(context.Context, *logproto.LabelRequest) (*logproto.LabelResponse, error) { return nil, nil diff --git a/pkg/logql/test_utils.go b/pkg/logql/test_utils.go index 9ba4ba322a..ecaa17bd1f 100644 --- a/pkg/logql/test_utils.go +++ b/pkg/logql/test_utils.go @@ -6,6 +6,7 @@ import ( "log" "time" + "github.com/cespare/xxhash/v2" "github.com/cortexproject/cortex/pkg/querier/astmapper" "github.com/prometheus/prometheus/pkg/labels" 
"github.com/prometheus/prometheus/promql/parser" @@ -27,7 +28,7 @@ type MockQuerier struct { streams []logproto.Stream } -func (q MockQuerier) Select(_ context.Context, req SelectParams) (iter.EntryIterator, error) { +func (q MockQuerier) SelectLogs(ctx context.Context, req SelectLogParams) (iter.EntryIterator, error) { expr, err := req.LogSelector() if err != nil { return nil, err @@ -91,12 +92,94 @@ outer: } return iter.NewTimeRangedIterator( - iter.NewStreamsIterator(context.Background(), filtered, req.Direction), + iter.NewStreamsIterator(ctx, filtered, req.Direction), req.Start, req.End, ), nil } +func (q MockQuerier) SelectSamples(ctx context.Context, req SelectSampleParams) (iter.SampleIterator, error) { + selector, err := req.LogSelector() + if err != nil { + return nil, err + } + filter, err := selector.Filter() + if err != nil { + return nil, err + } + expr, err := req.Expr() + if err != nil { + return nil, err + } + + extractor, err := expr.Extractor() + if err != nil { + return nil, err + } + + matchers := selector.Matchers() + + var shard *astmapper.ShardAnnotation + if len(req.Shards) > 0 { + shards, err := ParseShards(req.Shards) + if err != nil { + return nil, err + } + shard = &shards[0] + } + + var matched []logproto.Stream + +outer: + for _, stream := range q.streams { + ls := mustParseLabels(stream.Labels) + + // filter by shard if requested + if shard != nil && ls.Hash()%uint64(shard.Of) != uint64(shard.Shard) { + continue + } + + for _, matcher := range matchers { + if !matcher.Matches(ls.Get(matcher.Name)) { + continue outer + } + } + matched = append(matched, stream) + } + + // apply the LineFilter + filtered := make([]logproto.Series, 0, len(matched)) + for _, s := range matched { + var samples []logproto.Sample + for _, entry := range s.Entries { + if filter == nil || filter.Filter([]byte(entry.Line)) { + v, ok := extractor.Extract([]byte(entry.Line)) + if !ok { + continue + } + samples = append(samples, logproto.Sample{ + Timestamp: 
entry.Timestamp.UnixNano(), + Value: v, + Hash: xxhash.Sum64([]byte(entry.Line)), + }) + } + } + + if len(samples) > 0 { + filtered = append(filtered, logproto.Series{ + Labels: s.Labels, + Samples: samples, + }) + } + } + + return iter.NewTimeRangedSampleIterator( + iter.NewMultiSeriesIterator(ctx, filtered), + req.Start.UnixNano(), + req.End.UnixNano(), + ), nil +} + type MockDownstreamer struct { *Engine } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 88e95308d3..4d73d9368a 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -141,8 +141,8 @@ func (q *Querier) forGivenIngesters(ctx context.Context, replicationSet ring.Rep } // Select Implements logql.Querier which select logs via matchers and regex filters. -func (q *Querier) Select(ctx context.Context, params logql.SelectParams) (iter.EntryIterator, error) { - err := q.validateQueryRequest(ctx, params.QueryRequest) +func (q *Querier) SelectLogs(ctx context.Context, params logql.SelectLogParams) (iter.EntryIterator, error) { + err := q.validateQueryRequest(ctx, params) if err != nil { return nil, err } @@ -151,7 +151,7 @@ func (q *Querier) Select(ctx context.Context, params logql.SelectParams) (iter.E if q.cfg.IngesterQueryStoreMaxLookback == 0 { // IngesterQueryStoreMaxLookback is zero, the default state, query the store normally - chunkStoreIter, err = q.store.LazyQuery(ctx, params) + chunkStoreIter, err = q.store.SelectLogs(ctx, params) if err != nil { return nil, err } @@ -165,11 +165,11 @@ func (q *Querier) Select(ctx context.Context, params logql.SelectParams) (iter.E // Make a copy of the request before modifying // because the initial request is used below to query ingesters queryRequestCopy := *params.QueryRequest - newParams := logql.SelectParams{ + newParams := logql.SelectLogParams{ QueryRequest: &queryRequestCopy, } newParams.End = adjustedEnd - chunkStoreIter, err = q.store.LazyQuery(ctx, newParams) + chunkStoreIter, err = q.store.SelectLogs(ctx, newParams) if 
err != nil { return nil, err } @@ -182,7 +182,7 @@ func (q *Querier) Select(ctx context.Context, params logql.SelectParams) (iter.E // skip ingester queries only when QueryIngestersWithin is enabled (not the zero value) and // the end of the query is earlier than the lookback - if lookback := time.Now().Add(-q.cfg.QueryIngestersWithin); q.cfg.QueryIngestersWithin != 0 && params.GetEnd().Before(lookback) { + if !shouldQueryIngester(q.cfg, params) { return chunkStoreIter, nil } @@ -194,7 +194,61 @@ func (q *Querier) Select(ctx context.Context, params logql.SelectParams) (iter.E return iter.NewHeapIterator(ctx, append(iters, chunkStoreIter), params.Direction), nil } -func (q *Querier) queryIngesters(ctx context.Context, params logql.SelectParams) ([]iter.EntryIterator, error) { +func (q *Querier) SelectSamples(ctx context.Context, params logql.SelectSampleParams) (iter.SampleIterator, error) { + err := q.validateQueryRequest(ctx, params) + if err != nil { + return nil, err + } + + var chunkStoreIter iter.SampleIterator + + switch { + case q.cfg.IngesterQueryStoreMaxLookback == 0: + // IngesterQueryStoreMaxLookback is zero, the default state, query the store normally + chunkStoreIter, err = q.store.SelectSamples(ctx, params) + if err != nil { + return nil, err + } + case q.cfg.IngesterQueryStoreMaxLookback > 0: + adjustedEnd := params.End.Add(-q.cfg.IngesterQueryStoreMaxLookback) + if params.Start.After(adjustedEnd) { + chunkStoreIter = iter.NoopIterator + break + } + // Make a copy of the request before modifying + // because the initial request is used below to query ingesters + queryRequestCopy := *params.SampleQueryRequest + newParams := logql.SelectSampleParams{ + SampleQueryRequest: &queryRequestCopy, + } + newParams.End = adjustedEnd + chunkStoreIter, err = q.store.SelectSamples(ctx, newParams) + if err != nil { + return nil, err + } + default: + chunkStoreIter = iter.NoopIterator + + } + // skip ingester queries only when QueryIngestersWithin is enabled (not 
the zero value) and + // the end of the query is earlier than the lookback + if !shouldQueryIngester(q.cfg, params) { + return chunkStoreIter, nil + } + + iters, err := q.queryIngestersForSample(ctx, params) + if err != nil { + return nil, err + } + return iter.NewHeapSampleIterator(ctx, append(iters, chunkStoreIter)), nil +} + +func shouldQueryIngester(cfg Config, params logql.QueryParams) bool { + lookback := time.Now().Add(-cfg.QueryIngestersWithin) + return !(cfg.QueryIngestersWithin != 0 && params.GetEnd().Before(lookback)) +} + +func (q *Querier) queryIngesters(ctx context.Context, params logql.SelectLogParams) ([]iter.EntryIterator, error) { clients, err := q.forAllIngesters(ctx, func(client logproto.QuerierClient) (interface{}, error) { return client.Query(ctx, params.QueryRequest, stats.CollectTrailer(ctx)) }) @@ -209,6 +263,21 @@ func (q *Querier) queryIngesters(ctx context.Context, params logql.SelectParams) return iterators, nil } +func (q *Querier) queryIngestersForSample(ctx context.Context, params logql.SelectSampleParams) ([]iter.SampleIterator, error) { + clients, err := q.forAllIngesters(ctx, func(client logproto.QuerierClient) (interface{}, error) { + return client.QuerySample(ctx, params.SampleQueryRequest, stats.CollectTrailer(ctx)) + }) + if err != nil { + return nil, err + } + + iterators := make([]iter.SampleIterator, len(clients)) + for i := range clients { + iterators[i] = iter.NewSampleQueryClientIterator(clients[i].response.(logproto.Querier_QuerySampleClient)) + } + return iterators, nil +} + // Label does the heavy lifting for a Label query. 
func (q *Querier) Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) { // Enforce the query timeout while querying backends @@ -264,7 +333,7 @@ func (q *Querier) Tail(ctx context.Context, req *logproto.TailRequest) (*Tailer, return nil, err } - histReq := logql.SelectParams{ + histReq := logql.SelectLogParams{ QueryRequest: &logproto.QueryRequest{ Selector: req.Query, Start: req.Start, @@ -274,7 +343,7 @@ func (q *Querier) Tail(ctx context.Context, req *logproto.TailRequest) (*Tailer, }, } - err = q.validateQueryRequest(ctx, histReq.QueryRequest) + err = q.validateQueryRequest(ctx, histReq) if err != nil { return nil, err } @@ -297,7 +366,7 @@ func (q *Querier) Tail(ctx context.Context, req *logproto.TailRequest) (*Tailer, tailClients[clients[i].addr] = clients[i].response.(logproto.Querier_TailClient) } - histIterators, err := q.Select(queryCtx, histReq) + histIterators, err := q.SelectLogs(queryCtx, histReq) if err != nil { return nil, err } @@ -376,7 +445,7 @@ func (q *Querier) Series(ctx context.Context, req *logproto.SeriesRequest) (*log return nil, err } - if err = q.validateQueryTimeRange(userID, &req.Start, &req.End); err != nil { + if err = q.validateQueryTimeRange(userID, req.Start, req.End); err != nil { return nil, err } @@ -483,7 +552,7 @@ func (q *Querier) seriesForMatchers( // seriesForMatcher fetches series from the store for a given matcher func (q *Querier) seriesForMatcher(ctx context.Context, from, through time.Time, matcher string) ([]logproto.SeriesIdentifier, error) { - ids, err := q.store.GetSeries(ctx, logql.SelectParams{ + ids, err := q.store.GetSeries(ctx, logql.SelectLogParams{ QueryRequest: &logproto.QueryRequest{ Selector: matcher, Limit: 1, @@ -498,13 +567,13 @@ func (q *Querier) seriesForMatcher(ctx context.Context, from, through time.Time, return ids, nil } -func (q *Querier) validateQueryRequest(ctx context.Context, req *logproto.QueryRequest) error { +func (q *Querier) validateQueryRequest(ctx 
context.Context, req logql.QueryParams) error { userID, err := user.ExtractOrgID(ctx) if err != nil { return err } - selector, err := logql.ParseLogSelector(req.Selector) + selector, err := req.LogSelector() if err != nil { return err } @@ -516,17 +585,17 @@ func (q *Querier) validateQueryRequest(ctx context.Context, req *logproto.QueryR "max streams matchers per query exceeded, matchers-count > limit (%d > %d)", len(matchers), maxStreamMatchersPerQuery) } - return q.validateQueryTimeRange(userID, &req.Start, &req.End) + return q.validateQueryTimeRange(userID, req.GetStart(), req.GetEnd()) } -func (q *Querier) validateQueryTimeRange(userID string, from *time.Time, through *time.Time) error { - if (*through).Before(*from) { - return httpgrpc.Errorf(http.StatusBadRequest, "invalid query, through < from (%s < %s)", *through, *from) +func (q *Querier) validateQueryTimeRange(userID string, from time.Time, through time.Time) error { + if (through).Before(from) { + return httpgrpc.Errorf(http.StatusBadRequest, "invalid query, through < from (%s < %s)", through, from) } maxQueryLength := q.limits.MaxQueryLength(userID) - if maxQueryLength > 0 && (*through).Sub(*from) > maxQueryLength { - return httpgrpc.Errorf(http.StatusBadRequest, cortex_validation.ErrQueryTooLong, (*through).Sub(*from), maxQueryLength) + if maxQueryLength > 0 && (through).Sub(from) > maxQueryLength { + return httpgrpc.Errorf(http.StatusBadRequest, cortex_validation.ErrQueryTooLong, (through).Sub(from), maxQueryLength) } return nil diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index 79b1860558..814a984b7a 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -207,7 +207,7 @@ func newStoreMock() *storeMock { return &storeMock{} } -func (s *storeMock) LazyQuery(ctx context.Context, req logql.SelectParams) (iter.EntryIterator, error) { +func (s *storeMock) SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, 
error) { args := s.Called(ctx, req) res := args.Get(0) if res == nil { @@ -216,6 +216,15 @@ func (s *storeMock) LazyQuery(ctx context.Context, req logql.SelectParams) (iter return res.(iter.EntryIterator), args.Error(1) } +func (s *storeMock) SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) { + args := s.Called(ctx, req) + res := args.Get(0) + if res == nil { + return iter.SampleIterator(nil), args.Error(1) + } + return res.(iter.SampleIterator), args.Error(1) +} + func (s *storeMock) Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) { args := s.Called(ctx, userID, from, through, matchers) return args.Get(0).([]chunk.Chunk), args.Error(1) @@ -252,7 +261,7 @@ func (s *storeMock) DeleteSeriesIDs(ctx context.Context, from, through model.Tim panic("don't call me please") } -func (s *storeMock) GetSeries(ctx context.Context, req logql.SelectParams) ([]logproto.SeriesIdentifier, error) { +func (s *storeMock) GetSeries(ctx context.Context, req logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) { args := s.Called(ctx, req) res := args.Get(0) if res == nil { diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 25587a4a46..484397e100 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -85,7 +85,7 @@ func TestQuerier_Tail_QueryTimeoutConfigFlag(t *testing.T) { } store := newStoreMock() - store.On("LazyQuery", mock.Anything, mock.Anything).Return(mockStreamIterator(1, 2), nil) + store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(1, 2), nil) queryClient := newQueryClientMock() queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 2)}), nil) @@ -124,7 +124,7 @@ func TestQuerier_Tail_QueryTimeoutConfigFlag(t *testing.T) { _, ok = calls[0].Arguments.Get(0).(context.Context).Deadline() assert.False(t, ok) - calls = store.GetMockedCallsByMethod("LazyQuery") + 
calls = store.GetMockedCallsByMethod("SelectLogs") assert.Equal(t, 1, len(calls)) deadline, ok = calls[0].Arguments.Get(0).(context.Context).Deadline() assert.True(t, ok) @@ -261,7 +261,7 @@ func TestQuerier_validateQueryRequest(t *testing.T) { } store := newStoreMock() - store.On("LazyQuery", mock.Anything, mock.Anything).Return(mockStreamIterator(1, 2), nil) + store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(1, 2), nil) queryClient := newQueryClientMock() queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 2)}), nil) @@ -286,15 +286,15 @@ func TestQuerier_validateQueryRequest(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "test") - _, err = q.Select(ctx, logql.SelectParams{QueryRequest: &request}) + _, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request}) require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "max streams matchers per query exceeded, matchers-count > limit (2 > 1)"), err) request.Selector = "{type=\"test\"}" - _, err = q.Select(ctx, logql.SelectParams{QueryRequest: &request}) + _, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request}) require.NoError(t, err) request.Start = request.End.Add(-3 * time.Minute) - _, err = q.Select(ctx, logql.SelectParams{QueryRequest: &request}) + _, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request}) require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "invalid query, length > limit (3m0s > 2m0s)"), err) } @@ -493,7 +493,7 @@ func TestQuerier_IngesterMaxQueryLookback(t *testing.T) { } store := newStoreMock() - store.On("LazyQuery", mock.Anything, mock.Anything).Return(mockStreamIterator(0, 1), nil) + store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(0, 1), nil) conf := mockQuerierConfig() conf.QueryIngestersWithin = tc.lookback @@ -507,7 +507,7 @@ func TestQuerier_IngesterMaxQueryLookback(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "test") - 
res, err := q.Select(ctx, logql.SelectParams{QueryRequest: &req}) + res, err := q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &req}) require.Nil(t, err) // since streams are loaded lazily, force iterators to exhaust @@ -570,7 +570,7 @@ func TestQuerier_concurrentTailLimits(t *testing.T) { // For this test's purpose, whenever a new ingester client needs to // be created, the factory will always return the same mock instance store := newStoreMock() - store.On("LazyQuery", mock.Anything, mock.Anything).Return(mockStreamIterator(1, 2), nil) + store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(1, 2), nil) queryClient := newQueryClientMock() queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 2)}), nil) diff --git a/pkg/storage/batch.go b/pkg/storage/batch.go index 327bee5fcb..1578029aa9 100644 --- a/pkg/storage/batch.go +++ b/pkg/storage/batch.go @@ -22,6 +22,15 @@ import ( "github.com/grafana/loki/pkg/logql/stats" ) +type genericIterator interface { + Next() bool + Labels() string + Error() error + Close() error +} + +type chunksIteratorFactory func(chunks []*LazyChunk, from, through time.Time, nextChunk *LazyChunk) (genericIterator, error) + // batchChunkIterator is an EntryIterator that iterates through chunks by batch of `batchSize`. // Since chunks can overlap across batches for each iteration the iterator will keep all overlapping // chunks with the next chunk from the next batch and added it to the next iteration. 
In this case the boundaries of the batch @@ -30,79 +39,75 @@ type batchChunkIterator struct { chunks lazyChunks batchSize int err error - curr iter.EntryIterator + curr genericIterator lastOverlapping []*LazyChunk - labels map[model.Fingerprint]string + iterFactory chunksIteratorFactory - ctx context.Context - cancel context.CancelFunc - matchers []*labels.Matcher - filter logql.LineFilter - req *logproto.QueryRequest - next chan *struct { - iter iter.EntryIterator + cancel context.CancelFunc + start, end time.Time + direction logproto.Direction + next chan *struct { + iter genericIterator err error } } // newBatchChunkIterator creates a new batch iterator with the given batchSize. -func newBatchChunkIterator(ctx context.Context, chunks []*LazyChunk, batchSize int, matchers []*labels.Matcher, filter logql.LineFilter, req *logproto.QueryRequest) *batchChunkIterator { - // __name__ is not something we filter by because it's a constant in loki - // and only used for upstream compatibility; therefore remove it. - // The same applies to the sharding label which is injected by the cortex storage code. - for _, omit := range []string{labels.MetricName, astmapper.ShardLabel} { - for i := range matchers { - if matchers[i].Name == omit { - matchers = append(matchers[:i], matchers[i+1:]...) 
- break - } - } - } +func newBatchChunkIterator( + ctx context.Context, + chunks []*LazyChunk, + batchSize int, + direction logproto.Direction, + start, end time.Time, + iterFactory chunksIteratorFactory, +) *batchChunkIterator { ctx, cancel := context.WithCancel(ctx) res := &batchChunkIterator{ batchSize: batchSize, - matchers: matchers, - filter: filter, - req: req, - ctx: ctx, - cancel: cancel, - chunks: lazyChunks{direction: req.Direction, chunks: chunks}, - labels: map[model.Fingerprint]string{}, + + start: start, + end: end, + direction: direction, + cancel: cancel, + iterFactory: iterFactory, + chunks: lazyChunks{direction: direction, chunks: chunks}, next: make(chan *struct { - iter iter.EntryIterator + iter genericIterator err error }), } sort.Sort(res.chunks) - go func() { - for { - if res.chunks.Len() == 0 { - close(res.next) + go res.loop(ctx) + return res +} + +func (it *batchChunkIterator) loop(ctx context.Context) { + for { + if it.chunks.Len() == 0 { + close(it.next) + return + } + next, err := it.nextBatch() + select { + case <-ctx.Done(): + close(it.next) + // next can be nil if we are waiting to return that the nextBatch was empty and the context is closed + // or if another error occurred reading nextBatch + if next == nil { return } - next, err := res.nextBatch() - select { - case <-ctx.Done(): - close(res.next) - // next can be nil if we are waiting to return that the nextBatch was empty and the context is closed - // or if another error occurred reading nextBatch - if next == nil { - return - } - err = next.Close() - if err != nil { - level.Error(util.WithContext(ctx, util.Logger)).Log("msg", "Failed to close the pre-fetched iterator when pre-fetching was canceled", "err", err) - } - return - case res.next <- &struct { - iter iter.EntryIterator - err error - }{next, err}: + err = next.Close() + if err != nil { + level.Error(util.WithContext(ctx, util.Logger)).Log("msg", "Failed to close the pre-fetched iterator when pre-fetching was 
canceled", "err", err) } + return + case it.next <- &struct { + iter genericIterator + err error + }{next, err}: } - }() - return res + } } func (it *batchChunkIterator) Next() bool { @@ -128,10 +133,10 @@ func (it *batchChunkIterator) Next() bool { } } -func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { +func (it *batchChunkIterator) nextBatch() (genericIterator, error) { // the first chunk of the batch headChunk := it.chunks.Peek() - from, through := it.req.Start, it.req.End + from, through := it.start, it.end batch := make([]*LazyChunk, 0, it.batchSize+len(it.lastOverlapping)) var nextChunk *LazyChunk @@ -139,11 +144,11 @@ func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { // pop the next batch of chunks and append/prepend previous overlapping chunks // so we can merge/de-dupe overlapping entries. - if it.req.Direction == logproto.FORWARD { + if it.direction == logproto.FORWARD { batch = append(batch, it.lastOverlapping...) } batch = append(batch, it.chunks.pop(it.batchSize)...) - if it.req.Direction == logproto.BACKWARD { + if it.direction == logproto.BACKWARD { batch = append(batch, it.lastOverlapping...) } @@ -151,14 +156,14 @@ func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { nextChunk = it.chunks.Peek() // we max out our iterator boundaries to the next chunks in the queue // so that overlapping chunks are together - if it.req.Direction == logproto.BACKWARD { + if it.direction == logproto.BACKWARD { from = time.Unix(0, nextChunk.Chunk.Through.UnixNano()) // we have to reverse the inclusivity of the chunk iterator from // [from, through) to (from, through] for backward queries, except when // the batch's `from` is equal to the query's Start. This can be achieved // by shifting `from` by one nanosecond. 
- if !from.Equal(it.req.Start) { + if !from.Equal(it.start) { from = from.Add(time.Nanosecond) } } else { @@ -184,18 +189,18 @@ func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { } - if it.req.Direction == logproto.BACKWARD { + if it.direction == logproto.BACKWARD { through = time.Unix(0, headChunk.Chunk.Through.UnixNano()) - if through.After(it.req.End) { - through = it.req.End + if through.After(it.end) { + through = it.end } // we have to reverse the inclusivity of the chunk iterator from // [from, through) to (from, through] for backward queries, except when // the batch's `through` is equal to the query's End. This can be achieved // by shifting `through` by one nanosecond. - if !through.Equal(it.req.End) { + if !through.Equal(it.end) { through = through.Add(time.Nanosecond) } } else { @@ -203,8 +208,8 @@ func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { // when clipping the from it should never be before the start or equal to the end. // Doing so would include entries not requested. - if from.Before(it.req.Start) || from.Equal(it.req.End) { - from = it.req.Start + if from.Before(it.start) || from.Equal(it.end) { + from = it.start } } @@ -218,17 +223,13 @@ func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { if it.chunks.Len() > 0 { it.lastOverlapping = it.lastOverlapping[:0] for _, c := range batch { - if c.IsOverlapping(nextChunk, it.req.Direction) { + if c.IsOverlapping(nextChunk, it.direction) { it.lastOverlapping = append(it.lastOverlapping, c) } } } // create the new chunks iterator from the current batch. 
- return it.newChunksIterator(batch, from, through, nextChunk) -} - -func (it *batchChunkIterator) Entry() logproto.Entry { - return it.curr.Entry() + return it.iterFactory(batch, from, through, nextChunk) } func (it *batchChunkIterator) Labels() string { @@ -253,29 +254,59 @@ func (it *batchChunkIterator) Close() error { return nil } -// newChunksIterator creates an iterator over a set of lazychunks. -func (it *batchChunkIterator) newChunksIterator(chunks []*LazyChunk, from, through time.Time, nextChunk *LazyChunk) (iter.EntryIterator, error) { - chksBySeries := partitionBySeriesChunks(chunks) +type labelCache map[model.Fingerprint]string - // Make sure the initial chunks are loaded. This is not one chunk - // per series, but rather a chunk per non-overlapping iterator. - if err := loadFirstChunks(it.ctx, chksBySeries); err != nil { - return nil, err +// computeLabels compute the labels string representation, uses a map to cache result per fingerprint. +func (l labelCache) computeLabels(c *LazyChunk) string { + if lbs, ok := l[c.Chunk.Fingerprint]; ok { + return lbs } + lbs := dropLabels(c.Chunk.Metric, labels.MetricName).String() + l[c.Chunk.Fingerprint] = lbs + return lbs +} - // Now that we have the first chunk for each series loaded, - // we can proceed to filter the series that don't match. - chksBySeries = filterSeriesByMatchers(chksBySeries, it.matchers) +type logBatchIterator struct { + *batchChunkIterator - var allChunks []*LazyChunk - for _, series := range chksBySeries { - for _, chunks := range series { - allChunks = append(allChunks, chunks...) 
- } + ctx context.Context + matchers []*labels.Matcher + filter logql.LineFilter + labels labelCache +} + +func newLogBatchIterator( + ctx context.Context, + chunks []*LazyChunk, + batchSize int, + matchers []*labels.Matcher, + filter logql.LineFilter, + direction logproto.Direction, + start, end time.Time, +) (iter.EntryIterator, error) { + // __name__ is not something we filter by because it's a constant in loki + // and only used for upstream compatibility; therefore remove it. + // The same applies to the sharding label which is injected by the cortex storage code. + matchers = removeMatchersByName(matchers, labels.MetricName, astmapper.ShardLabel) + logbatch := &logBatchIterator{ + labels: map[model.Fingerprint]string{}, + matchers: matchers, + filter: filter, + ctx: ctx, } + batch := newBatchChunkIterator(ctx, chunks, batchSize, direction, start, end, logbatch.newChunksIterator) + logbatch.batchChunkIterator = batch + return logbatch, nil +} - // Finally we load all chunks not already loaded - if err := fetchLazyChunks(it.ctx, allChunks); err != nil { +func (it *logBatchIterator) Entry() logproto.Entry { + return it.curr.(iter.EntryIterator).Entry() +} + +// newChunksIterator creates an iterator over a set of lazychunks. 
+func (it *logBatchIterator) newChunksIterator(chunks []*LazyChunk, from, through time.Time, nextChunk *LazyChunk) (genericIterator, error) { + chksBySeries, err := fetchChunkBySeries(it.ctx, chunks, it.matchers) + if err != nil { return nil, err } @@ -284,10 +315,10 @@ func (it *batchChunkIterator) newChunksIterator(chunks []*LazyChunk, from, throu return nil, err } - return iter.NewHeapIterator(it.ctx, iters, it.req.Direction), nil + return iter.NewHeapIterator(it.ctx, iters, it.direction), nil } -func (it *batchChunkIterator) buildIterators(chks map[model.Fingerprint][][]*LazyChunk, from, through time.Time, nextChunk *LazyChunk) ([]iter.EntryIterator, error) { +func (it *logBatchIterator) buildIterators(chks map[model.Fingerprint][][]*LazyChunk, from, through time.Time, nextChunk *LazyChunk) ([]iter.EntryIterator, error) { result := make([]iter.EntryIterator, 0, len(chks)) for _, chunks := range chks { iterator, err := it.buildHeapIterator(chunks, from, through, nextChunk) @@ -300,34 +331,24 @@ func (it *batchChunkIterator) buildIterators(chks map[model.Fingerprint][][]*Laz return result, nil } -// computeLabels compute the labels string representation, uses a map to cache result per fingerprint. -func (it *batchChunkIterator) computeLabels(c *LazyChunk) string { - if lbs, ok := it.labels[c.Chunk.Fingerprint]; ok { - return lbs - } - lbs := dropLabels(c.Chunk.Metric, labels.MetricName).String() - it.labels[c.Chunk.Fingerprint] = lbs - return lbs -} - -func (it *batchChunkIterator) buildHeapIterator(chks [][]*LazyChunk, from, through time.Time, nextChunk *LazyChunk) (iter.EntryIterator, error) { +func (it *logBatchIterator) buildHeapIterator(chks [][]*LazyChunk, from, through time.Time, nextChunk *LazyChunk) (iter.EntryIterator, error) { result := make([]iter.EntryIterator, 0, len(chks)) // __name__ is only used for upstream compatibility and is hardcoded within loki. Strip it from the return label set. 
- labels := it.computeLabels(chks[0][0]) + labels := it.labels.computeLabels(chks[0][0]) for i := range chks { iterators := make([]iter.EntryIterator, 0, len(chks[i])) for j := range chks[i] { if !chks[i][j].IsValid { continue } - iterator, err := chks[i][j].Iterator(it.ctx, from, through, it.req.Direction, it.filter, nextChunk) + iterator, err := chks[i][j].Iterator(it.ctx, from, through, it.direction, it.filter, nextChunk) if err != nil { return nil, err } iterators = append(iterators, iterator) } - if it.req.Direction == logproto.BACKWARD { + if it.direction == logproto.BACKWARD { for i, j := 0, len(iterators)-1; i < j; i, j = i+1, j-1 { iterators[i], iterators[j] = iterators[j], iterators[i] } @@ -335,7 +356,137 @@ func (it *batchChunkIterator) buildHeapIterator(chks [][]*LazyChunk, from, throu result = append(result, iter.NewNonOverlappingIterator(iterators, labels)) } - return iter.NewHeapIterator(it.ctx, result, it.req.Direction), nil + return iter.NewHeapIterator(it.ctx, result, it.direction), nil +} + +type sampleBatchIterator struct { + *batchChunkIterator + + ctx context.Context + matchers []*labels.Matcher + filter logql.LineFilter + extractor logql.SampleExtractor + labels labelCache +} + +func newSampleBatchIterator( + ctx context.Context, + chunks []*LazyChunk, + batchSize int, + matchers []*labels.Matcher, + filter logql.LineFilter, + extractor logql.SampleExtractor, + start, end time.Time, +) (iter.SampleIterator, error) { + // __name__ is not something we filter by because it's a constant in loki + // and only used for upstream compatibility; therefore remove it. + // The same applies to the sharding label which is injected by the cortex storage code. 
+ matchers = removeMatchersByName(matchers, labels.MetricName, astmapper.ShardLabel) + + samplebatch := &sampleBatchIterator{ + labels: map[model.Fingerprint]string{}, + matchers: matchers, + filter: filter, + extractor: extractor, + ctx: ctx, + } + batch := newBatchChunkIterator(ctx, chunks, batchSize, logproto.FORWARD, start, end, samplebatch.newChunksIterator) + samplebatch.batchChunkIterator = batch + return samplebatch, nil +} + +func (it *sampleBatchIterator) Sample() logproto.Sample { + return it.curr.(iter.SampleIterator).Sample() +} + +// newChunksIterator creates an iterator over a set of lazychunks. +func (it *sampleBatchIterator) newChunksIterator(chunks []*LazyChunk, from, through time.Time, nextChunk *LazyChunk) (genericIterator, error) { + chksBySeries, err := fetchChunkBySeries(it.ctx, chunks, it.matchers) + if err != nil { + return nil, err + } + iters, err := it.buildIterators(chksBySeries, from, through, nextChunk) + if err != nil { + return nil, err + } + + return iter.NewHeapSampleIterator(it.ctx, iters), nil +} + +func (it *sampleBatchIterator) buildIterators(chks map[model.Fingerprint][][]*LazyChunk, from, through time.Time, nextChunk *LazyChunk) ([]iter.SampleIterator, error) { + result := make([]iter.SampleIterator, 0, len(chks)) + for _, chunks := range chks { + iterator, err := it.buildHeapIterator(chunks, from, through, nextChunk) + if err != nil { + return nil, err + } + result = append(result, iterator) + } + + return result, nil +} + +func (it *sampleBatchIterator) buildHeapIterator(chks [][]*LazyChunk, from, through time.Time, nextChunk *LazyChunk) (iter.SampleIterator, error) { + result := make([]iter.SampleIterator, 0, len(chks)) + + // __name__ is only used for upstream compatibility and is hardcoded within loki. Strip it from the return label set. 
+ labels := it.labels.computeLabels(chks[0][0]) + for i := range chks { + iterators := make([]iter.SampleIterator, 0, len(chks[i])) + for j := range chks[i] { + if !chks[i][j].IsValid { + continue + } + iterator, err := chks[i][j].SampleIterator(it.ctx, from, through, it.filter, it.extractor, nextChunk) + if err != nil { + return nil, err + } + iterators = append(iterators, iterator) + } + + result = append(result, iter.NewNonOverlappingSampleIterator(iterators, labels)) + } + + return iter.NewHeapSampleIterator(it.ctx, result), nil +} + +func removeMatchersByName(matchers []*labels.Matcher, names ...string) []*labels.Matcher { + for _, omit := range names { + for i := range matchers { + if matchers[i].Name == omit { + matchers = append(matchers[:i], matchers[i+1:]...) + break + } + } + } + return matchers +} + +func fetchChunkBySeries(ctx context.Context, chunks []*LazyChunk, matchers []*labels.Matcher) (map[model.Fingerprint][][]*LazyChunk, error) { + chksBySeries := partitionBySeriesChunks(chunks) + + // Make sure the initial chunks are loaded. This is not one chunk + // per series, but rather a chunk per non-overlapping iterator. + if err := loadFirstChunks(ctx, chksBySeries); err != nil { + return nil, err + } + + // Now that we have the first chunk for each series loaded, + // we can proceed to filter the series that don't match. + chksBySeries = filterSeriesByMatchers(chksBySeries, matchers) + + var allChunks []*LazyChunk + for _, series := range chksBySeries { + for _, chunks := range series { + allChunks = append(allChunks, chunks...) 
+ } + } + + // Finally we load all chunks not already loaded + if err := fetchLazyChunks(ctx, allChunks); err != nil { + return nil, err + } + return chksBySeries, nil } func filterSeriesByMatchers(chks map[model.Fingerprint][][]*LazyChunk, matchers []*labels.Matcher) map[model.Fingerprint][][]*LazyChunk { diff --git a/pkg/storage/batch_test.go b/pkg/storage/batch_test.go index dc1c008a4c..19dbd5d284 100644 --- a/pkg/storage/batch_test.go +++ b/pkg/storage/batch_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/cespare/xxhash/v2" "github.com/cortexproject/cortex/pkg/chunk" "github.com/pkg/errors" "github.com/prometheus/common/model" @@ -21,7 +22,7 @@ import ( "github.com/grafana/loki/pkg/logql/stats" ) -func Test_newBatchChunkIterator(t *testing.T) { +func Test_newLogBatchChunkIterator(t *testing.T) { tests := map[string]struct { chunks []*LazyChunk @@ -552,7 +553,8 @@ func Test_newBatchChunkIterator(t *testing.T) { for name, tt := range tests { tt := tt t.Run(name, func(t *testing.T) { - it := newBatchChunkIterator(context.Background(), tt.chunks, tt.batchSize, newMatchers(tt.matchers), nil, newQuery("", tt.start, tt.end, tt.direction, nil)) + it, err := newLogBatchIterator(context.Background(), tt.chunks, tt.batchSize, newMatchers(tt.matchers), nil, tt.direction, tt.start, tt.end) + require.NoError(t, err) streams, _, err := iter.ReadBatch(it, 1000) _ = it.Close() if err != nil { @@ -565,6 +567,291 @@ func Test_newBatchChunkIterator(t *testing.T) { } } +func Test_newSampleBatchChunkIterator(t *testing.T) { + + tests := map[string]struct { + chunks []*LazyChunk + expected []logproto.Series + matchers string + start, end time.Time + batchSize int + }{ + "forward with overlap": { + []*LazyChunk{ + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from, + Line: "1", + }, + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: 
fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + { + Timestamp: from.Add(4 * time.Millisecond), + Line: "5", + }, + }, + }), + }, + []logproto.Series{ + { + Labels: fooLabels, + Samples: []logproto.Sample{ + { + Timestamp: from.UnixNano(), + Hash: xxhash.Sum64String("1"), + Value: 1., + }, + { + Timestamp: from.Add(time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("2"), + Value: 1., + }, + { + Timestamp: from.Add(2 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("3"), + Value: 1., + }, + { + Timestamp: from.Add(3 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("4"), + Value: 1., + }, + }, + }, + }, + fooLabelsWithName, + from, from.Add(4 * time.Millisecond), + 2, + }, + "forward with overlapping non-continuous entries": { + []*LazyChunk{ + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from, + Line: "1", + }, + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(3 
* time.Millisecond), + Line: "4", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }), + }, + []logproto.Series{ + { + Labels: fooLabels, + Samples: []logproto.Sample{ + { + Timestamp: from.UnixNano(), + Hash: xxhash.Sum64String("1"), + Value: 1., + }, + { + Timestamp: from.Add(time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("2"), + Value: 1., + }, + { + Timestamp: from.Add(2 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("3"), + Value: 1., + }, + }, + }, + }, + fooLabelsWithName, + from, from.Add(3 * time.Millisecond), + 2, + }, + "forward without overlap": { + []*LazyChunk{ + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from, + Line: "1", + }, + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }), + }, + []logproto.Series{ + { + Labels: fooLabels, + Samples: []logproto.Sample{ + { + Timestamp: from.UnixNano(), + Hash: xxhash.Sum64String("1"), + Value: 1., + }, + { + Timestamp: 
from.Add(time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("2"), + Value: 1., + }, + { + Timestamp: from.Add(2 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("3"), + Value: 1., + }, + }, + }, + }, + fooLabelsWithName, + from, from.Add(3 * time.Millisecond), + 2, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + it, err := newSampleBatchIterator(context.Background(), tt.chunks, tt.batchSize, newMatchers(tt.matchers), nil, logql.ExtractCount, tt.start, tt.end) + require.NoError(t, err) + series, _, err := iter.ReadSampleBatch(it, 1000) + _ = it.Close() + if err != nil { + t.Fatalf("error reading batch %s", err) + } + + assertSeries(t, tt.expected, series.Series) + + }) + } +} + func TestPartitionOverlappingchunks(t *testing.T) { var ( oneThroughFour = newLazyChunk(logproto.Stream{ @@ -754,9 +1041,11 @@ func TestBuildHeapIterator(t *testing.T) { } { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { ctx = user.InjectOrgID(context.Background(), "test-user") - b := &batchChunkIterator{ + b := &logBatchIterator{ + batchChunkIterator: &batchChunkIterator{ + direction: logproto.FORWARD, + }, ctx: ctx, - req: &logproto.QueryRequest{Direction: logproto.FORWARD}, labels: map[model.Fingerprint]string{}, } it, err := b.buildHeapIterator(tc.input, from, from.Add(6*time.Millisecond), nil) @@ -764,7 +1053,7 @@ func TestBuildHeapIterator(t *testing.T) { t.Errorf("buildHeapIterator error = %v", err) return } - req := newQuery("{foo=\"bar\"}", from, from.Add(6*time.Millisecond), logproto.FORWARD, nil) + req := newQuery("{foo=\"bar\"}", from, from.Add(6*time.Millisecond), nil) streams, _, err := iter.ReadBatch(it, req.Limit) _ = it.Close() if err != nil { @@ -864,7 +1153,7 @@ func Benchmark_store_OverlappingChunks(b *testing.B) { ctx := user.InjectOrgID(stats.NewContext(context.Background()), "fake") start := time.Now() for i := 0; i < b.N; i++ { - it, err := st.LazyQuery(ctx, logql.SelectParams{QueryRequest: 
&logproto.QueryRequest{ + it, err := st.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &logproto.QueryRequest{ Selector: `{foo="bar"}`, Direction: logproto.BACKWARD, Limit: 0, diff --git a/pkg/storage/cache.go b/pkg/storage/cache.go index db69e8d11f..41070b4206 100644 --- a/pkg/storage/cache.go +++ b/pkg/storage/cache.go @@ -92,3 +92,90 @@ func (it *cachedIterator) Close() error { it.reset() return it.closeErr } + +// cachedIterator is an iterator that caches iteration to be replayed later on. +type cachedSampleIterator struct { + cache []logproto.Sample + base iter.SampleIterator + + labels string + curr int + + closeErr error + iterErr error +} + +// newSampleCachedIterator creates an iterator that cache iteration result and can be iterated again +// after closing it without re-using the underlaying iterator `it`. +// The cache iterator should be used for entries that belongs to the same stream only. +func newCachedSampleIterator(it iter.SampleIterator, cap int) *cachedSampleIterator { + c := &cachedSampleIterator{ + base: it, + cache: make([]logproto.Sample, 0, cap), + curr: -1, + } + c.load() + return c +} + +func (it *cachedSampleIterator) reset() { + it.curr = -1 +} + +func (it *cachedSampleIterator) load() { + if it.base != nil { + defer func() { + it.closeErr = it.base.Close() + it.iterErr = it.base.Error() + it.base = nil + it.reset() + }() + // set labels using the first entry + if !it.base.Next() { + return + } + it.labels = it.base.Labels() + + // add all entries until the base iterator is exhausted + for { + it.cache = append(it.cache, it.base.Sample()) + if !it.base.Next() { + break + } + } + + } +} + +func (it *cachedSampleIterator) Next() bool { + if len(it.cache) == 0 { + it.cache = nil + return false + } + if it.curr+1 >= len(it.cache) { + return false + } + it.curr++ + return it.curr < len(it.cache) +} + +func (it *cachedSampleIterator) Sample() logproto.Sample { + if len(it.cache) == 0 { + return logproto.Sample{} + } + if it.curr < 0 { + 
return it.cache[0] + } + return it.cache[it.curr] +} + +func (it *cachedSampleIterator) Labels() string { + return it.labels +} + +func (it *cachedSampleIterator) Error() error { return it.iterErr } + +func (it *cachedSampleIterator) Close() error { + it.reset() + return it.closeErr +} diff --git a/pkg/storage/cache_test.go b/pkg/storage/cache_test.go index cdc7f7d473..40cb45220e 100644 --- a/pkg/storage/cache_test.go +++ b/pkg/storage/cache_test.go @@ -77,10 +77,77 @@ func Test_ErrorCachedIterator(t *testing.T) { require.Equal(t, errors.New("close"), c.Close()) } +func Test_CachedSampleIterator(t *testing.T) { + series := logproto.Series{ + Labels: `{foo="bar"}`, + Samples: []logproto.Sample{ + {Timestamp: time.Unix(0, 1).UnixNano(), Hash: 1, Value: 1.}, + {Timestamp: time.Unix(0, 2).UnixNano(), Hash: 2, Value: 2.}, + {Timestamp: time.Unix(0, 3).UnixNano(), Hash: 3, Value: 3.}, + }, + } + c := newCachedSampleIterator(iter.NewSeriesIterator(series), 3) + + assert := func() { + // we should crash for call of entry without next although that's not expected. + require.Equal(t, series.Labels, c.Labels()) + require.Equal(t, series.Samples[0], c.Sample()) + require.Equal(t, true, c.Next()) + require.Equal(t, series.Samples[0], c.Sample()) + require.Equal(t, true, c.Next()) + require.Equal(t, series.Samples[1], c.Sample()) + require.Equal(t, true, c.Next()) + require.Equal(t, series.Samples[2], c.Sample()) + require.Equal(t, false, c.Next()) + require.Equal(t, nil, c.Error()) + require.Equal(t, series.Samples[2], c.Sample()) + require.Equal(t, false, c.Next()) + } + + assert() + + // Close the iterator reset it to the beginning. 
+ require.Equal(t, nil, c.Close()) + + assert() +} + +func Test_EmptyCachedSampleIterator(t *testing.T) { + + c := newCachedSampleIterator(iter.NoopIterator, 0) + + require.Equal(t, "", c.Labels()) + require.Equal(t, logproto.Sample{}, c.Sample()) + require.Equal(t, false, c.Next()) + require.Equal(t, "", c.Labels()) + require.Equal(t, logproto.Sample{}, c.Sample()) + + require.Equal(t, nil, c.Close()) + + require.Equal(t, "", c.Labels()) + require.Equal(t, logproto.Sample{}, c.Sample()) + require.Equal(t, false, c.Next()) + require.Equal(t, "", c.Labels()) + require.Equal(t, logproto.Sample{}, c.Sample()) + +} + +func Test_ErrorCachedSampleIterator(t *testing.T) { + + c := newCachedSampleIterator(&errorIter{}, 0) + + require.Equal(t, false, c.Next()) + require.Equal(t, "", c.Labels()) + require.Equal(t, logproto.Sample{}, c.Sample()) + require.Equal(t, errors.New("error"), c.Error()) + require.Equal(t, errors.New("close"), c.Close()) +} + type errorIter struct{} -func (errorIter) Next() bool { return false } -func (errorIter) Error() error { return errors.New("error") } -func (errorIter) Labels() string { return "" } -func (errorIter) Entry() logproto.Entry { return logproto.Entry{} } -func (errorIter) Close() error { return errors.New("close") } +func (errorIter) Next() bool { return false } +func (errorIter) Error() error { return errors.New("error") } +func (errorIter) Labels() string { return "" } +func (errorIter) Entry() logproto.Entry { return logproto.Entry{} } +func (errorIter) Sample() logproto.Sample { return logproto.Sample{} } +func (errorIter) Close() error { return errors.New("close") } diff --git a/pkg/storage/lazy_chunk.go b/pkg/storage/lazy_chunk.go index 366eefe2f5..709442e121 100644 --- a/pkg/storage/lazy_chunk.go +++ b/pkg/storage/lazy_chunk.go @@ -21,7 +21,8 @@ type LazyChunk struct { // cache of overlapping block. // We use the offset of the block as key since it's unique per chunk. 
- overlappingBlocks map[int]*cachedIterator + overlappingBlocks map[int]*cachedIterator + overlappingSampleBlocks map[int]*cachedSampleIterator } // Iterator returns an entry iterator. @@ -86,6 +87,62 @@ func (c *LazyChunk) Iterator( return iter.NewEntryReversedIter(iterForward) } +// SampleIterator returns an sample iterator. +// The iterator returned will cache overlapping block's entries with the next chunk if passed. +// This way when we re-use them for ordering across batches we don't re-decompress the data again. +func (c *LazyChunk) SampleIterator( + ctx context.Context, + from, through time.Time, + filter logql.LineFilter, + extractor logql.SampleExtractor, + nextChunk *LazyChunk, +) (iter.SampleIterator, error) { + + // If the chunk is not already loaded, then error out. + if c.Chunk.Data == nil { + return nil, errors.New("chunk is not loaded") + } + + lokiChunk := c.Chunk.Data.(*chunkenc.Facade).LokiChunk() + blocks := lokiChunk.Blocks(from, through) + if len(blocks) == 0 { + return iter.NoopIterator, nil + } + its := make([]iter.SampleIterator, 0, len(blocks)) + + for _, b := range blocks { + // if we have already processed and cache block let's use it. + if cache, ok := c.overlappingSampleBlocks[b.Offset()]; ok { + clone := *cache + clone.reset() + its = append(its, &clone) + continue + } + // if the block is overlapping cache it with the next chunk boundaries. + if nextChunk != nil && IsBlockOverlapping(b, nextChunk, logproto.FORWARD) { + it := newCachedSampleIterator(b.SampleIterator(ctx, filter, extractor), b.Entries()) + its = append(its, it) + if c.overlappingSampleBlocks == nil { + c.overlappingSampleBlocks = make(map[int]*cachedSampleIterator) + } + c.overlappingSampleBlocks[b.Offset()] = it + continue + } + if nextChunk != nil { + delete(c.overlappingBlocks, b.Offset()) + } + // non-overlapping block with the next chunk are not cached. 
+ its = append(its, b.SampleIterator(ctx, filter, extractor)) + } + + // build the final iterator bound to the requested time range. + return iter.NewTimeRangedSampleIterator( + iter.NewNonOverlappingSampleIterator(its, ""), + from.UnixNano(), + through.UnixNano(), + ), nil +} + func IsBlockOverlapping(b chunkenc.Block, with *LazyChunk, direction logproto.Direction) bool { if direction == logproto.BACKWARD { through := int64(with.Chunk.Through) * int64(time.Millisecond) diff --git a/pkg/storage/lazy_chunk_test.go b/pkg/storage/lazy_chunk_test.go index 7fefd874a2..9495e43353 100644 --- a/pkg/storage/lazy_chunk_test.go +++ b/pkg/storage/lazy_chunk_test.go @@ -99,11 +99,12 @@ type fakeBlock struct { mint, maxt int64 } -func (fakeBlock) Entries() int { return 0 } -func (fakeBlock) Offset() int { return 0 } -func (f fakeBlock) MinTime() int64 { return f.mint } -func (f fakeBlock) MaxTime() int64 { return f.maxt } -func (fakeBlock) Iterator(context.Context, logql.LineFilter) iter.EntryIterator { +func (fakeBlock) Entries() int { return 0 } +func (fakeBlock) Offset() int { return 0 } +func (f fakeBlock) MinTime() int64 { return f.mint } +func (f fakeBlock) MaxTime() int64 { return f.maxt } +func (fakeBlock) Iterator(context.Context, logql.LineFilter) iter.EntryIterator { return nil } +func (fakeBlock) SampleIterator(context.Context, logql.LineFilter, logql.SampleExtractor) iter.SampleIterator { return nil } diff --git a/pkg/storage/store.go b/pkg/storage/store.go index 4c9f2b5445..c14970766f 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -39,8 +39,9 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // Store is the Loki chunk store to retrieve and save chunks. 
type Store interface { chunk.Store - LazyQuery(ctx context.Context, req logql.SelectParams) (iter.EntryIterator, error) - GetSeries(ctx context.Context, req logql.SelectParams) ([]logproto.SeriesIdentifier, error) + SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) + SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) + GetSeries(ctx context.Context, req logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) } type store struct { @@ -72,7 +73,7 @@ func NewTableClient(name string, cfg Config) (chunk.TableClient, error) { // decodeReq sanitizes an incoming request, rounds bounds, appends the __name__ matcher, // and adds the "__cortex_shard__" label if this is a sharded query. -func decodeReq(req logql.SelectParams) ([]*labels.Matcher, logql.LineFilter, model.Time, model.Time, error) { +func decodeReq(req logql.QueryParams) ([]*labels.Matcher, logql.LineFilter, model.Time, model.Time, error) { expr, err := req.LogSelector() if err != nil { return nil, nil, 0, 0, err @@ -113,7 +114,7 @@ func decodeReq(req logql.SelectParams) ([]*labels.Matcher, logql.LineFilter, mod } } - from, through := util.RoundToMilliseconds(req.Start, req.End) + from, through := util.RoundToMilliseconds(req.GetStart(), req.GetEnd()) return matchers, filter, from, through, nil } @@ -147,7 +148,7 @@ func (s *store) lazyChunks(ctx context.Context, matchers []*labels.Matcher, from return lazyChunks, nil } -func (s *store) GetSeries(ctx context.Context, req logql.SelectParams) ([]logproto.SeriesIdentifier, error) { +func (s *store) GetSeries(ctx context.Context, req logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) { var from, through model.Time var matchers []*labels.Matcher @@ -227,9 +228,9 @@ func (s *store) GetSeries(ctx context.Context, req logql.SelectParams) ([]logpro } -// LazyQuery returns an iterator that will query the store for more chunks while iterating instead of fetching all chunks upfront 
+// SelectLogs returns an iterator that will query the store for more chunks while iterating instead of fetching all chunks upfront // for that request. -func (s *store) LazyQuery(ctx context.Context, req logql.SelectParams) (iter.EntryIterator, error) { +func (s *store) SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) { matchers, filter, from, through, err := decodeReq(req) if err != nil { return nil, err @@ -244,10 +245,37 @@ func (s *store) LazyQuery(ctx context.Context, req logql.SelectParams) (iter.Ent return iter.NoopIterator, nil } - return newBatchChunkIterator(ctx, lazyChunks, s.cfg.MaxChunkBatchSize, matchers, filter, req.QueryRequest), nil + return newLogBatchIterator(ctx, lazyChunks, s.cfg.MaxChunkBatchSize, matchers, filter, req.Direction, req.Start, req.End) } +func (s *store) SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) { + matchers, filter, from, through, err := decodeReq(req) + if err != nil { + return nil, err + } + + expr, err := req.Expr() + if err != nil { + return nil, err + } + + extractor, err := expr.Extractor() + if err != nil { + return nil, err + } + + lazyChunks, err := s.lazyChunks(ctx, matchers, from, through) + if err != nil { + return nil, err + } + + if len(lazyChunks) == 0 { + return iter.NoopIterator, nil + } + return newSampleBatchIterator(ctx, lazyChunks, s.cfg.MaxChunkBatchSize, matchers, filter, extractor, req.Start, req.End) +} + func filterChunksByTime(from, through model.Time, chunks []chunk.Chunk) []chunk.Chunk { filtered := make([]chunk.Chunk, 0, len(chunks)) for _, chunk := range chunks { diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go index c85d0561de..c668a7c4ec 100644 --- a/pkg/storage/store_test.go +++ b/pkg/storage/store_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" 
"github.com/stretchr/testify/require" @@ -39,9 +40,9 @@ var ( ) //go test -bench=. -benchmem -memprofile memprofile.out -cpuprofile profile.out -func Benchmark_store_LazyQueryRegexBackward(b *testing.B) { +func Benchmark_store_SelectLogsRegexBackward(b *testing.B) { benchmarkStoreQuery(b, &logproto.QueryRequest{ - Selector: `{foo="bar"} |= "fuzz"`, + Selector: `{foo="bar"} |~ "fuzz"`, Limit: 1000, Start: time.Unix(0, start.UnixNano()), End: time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()), @@ -49,7 +50,7 @@ func Benchmark_store_LazyQueryRegexBackward(b *testing.B) { }) } -func Benchmark_store_LazyQueryLogQLBackward(b *testing.B) { +func Benchmark_store_SelectLogsLogQLBackward(b *testing.B) { benchmarkStoreQuery(b, &logproto.QueryRequest{ Selector: `{foo="bar"} |= "test" != "toto" |= "fuzz"`, Limit: 1000, @@ -59,9 +60,9 @@ func Benchmark_store_LazyQueryLogQLBackward(b *testing.B) { }) } -func Benchmark_store_LazyQueryRegexForward(b *testing.B) { +func Benchmark_store_SelectLogsRegexForward(b *testing.B) { benchmarkStoreQuery(b, &logproto.QueryRequest{ - Selector: `{foo="bar"} |= "fuzz"`, + Selector: `{foo="bar"} |~ "fuzz"`, Limit: 1000, Start: time.Unix(0, start.UnixNano()), End: time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()), @@ -69,7 +70,7 @@ func Benchmark_store_LazyQueryRegexForward(b *testing.B) { }) } -func Benchmark_store_LazyQueryForward(b *testing.B) { +func Benchmark_store_SelectLogsForward(b *testing.B) { benchmarkStoreQuery(b, &logproto.QueryRequest{ Selector: `{foo="bar"}`, Limit: 1000, @@ -79,7 +80,7 @@ func Benchmark_store_LazyQueryForward(b *testing.B) { }) } -func Benchmark_store_LazyQueryBackward(b *testing.B) { +func Benchmark_store_SelectLogsBackward(b *testing.B) { benchmarkStoreQuery(b, &logproto.QueryRequest{ Selector: `{foo="bar"}`, Limit: 1000, @@ -89,6 +90,37 @@ func Benchmark_store_LazyQueryBackward(b *testing.B) { }) } +// rm -Rf /tmp/benchmark/chunks/ /tmp/benchmark/index +// go run -mod=vendor 
./pkg/storage/hack/main.go +// go test -benchmem -run=^$ -mod=vendor ./pkg/storage -bench=Benchmark_store_SelectSample -memprofile memprofile.out -cpuprofile cpuprofile.out +func Benchmark_store_SelectSample(b *testing.B) { + var sampleRes []logproto.Sample + for _, test := range []string{ + `count_over_time({foo="bar"}[5m])`, + `rate({foo="bar"}[5m])`, + `bytes_rate({foo="bar"}[5m])`, + `bytes_over_time({foo="bar"}[5m])`, + } { + b.Run(test, func(b *testing.B) { + for i := 0; i < b.N; i++ { + iter, err := chunkStore.SelectSamples(ctx, logql.SelectSampleParams{ + SampleQueryRequest: newSampleQuery(test, time.Unix(0, start.UnixNano()), time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano())), + }) + if err != nil { + b.Fatal(err) + } + + for iter.Next() { + sampleRes = append(sampleRes, iter.Sample()) + } + iter.Close() + } + }) + } + log.Print("sample processed ", len(sampleRes)) + +} + func benchmarkStoreQuery(b *testing.B, query *logproto.QueryRequest) { b.ReportAllocs() // force to run gc 10x more often this can be useful to detect fast allocation vs leak. 
@@ -111,7 +143,7 @@ func benchmarkStoreQuery(b *testing.B, query *logproto.QueryRequest) { } }() for i := 0; i < b.N; i++ { - iter, err := chunkStore.LazyQuery(ctx, logql.SelectParams{QueryRequest: query}) + iter, err := chunkStore.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: query}) if err != nil { b.Fatal(err) } @@ -180,7 +212,7 @@ func getLocalStore() Store { return store } -func Test_store_LazyQuery(t *testing.T) { +func Test_store_SelectLogs(t *testing.T) { tests := []struct { name string @@ -189,7 +221,7 @@ func Test_store_LazyQuery(t *testing.T) { }{ { "all", - newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), logproto.FORWARD, nil), + newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), nil), []logproto.Stream{ { Labels: "{foo=\"bar\"}", @@ -257,7 +289,7 @@ func Test_store_LazyQuery(t *testing.T) { }, { "filter regex", - newQuery("{foo=~\"ba.*\"} |~ \"1|2|3\" !~ \"2|3\"", from, from.Add(6*time.Millisecond), logproto.FORWARD, nil), + newQuery("{foo=~\"ba.*\"} |~ \"1|2|3\" !~ \"2|3\"", from, from.Add(6*time.Millisecond), nil), []logproto.Stream{ { Labels: "{foo=\"bar\"}", @@ -281,7 +313,7 @@ func Test_store_LazyQuery(t *testing.T) { }, { "filter matcher", - newQuery("{foo=\"bar\"}", from, from.Add(6*time.Millisecond), logproto.FORWARD, nil), + newQuery("{foo=\"bar\"}", from, from.Add(6*time.Millisecond), nil), []logproto.Stream{ { Labels: "{foo=\"bar\"}", @@ -318,7 +350,7 @@ func Test_store_LazyQuery(t *testing.T) { }, { "filter time", - newQuery("{foo=~\"ba.*\"}", from, from.Add(time.Millisecond), logproto.FORWARD, nil), + newQuery("{foo=~\"ba.*\"}", from, from.Add(time.Millisecond), nil), []logproto.Stream{ { Labels: "{foo=\"bar\"}", @@ -351,7 +383,7 @@ func Test_store_LazyQuery(t *testing.T) { } ctx = user.InjectOrgID(context.Background(), "test-user") - it, err := s.LazyQuery(ctx, logql.SelectParams{QueryRequest: tt.req}) + it, err := s.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: tt.req}) if err != nil { 
t.Errorf("store.LazyQuery() error = %v", err) return @@ -367,6 +399,215 @@ func Test_store_LazyQuery(t *testing.T) { } } +func Test_store_SelectSample(t *testing.T) { + + tests := []struct { + name string + req *logproto.SampleQueryRequest + expected []logproto.Series + }{ + { + "all", + newSampleQuery("count_over_time({foo=~\"ba.*\"}[5m])", from, from.Add(6*time.Millisecond)), + []logproto.Series{ + { + Labels: "{foo=\"bar\"}", + Samples: []logproto.Sample{ + { + Timestamp: from.UnixNano(), + Hash: xxhash.Sum64String("1"), + Value: 1., + }, + + { + Timestamp: from.Add(time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("2"), + Value: 1., + }, + { + Timestamp: from.Add(2 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("3"), + Value: 1., + }, + { + Timestamp: from.Add(3 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("4"), + Value: 1., + }, + + { + Timestamp: from.Add(4 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("5"), + Value: 1., + }, + { + Timestamp: from.Add(5 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("6"), + Value: 1., + }, + }, + }, + { + Labels: "{foo=\"bazz\"}", + Samples: []logproto.Sample{ + { + Timestamp: from.UnixNano(), + Hash: xxhash.Sum64String("1"), + Value: 1., + }, + + { + Timestamp: from.Add(time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("2"), + Value: 1., + }, + { + Timestamp: from.Add(2 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("3"), + Value: 1., + }, + { + Timestamp: from.Add(3 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("4"), + Value: 1., + }, + + { + Timestamp: from.Add(4 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("5"), + Value: 1., + }, + { + Timestamp: from.Add(5 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("6"), + Value: 1., + }, + }, + }, + }, + }, + { + "filter regex", + newSampleQuery("rate({foo=~\"ba.*\"} |~ \"1|2|3\" !~ \"2|3\"[1m])", from, from.Add(6*time.Millisecond)), + []logproto.Series{ + { + Labels: 
"{foo=\"bar\"}", + Samples: []logproto.Sample{ + { + Timestamp: from.UnixNano(), + Hash: xxhash.Sum64String("1"), + Value: 1., + }, + }, + }, + { + Labels: "{foo=\"bazz\"}", + Samples: []logproto.Sample{ + { + Timestamp: from.UnixNano(), + Hash: xxhash.Sum64String("1"), + Value: 1., + }, + }, + }, + }, + }, + { + "filter matcher", + newSampleQuery("count_over_time({foo=\"bar\"}[10m])", from, from.Add(6*time.Millisecond)), + []logproto.Series{ + { + Labels: "{foo=\"bar\"}", + Samples: []logproto.Sample{ + { + Timestamp: from.UnixNano(), + Hash: xxhash.Sum64String("1"), + Value: 1., + }, + + { + Timestamp: from.Add(time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("2"), + Value: 1., + }, + { + Timestamp: from.Add(2 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("3"), + Value: 1., + }, + { + Timestamp: from.Add(3 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("4"), + Value: 1., + }, + + { + Timestamp: from.Add(4 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("5"), + Value: 1., + }, + { + Timestamp: from.Add(5 * time.Millisecond).UnixNano(), + Hash: xxhash.Sum64String("6"), + Value: 1., + }, + }, + }, + }, + }, + { + "filter time", + newSampleQuery("count_over_time({foo=~\"ba.*\"}[1s])", from, from.Add(time.Millisecond)), + []logproto.Series{ + { + Labels: "{foo=\"bar\"}", + Samples: []logproto.Sample{ + { + Timestamp: from.UnixNano(), + Hash: xxhash.Sum64String("1"), + Value: 1., + }, + }, + }, + { + Labels: "{foo=\"bazz\"}", + Samples: []logproto.Sample{ + { + Timestamp: from.UnixNano(), + Hash: xxhash.Sum64String("1"), + Value: 1., + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &store{ + Store: storeFixture, + cfg: Config{ + MaxChunkBatchSize: 10, + }, + } + + ctx = user.InjectOrgID(context.Background(), "test-user") + it, err := s.SelectSamples(ctx, logql.SelectSampleParams{SampleQueryRequest: tt.req}) + if err != nil { + t.Errorf("store.LazyQuery() error = %v", err) 
+ return + } + + series, _, err := iter.ReadSampleBatch(it, uint32(100000)) + _ = it.Close() + if err != nil { + t.Fatalf("error reading batch %s", err) + } + assertSeries(t, tt.expected, series.Series) + }) + } +} + func Test_store_GetSeries(t *testing.T) { tests := []struct { @@ -377,7 +618,7 @@ func Test_store_GetSeries(t *testing.T) { }{ { "all", - newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), logproto.FORWARD, nil), + newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), nil), []logproto.SeriesIdentifier{ {Labels: mustParseLabels("{foo=\"bar\"}")}, {Labels: mustParseLabels("{foo=\"bazz\"}")}, @@ -386,7 +627,7 @@ func Test_store_GetSeries(t *testing.T) { }, { "all-single-batch", - newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), logproto.FORWARD, nil), + newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), nil), []logproto.SeriesIdentifier{ {Labels: mustParseLabels("{foo=\"bar\"}")}, {Labels: mustParseLabels("{foo=\"bazz\"}")}, @@ -395,7 +636,7 @@ func Test_store_GetSeries(t *testing.T) { }, { "regexp filter (post chunk fetching)", - newQuery("{foo=~\"bar.*\"}", from, from.Add(6*time.Millisecond), logproto.FORWARD, nil), + newQuery("{foo=~\"bar.*\"}", from, from.Add(6*time.Millisecond), nil), []logproto.SeriesIdentifier{ {Labels: mustParseLabels("{foo=\"bar\"}")}, }, @@ -403,7 +644,7 @@ func Test_store_GetSeries(t *testing.T) { }, { "filter matcher", - newQuery("{foo=\"bar\"}", from, from.Add(6*time.Millisecond), logproto.FORWARD, nil), + newQuery("{foo=\"bar\"}", from, from.Add(6*time.Millisecond), nil), []logproto.SeriesIdentifier{ {Labels: mustParseLabels("{foo=\"bar\"}")}, }, @@ -419,7 +660,7 @@ func Test_store_GetSeries(t *testing.T) { }, } ctx = user.InjectOrgID(context.Background(), "test-user") - out, err := s.GetSeries(ctx, logql.SelectParams{QueryRequest: tt.req}) + out, err := s.GetSeries(ctx, logql.SelectLogParams{QueryRequest: tt.req}) if err != nil { t.Errorf("store.GetSeries() error = %v", 
err) return @@ -437,7 +678,7 @@ func Test_store_decodeReq_Matchers(t *testing.T) { }{ { "unsharded", - newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), logproto.FORWARD, nil), + newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), nil), []*labels.Matcher{ labels.MustNewMatcher(labels.MatchRegexp, "foo", "ba.*"), labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "logs"), @@ -446,7 +687,7 @@ func Test_store_decodeReq_Matchers(t *testing.T) { { "unsharded", newQuery( - "{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), logproto.FORWARD, + "{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), []astmapper.ShardAnnotation{ {Shard: 1, Of: 2}, }, @@ -464,7 +705,7 @@ func Test_store_decodeReq_Matchers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ms, _, _, _, err := decodeReq(logql.SelectParams{QueryRequest: tt.req}) + ms, _, _, _, err := decodeReq(logql.SelectLogParams{QueryRequest: tt.req}) if err != nil { t.Errorf("store.GetSeries() error = %v", err) return diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go index d1274485cb..15771cd625 100644 --- a/pkg/storage/util_test.go +++ b/pkg/storage/util_test.go @@ -47,6 +47,28 @@ func assertStream(t *testing.T, expected, actual []logproto.Stream) { } } +func assertSeries(t *testing.T, expected, actual []logproto.Series) { + if len(expected) != len(actual) { + t.Fatalf("error stream length are different expected %d actual %d\n%s", len(expected), len(actual), spew.Sdump(expected, actual)) + return + } + sort.Slice(expected, func(i int, j int) bool { return expected[i].Labels < expected[j].Labels }) + sort.Slice(actual, func(i int, j int) bool { return actual[i].Labels < actual[j].Labels }) + for i := range expected { + assert.Equal(t, expected[i].Labels, actual[i].Labels) + if len(expected[i].Samples) != len(actual[i].Samples) { + t.Fatalf("error entries length are different expected %d actual%d\n%s", len(expected[i].Samples), 
len(actual[i].Samples), spew.Sdump(expected[i].Samples, actual[i].Samples)) + + return + } + for j := range expected[i].Samples { + assert.Equal(t, expected[i].Samples[j].Timestamp, actual[i].Samples[j].Timestamp) + assert.Equal(t, expected[i].Samples[j].Value, actual[i].Samples[j].Value) + assert.Equal(t, expected[i].Samples[j].Hash, actual[i].Samples[j].Hash) + } + } +} + func newLazyChunk(stream logproto.Stream) *LazyChunk { return &LazyChunk{ Fetcher: nil, @@ -102,13 +124,13 @@ func newMatchers(matchers string) []*labels.Matcher { return res } -func newQuery(query string, start, end time.Time, direction logproto.Direction, shards []astmapper.ShardAnnotation) *logproto.QueryRequest { +func newQuery(query string, start, end time.Time, shards []astmapper.ShardAnnotation) *logproto.QueryRequest { req := &logproto.QueryRequest{ Selector: query, Start: start, Limit: 1000, End: end, - Direction: direction, + Direction: logproto.FORWARD, } for _, shard := range shards { req.Shards = append(req.Shards, shard.String()) @@ -116,6 +138,15 @@ func newQuery(query string, start, end time.Time, direction logproto.Direction, return req } +func newSampleQuery(query string, start, end time.Time) *logproto.SampleQueryRequest { + req := &logproto.SampleQueryRequest{ + Selector: query, + Start: start, + End: end, + } + return req +} + type mockChunkStore struct { chunks []chunk.Chunk client *mockChunkStoreClient diff --git a/vendor/github.com/segmentio/fasthash/fnv1a/hash.go b/vendor/github.com/segmentio/fasthash/fnv1a/hash.go index d2f3c96684..92849b1142 100644 --- a/vendor/github.com/segmentio/fasthash/fnv1a/hash.go +++ b/vendor/github.com/segmentio/fasthash/fnv1a/hash.go @@ -14,6 +14,11 @@ func HashString64(s string) uint64 { return AddString64(Init64, s) } +// HashBytes64 returns the hash of u. +func HashBytes64(b []byte) uint64 { + return AddBytes64(Init64, b) +} + // HashUint64 returns the hash of u. 
func HashUint64(u uint64) uint64 { return AddUint64(Init64, u) @@ -34,24 +39,69 @@ func AddString64(h uint64, s string) uint64 { - BenchmarkHash64/hash_function-4 50000000 38.6 ns/op 932.35 MB/s 0 B/op 0 allocs/op */ + for len(s) >= 8 { + h = (h ^ uint64(s[0])) * prime64 + h = (h ^ uint64(s[1])) * prime64 + h = (h ^ uint64(s[2])) * prime64 + h = (h ^ uint64(s[3])) * prime64 + h = (h ^ uint64(s[4])) * prime64 + h = (h ^ uint64(s[5])) * prime64 + h = (h ^ uint64(s[6])) * prime64 + h = (h ^ uint64(s[7])) * prime64 + s = s[8:] + } + + if len(s) >= 4 { + h = (h ^ uint64(s[0])) * prime64 + h = (h ^ uint64(s[1])) * prime64 + h = (h ^ uint64(s[2])) * prime64 + h = (h ^ uint64(s[3])) * prime64 + s = s[4:] + } + + if len(s) >= 2 { + h = (h ^ uint64(s[0])) * prime64 + h = (h ^ uint64(s[1])) * prime64 + s = s[2:] + } + + if len(s) > 0 { + h = (h ^ uint64(s[0])) * prime64 + } + + return h +} + +// AddBytes64 adds the hash of b to the precomputed hash value h. +func AddBytes64(h uint64, b []byte) uint64 { + for len(b) >= 8 { + h = (h ^ uint64(b[0])) * prime64 + h = (h ^ uint64(b[1])) * prime64 + h = (h ^ uint64(b[2])) * prime64 + h = (h ^ uint64(b[3])) * prime64 + h = (h ^ uint64(b[4])) * prime64 + h = (h ^ uint64(b[5])) * prime64 + h = (h ^ uint64(b[6])) * prime64 + h = (h ^ uint64(b[7])) * prime64 + b = b[8:] + } + + if len(b) >= 4 { + h = (h ^ uint64(b[0])) * prime64 + h = (h ^ uint64(b[1])) * prime64 + h = (h ^ uint64(b[2])) * prime64 + h = (h ^ uint64(b[3])) * prime64 + b = b[4:] + } - i := 0 - n := (len(s) / 8) * 8 - - for i != n { - h = (h ^ uint64(s[i])) * prime64 - h = (h ^ uint64(s[i+1])) * prime64 - h = (h ^ uint64(s[i+2])) * prime64 - h = (h ^ uint64(s[i+3])) * prime64 - h = (h ^ uint64(s[i+4])) * prime64 - h = (h ^ uint64(s[i+5])) * prime64 - h = (h ^ uint64(s[i+6])) * prime64 - h = (h ^ uint64(s[i+7])) * prime64 - i += 8 + if len(b) >= 2 { + h = (h ^ uint64(b[0])) * prime64 + h = (h ^ uint64(b[1])) * prime64 + b = b[2:] } - for _, c := range s[i:] { - h = (h ^ 
uint64(c)) * prime64 + if len(b) > 0 { + h = (h ^ uint64(b[0])) * prime64 } return h diff --git a/vendor/github.com/segmentio/fasthash/fnv1a/hash32.go b/vendor/github.com/segmentio/fasthash/fnv1a/hash32.go index df1f3e5b07..ac91e24703 100644 --- a/vendor/github.com/segmentio/fasthash/fnv1a/hash32.go +++ b/vendor/github.com/segmentio/fasthash/fnv1a/hash32.go @@ -14,6 +14,11 @@ func HashString32(s string) uint32 { return AddString32(Init32, s) } +// HashBytes32 returns the hash of u. +func HashBytes32(b []byte) uint32 { + return AddBytes32(Init32, b) +} + // HashUint32 returns the hash of u. func HashUint32(u uint32) uint32 { return AddUint32(Init32, u) @@ -21,23 +26,69 @@ func HashUint32(u uint32) uint32 { // AddString32 adds the hash of s to the precomputed hash value h. func AddString32(h uint32, s string) uint32 { - i := 0 - n := (len(s) / 8) * 8 - - for i != n { - h = (h ^ uint32(s[i])) * prime32 - h = (h ^ uint32(s[i+1])) * prime32 - h = (h ^ uint32(s[i+2])) * prime32 - h = (h ^ uint32(s[i+3])) * prime32 - h = (h ^ uint32(s[i+4])) * prime32 - h = (h ^ uint32(s[i+5])) * prime32 - h = (h ^ uint32(s[i+6])) * prime32 - h = (h ^ uint32(s[i+7])) * prime32 - i += 8 - } - - for _, c := range s[i:] { - h = (h ^ uint32(c)) * prime32 + for len(s) >= 8 { + h = (h ^ uint32(s[0])) * prime32 + h = (h ^ uint32(s[1])) * prime32 + h = (h ^ uint32(s[2])) * prime32 + h = (h ^ uint32(s[3])) * prime32 + h = (h ^ uint32(s[4])) * prime32 + h = (h ^ uint32(s[5])) * prime32 + h = (h ^ uint32(s[6])) * prime32 + h = (h ^ uint32(s[7])) * prime32 + s = s[8:] + } + + if len(s) >= 4 { + h = (h ^ uint32(s[0])) * prime32 + h = (h ^ uint32(s[1])) * prime32 + h = (h ^ uint32(s[2])) * prime32 + h = (h ^ uint32(s[3])) * prime32 + s = s[4:] + } + + if len(s) >= 2 { + h = (h ^ uint32(s[0])) * prime32 + h = (h ^ uint32(s[1])) * prime32 + s = s[2:] + } + + if len(s) > 0 { + h = (h ^ uint32(s[0])) * prime32 + } + + return h +} + +// AddBytes32 adds the hash of b to the precomputed hash value h. 
+func AddBytes32(h uint32, b []byte) uint32 { + for len(b) >= 8 { + h = (h ^ uint32(b[0])) * prime32 + h = (h ^ uint32(b[1])) * prime32 + h = (h ^ uint32(b[2])) * prime32 + h = (h ^ uint32(b[3])) * prime32 + h = (h ^ uint32(b[4])) * prime32 + h = (h ^ uint32(b[5])) * prime32 + h = (h ^ uint32(b[6])) * prime32 + h = (h ^ uint32(b[7])) * prime32 + b = b[8:] + } + + if len(b) >= 4 { + h = (h ^ uint32(b[0])) * prime32 + h = (h ^ uint32(b[1])) * prime32 + h = (h ^ uint32(b[2])) * prime32 + h = (h ^ uint32(b[3])) * prime32 + b = b[4:] + } + + if len(b) >= 2 { + h = (h ^ uint32(b[0])) * prime32 + h = (h ^ uint32(b[1])) * prime32 + b = b[2:] + } + + if len(b) > 0 { + h = (h ^ uint32(b[0])) * prime32 } return h diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 780e387e3f..08f8230d6d 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -509,7 +509,7 @@ ccflags="$@" $2 ~ /^CAP_/ || $2 ~ /^ALG_/ || $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ || - $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|GETFLAGS)/ || + $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|[GS]ETFLAGS)/ || $2 ~ /^FS_VERITY_/ || $2 ~ /^FSCRYPT_/ || $2 ~ /^GRND_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 7b7c727525..e50e4cb276 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1950,6 +1950,20 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { return int(n), nil } +func isGroupMember(gid int) bool { + groups, err := Getgroups() + if err != nil { + return false + } + + for _, g := range groups { + if g == gid { + return true + } + } + return false +} + //sys faccessat(dirfd int, path string, mode uint32) (err error) func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -2007,7 +2021,7 @@ func Faccessat(dirfd int, path string, mode 
uint32, flags int) (err error) { gid = Getgid() } - if uint32(gid) == st.Gid { + if uint32(gid) == st.Gid || isGroupMember(gid) { fmode = (st.Mode >> 3) & 7 } else { fmode = st.Mode & 7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8d207b041e..11b25f68c2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -78,6 +78,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0xc F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c4bf9cb80f..f92cff6ea0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -78,6 +78,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 0cab0522e6..12bcbf88d6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0xc F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 370d0a7f59..8b0e024b94 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -80,6 +80,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index fbf2f3174e..eeadea943f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x21 F_GETLK64 = 0x21 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 25e74b30a9..0be6c4ccc0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0xe F_GETLK64 = 0xe diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 4ecc0bca34..0880b745c1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0xe F_GETLK64 = 0xe diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index dfb8f88a7e..c8a66627aa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x21 F_GETLK64 = 0x21 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 72d8dad5b8..97aae63f16 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x5 F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ca0e7b5262..b0c3b0664f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x5 F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 147511a974..0c05181935 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 517349dafa..0b96bd462e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 094822465b..bd5c305779 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -81,6 +81,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x7 F_GETLK64 = 0x7 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 6f79227d74..b91c2ae0f0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -125,9 +125,9 @@ type Statfs_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [1024]int8 - Mntonname [1024]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte } type statfs_freebsd11_t struct { @@ -150,9 +150,9 @@ type statfs_freebsd11_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [88]int8 - Mntonname [88]int8 + Fstypename [16]byte + Mntfromname [88]byte + Mntonname [88]byte } type Flock_t struct { diff --git a/vendor/modules.txt b/vendor/modules.txt index 742f5ee0b1..71706b4dd2 100644 
--- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -132,6 +132,7 @@ github.com/cenkalti/backoff # github.com/cespare/xxhash v1.1.0 github.com/cespare/xxhash # github.com/cespare/xxhash/v2 v2.1.1 +## explicit github.com/cespare/xxhash/v2 # github.com/containerd/containerd v1.3.4 github.com/containerd/containerd/errdefs @@ -753,7 +754,8 @@ github.com/samuel/go-zookeeper/zk github.com/satori/go.uuid # github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 github.com/sean-/seed -# github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e +# github.com/segmentio/fasthash v1.0.2 +## explicit github.com/segmentio/fasthash/fnv1a # github.com/sercand/kuberesolver v2.4.0+incompatible github.com/sercand/kuberesolver @@ -1028,7 +1030,8 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 +# golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae +## explicit golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix