Like Prometheus, but for logs.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
loki/pkg/querier/tail_test.go

467 lines
14 KiB

package querier
import (
"errors"
"testing"
"time"
"github.com/go-kit/log"
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
gokitlog "github.com/go-kit/log"
"github.com/grafana/loki/v3/pkg/iter"
loghttp "github.com/grafana/loki/v3/pkg/loghttp/legacy"
"github.com/grafana/loki/v3/pkg/logproto"
)
const (
	// timeout is the maximum time test helpers wait for the Tailer to produce data.
	timeout = 1 * time.Second
	// throttle is the polling interval used while waiting for responses.
	throttle = 10 * time.Millisecond
)
// TestTailer exercises the Tailer end-to-end using mocked historic entries and
// mocked ingester tail clients, covering plain tailing, response batching
// (maxEntriesPerTailResponse), buffering (maxBufferedTailResponses) and the
// dropped-entries limit (maxDroppedEntriesPerTailResponse).
func TestTailer(t *testing.T) {
	t.Parallel()

	tests := map[string]struct {
		historicEntries iter.EntryIterator
		tailClient      *tailClientMock
		tester          func(t *testing.T, tailer *Tailer, tailClient *tailClientMock)
	}{
		"tail logs from historic entries only (no tail clients provided)": {
			historicEntries: mockStreamIterator(1, 2),
			tailClient:      nil,
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				responses, err := readFromTailer(tailer, 2)
				require.NoError(t, err)

				actual := flattenStreamsFromResponses(responses)
				assert.Equal(t, []logproto.Stream{
					mockStream(1, 1),
					mockStream(2, 1),
				}, actual)
			},
		},
		"tail logs from tail clients only (no historic entries provided)": {
			historicEntries: mockStreamIterator(0, 0),
			tailClient:      newTailClientMock().mockRecvWithTrigger(mockTailResponse(mockStream(1, 1))),
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				tailClient.triggerRecv()

				responses, err := readFromTailer(tailer, 1)
				require.NoError(t, err)

				actual := flattenStreamsFromResponses(responses)
				assert.Equal(t, []logproto.Stream{
					mockStream(1, 1),
				}, actual)
			},
		},
		"tail logs both from historic entries and tail clients": {
			historicEntries: mockStreamIterator(1, 2),
			tailClient:      newTailClientMock().mockRecvWithTrigger(mockTailResponse(mockStream(3, 1))),
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				tailClient.triggerRecv()

				responses, err := readFromTailer(tailer, 3)
				require.NoError(t, err)

				actual := flattenStreamsFromResponses(responses)
				assert.Equal(t, []logproto.Stream{
					mockStream(1, 1),
					mockStream(2, 1),
					mockStream(3, 1),
				}, actual)
			},
		},
		"honor max entries per tail response": {
			historicEntries: mockStreamIterator(1, maxEntriesPerTailResponse+1),
			tailClient:      nil,
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				responses, err := readFromTailer(tailer, maxEntriesPerTailResponse+1)
				require.NoError(t, err)

				// The entries must be split into a full response plus one with
				// the single remaining entry.
				require.Equal(t, 2, len(responses))
				assert.Equal(t, maxEntriesPerTailResponse, countEntriesInStreams(responses[0].Streams))
				assert.Equal(t, 1, countEntriesInStreams(responses[1].Streams))
				assert.Equal(t, 0, len(responses[1].DroppedEntries))
			},
		},
		"honor max buffered tail responses": {
			historicEntries: mockStreamIterator(1, (maxEntriesPerTailResponse*maxBufferedTailResponses)+5),
			tailClient:      newTailClientMock().mockRecvWithTrigger(mockTailResponse(mockStream(1, 1))),
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				err := waitUntilTailerOpenStreamsHaveBeenConsumed(tailer)
				require.NoError(t, err)

				// Since the response channel is full/blocked, we do expect that all responses
				// are "full" and extra entries from historic entries have been dropped
				responses, err := readFromTailer(tailer, (maxEntriesPerTailResponse * maxBufferedTailResponses))
				require.NoError(t, err)

				require.Equal(t, maxBufferedTailResponses, len(responses))
				for i := 0; i < maxBufferedTailResponses; i++ {
					assert.Equal(t, maxEntriesPerTailResponse, countEntriesInStreams(responses[i].Streams))
					// Bug fix: previously this asserted on responses[1] instead of
					// responses[i], so only the second response was ever checked.
					assert.Equal(t, 0, len(responses[i].DroppedEntries))
				}

				// Since we'll not receive dropped entries until the next tail response, we're now
				// going to trigger a Recv() from the tail client
				tailClient.triggerRecv()

				responses, err = readFromTailer(tailer, 1)
				require.NoError(t, err)

				require.Equal(t, 1, len(responses))
				assert.Equal(t, 1, countEntriesInStreams(responses[0].Streams))
				assert.Equal(t, 5, len(responses[0].DroppedEntries))
			},
		},
		"honor max dropped entries per tail response": {
			historicEntries: mockStreamIterator(1, (maxEntriesPerTailResponse*maxBufferedTailResponses)+maxDroppedEntriesPerTailResponse+5),
			tailClient:      newTailClientMock().mockRecvWithTrigger(mockTailResponse(mockStream(1, 1))),
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				err := waitUntilTailerOpenStreamsHaveBeenConsumed(tailer)
				require.NoError(t, err)

				// Since the response channel is full/blocked, we do expect that all responses
				// are "full" and extra entries from historic entries have been dropped
				responses, err := readFromTailer(tailer, (maxEntriesPerTailResponse * maxBufferedTailResponses))
				require.NoError(t, err)

				require.Equal(t, maxBufferedTailResponses, len(responses))
				for i := 0; i < maxBufferedTailResponses; i++ {
					assert.Equal(t, maxEntriesPerTailResponse, countEntriesInStreams(responses[i].Streams))
					// Bug fix: previously this asserted on responses[1] instead of
					// responses[i], so only the second response was ever checked.
					assert.Equal(t, 0, len(responses[i].DroppedEntries))
				}

				// Since we'll not receive dropped entries until the next tail response, we're now
				// going to trigger a Recv() from the tail client
				tailClient.triggerRecv()

				responses, err = readFromTailer(tailer, 1)
				require.NoError(t, err)

				require.Equal(t, 1, len(responses))
				assert.Equal(t, 1, countEntriesInStreams(responses[0].Streams))
				// Dropped entries are capped at maxDroppedEntriesPerTailResponse
				// even though more were actually dropped.
				assert.Equal(t, maxDroppedEntriesPerTailResponse, len(responses[0].DroppedEntries))
			},
		},
	}

	for testName, test := range tests {
		t.Run(testName, func(t *testing.T) {
			tailDisconnectedIngesters := func([]string) (map[string]logproto.Querier_TailClient, error) {
				return map[string]logproto.Querier_TailClient{}, nil
			}

			tailClients := map[string]logproto.Querier_TailClient{}
			if test.tailClient != nil {
				tailClients["test"] = test.tailClient
			}

			tailer := newTailer(0, tailClients, test.historicEntries, tailDisconnectedIngesters, timeout, throttle, false, NewMetrics(nil), gokitlog.NewNopLogger())
			defer tailer.close()

			test.tester(t, tailer, test.tailClient)
		})
	}
}
// TestCategorizedLabels verifies how the Tailer handles entries carrying
// structured metadata and parser-extracted labels: with categorizeLabels=false
// the extra labels stay baked into the stream's label set, while with
// categorizeLabels=true they are kept only in the entry's StructuredMetadata /
// Parsed fields and the stream labels remain the original ones.
func TestCategorizedLabels(t *testing.T) {
	t.Parallel()

	lbs := labels.FromStrings("app", "foo")

	// Historic entries: one plain line and one line carrying structured metadata.
	createHistoricalEntries := func() iter.EntryIterator {
		return iter.NewStreamIterator(logproto.Stream{
			Labels: lbs.String(),
			Entries: []logproto.Entry{
				{
					Timestamp: time.Unix(1, 0),
					Line:      "foo=1",
				},
				{
					Timestamp:          time.Unix(2, 0),
					Line:               "foo=2",
					StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123")),
				},
			},
		})
	}

	// Tail clients: a plain entry (test1), an entry with structured metadata
	// (test2), and an entry with both structured metadata and parsed labels
	// (test3). test2/test3 also carry the extra labels in the stream label set,
	// as ingesters would send them when labels are not categorized.
	createTailClients := func() map[string]*tailClientMock {
		return map[string]*tailClientMock{
			"test1": newTailClientMock().mockRecvWithTrigger(mockTailResponse(logproto.Stream{
				Labels: lbs.String(),
				Entries: []logproto.Entry{
					{
						Timestamp: time.Unix(3, 0),
						Line:      "foo=3",
					},
				},
			})),
			"test2": newTailClientMock().mockRecvWithTrigger(mockTailResponse(logproto.Stream{
				Labels: labels.NewBuilder(lbs).Set("traceID", "123").Labels().String(),
				Entries: []logproto.Entry{
					{
						Timestamp:          time.Unix(4, 0),
						Line:               "foo=4",
						StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123")),
					},
				},
			})),
			"test3": newTailClientMock().mockRecvWithTrigger(mockTailResponse(logproto.Stream{
				Labels: labels.NewBuilder(lbs).Set("traceID", "123").Set("foo", "5").Labels().String(),
				Entries: []logproto.Entry{
					{
						Timestamp:          time.Unix(5, 0),
						Line:               "foo=5",
						StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123")),
						Parsed:             logproto.FromLabelsToLabelAdapters(labels.FromStrings("foo", "5")),
					},
				},
			})),
		}
	}

	for _, tc := range []struct {
		name             string
		categorizeLabels bool
		historicEntries  iter.EntryIterator
		tailClients      map[string]*tailClientMock
		expectedStreams  []logproto.Stream
	}{
		{
			// Extra labels remain part of the stream label sets.
			name:             "without categorize",
			categorizeLabels: false,
			historicEntries:  createHistoricalEntries(),
			tailClients:      createTailClients(),
			expectedStreams: []logproto.Stream{
				{
					Labels: lbs.String(),
					Entries: []logproto.Entry{
						{
							Timestamp: time.Unix(1, 0),
							Line:      "foo=1",
						},
					},
				},
				{
					Labels: lbs.String(),
					Entries: []logproto.Entry{
						{
							Timestamp:          time.Unix(2, 0),
							Line:               "foo=2",
							StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123")),
						},
					},
				},
				{
					Labels: lbs.String(),
					Entries: []logproto.Entry{
						{
							Timestamp: time.Unix(3, 0),
							Line:      "foo=3",
						},
					},
				},
				{
					Labels: labels.NewBuilder(lbs).Set("traceID", "123").Labels().String(),
					Entries: []logproto.Entry{
						{
							Timestamp:          time.Unix(4, 0),
							Line:               "foo=4",
							StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123")),
						},
					},
				},
				{
					Labels: labels.NewBuilder(lbs).Set("traceID", "123").Set("foo", "5").Labels().String(),
					Entries: []logproto.Entry{
						{
							Timestamp:          time.Unix(5, 0),
							Line:               "foo=5",
							StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123")),
							Parsed:             logproto.FromLabelsToLabelAdapters(labels.FromStrings("foo", "5")),
						},
					},
				},
			},
		},
		{
			// Extra labels are stripped from the stream label sets and kept
			// only in the categorized entry fields.
			name:             "categorize",
			categorizeLabels: true,
			historicEntries:  createHistoricalEntries(),
			tailClients:      createTailClients(),
			expectedStreams: []logproto.Stream{
				{
					Labels: lbs.String(),
					Entries: []logproto.Entry{
						{
							Timestamp: time.Unix(1, 0),
							Line:      "foo=1",
						},
					},
				},
				{
					Labels: lbs.String(),
					Entries: []logproto.Entry{
						{
							Timestamp:          time.Unix(2, 0),
							Line:               "foo=2",
							StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123")),
						},
					},
				},
				{
					Labels: lbs.String(),
					Entries: []logproto.Entry{
						{
							Timestamp: time.Unix(3, 0),
							Line:      "foo=3",
						},
					},
				},
				{
					Labels: lbs.String(),
					Entries: []logproto.Entry{
						{
							Timestamp:          time.Unix(4, 0),
							Line:               "foo=4",
							StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123")),
						},
					},
				},
				{
					Labels: lbs.String(),
					Entries: []logproto.Entry{
						{
							Timestamp:          time.Unix(5, 0),
							Line:               "foo=5",
							StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123")),
							Parsed:             logproto.FromLabelsToLabelAdapters(labels.FromStrings("foo", "5")),
						},
					},
				},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			tailDisconnectedIngesters := func([]string) (map[string]logproto.Querier_TailClient, error) {
				return map[string]logproto.Querier_TailClient{}, nil
			}

			tailClients := map[string]logproto.Querier_TailClient{}
			for k, v := range tc.tailClients {
				tailClients[k] = v
			}

			tailer := newTailer(0, tailClients, tc.historicEntries, tailDisconnectedIngesters, timeout, throttle, tc.categorizeLabels, NewMetrics(nil), log.NewNopLogger())
			defer tailer.close()

			// Make tail clients receive their responses
			for _, client := range tc.tailClients {
				client.triggerRecv()
			}

			err := waitUntilTailerOpenStreamsHaveBeenConsumed(tailer)
			require.NoError(t, err)

			maxEntries := countEntriesInStreams(tc.expectedStreams)
			responses, err := readFromTailer(tailer, maxEntries)
			require.NoError(t, err)

			// Order is implementation-defined, so compare as sets of
			// single-entry streams.
			streams := flattenStreamsFromResponses(responses)
			require.ElementsMatch(t, tc.expectedStreams, streams)
		})
	}
}
Loki HTTP/JSON Model Layer (#1022) * First pass data model Signed-off-by: Joe Elliott <number101010@gmail.com> * Use prom model b/c we're serializing promql objects Signed-off-by: Joe Elliott <number101010@gmail.com> * Added legacy query support and tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Added legacy label test Signed-off-by: Joe Elliott <number101010@gmail.com> * Added tail response marshalling and tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed marshallers and test Signed-off-by: Joe Elliott <number101010@gmail.com> * Expanded legacy test cases Signed-off-by: Joe Elliott <number101010@gmail.com> * Dropped streams nano test Signed-off-by: Joe Elliott <number101010@gmail.com> * First pass v1 new objects Signed-off-by: Joe Elliott <number101010@gmail.com> * Added failing tail response tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Added failing tailresponse test Signed-off-by: Joe Elliott <number101010@gmail.com> * Partial v1 tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Improved legacy labels test Signed-off-by: Joe Elliott <number101010@gmail.com> * Improved legacy query tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Moved all legacy tests to new method Signed-off-by: Joe Elliott <number101010@gmail.com> * Added v1 tests and fixed stream marshalling bug Signed-off-by: Joe Elliott <number101010@gmail.com> * First pass new Model Signed-off-by: Joe Elliott <number101010@gmail.com> * Added vector test Signed-off-by: Joe Elliott <number101010@gmail.com> * Added matrix tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Added conversions for all things except tailed responses Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed mixed case issues Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed tail marshalling Signed-off-by: Joe Elliott <number101010@gmail.com> * Removed unused testStream Signed-off-by: Joe Elliott <number101010@gmail.com> * Moved TailResponse to 
loghttp Signed-off-by: Joe Elliott <number101010@gmail.com> * Removed legacy tailresponse objects in favor of actual legacy tailresponse objects Signed-off-by: Joe Elliott <number101010@gmail.com> * Updated v1 methods to take legacy tail objects Signed-off-by: Joe Elliott <number101010@gmail.com> * Cleaned up tests. Added some comments Signed-off-by: Joe Elliott <number101010@gmail.com> * Versioned tail endpoint Signed-off-by: Joe Elliott <number101010@gmail.com> * Improved readability on loghttp packages in http.go Signed-off-by: Joe Elliott <number101010@gmail.com> * Removed new as a var name Signed-off-by: Joe Elliott <number101010@gmail.com> * Started all error messages with lowercase alerts Signed-off-by: Joe Elliott <number101010@gmail.com> * new => ret Signed-off-by: Joe Elliott <number101010@gmail.com> * Added comments on exported methods Signed-off-by: Joe Elliott <number101010@gmail.com> * Removed two personal notes Signed-off-by: Joe Elliott <number101010@gmail.com> * Changed legacy package name to loghttp Signed-off-by: Joe Elliott <number101010@gmail.com> * Moved and renamed loghttp v1 package Signed-off-by: Joe Elliott <number101010@gmail.com> * Moved marshalling code out of model Signed-off-by: Joe Elliott <number101010@gmail.com> * Added package comments Signed-off-by: Joe Elliott <number101010@gmail.com> * Added legacy testing Signed-off-by: Joe Elliott <number101010@gmail.com> * Changed DroppedStream slice to value type for consistency Signed-off-by: Joe Elliott <number101010@gmail.com> * gofmt'ed test files Signed-off-by: Joe Elliott <number101010@gmail.com> * Cleaned up linting issues Signed-off-by: Joe Elliott <number101010@gmail.com> * Minor comment cleanup Signed-off-by: Joe Elliott <number101010@gmail.com> * Adjusted GOGC to make CircleCI happy Signed-off-by: Joe Elliott <number101010@gmail.com> * Changed legacy => loghttp for consistency Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed matrix error message to be correct 
Signed-off-by: Joe Elliott <number101010@gmail.com> * Moved label query over to loghttp response Signed-off-by: Joe Elliott <number101010@gmail.com> * Added marshal loop tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Added response type test Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed tail response marshal/unmarshal Signed-off-by: Joe Elliott <number101010@gmail.com> * Passing unmarshal/marshal queryresponse tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed vector and matrix Signed-off-by: Joe Elliott <number101010@gmail.com> * Added output support for streams minus ordering Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed tailing Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed output tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed query tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Order log output Signed-off-by: Joe Elliott <number101010@gmail.com> * Use labels instead of stream Signed-off-by: Joe Elliott <number101010@gmail.com> * Lowered parallelization for CircleCI Signed-off-by: Joe Elliott <number101010@gmail.com>
6 years ago
func readFromTailer(tailer *Tailer, maxEntries int) ([]*loghttp.TailResponse, error) {
responses := make([]*loghttp.TailResponse, 0)
entriesCount := 0
// Ensure we do not wait indefinitely
timeoutTicker := time.NewTicker(timeout)
defer timeoutTicker.Stop()
Add mutex for t.stopped to prevent data races (#11933) **What this PR does / why we need it**: This addresses the data race present on the `t.stopped` variable in `tail.go`. ``` ================== WARNING: DATA RACE Write at 0x00c00098b198 by goroutine 568: github.com/grafana/loki/pkg/querier.(*Tailer).close() /Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail.go:272 +0x104 github.com/grafana/loki/pkg/querier.TestTailer.func7.2() /Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail_test.go:169 +0x34 runtime.deferreturn() /opt/homebrew/Cellar/go/1.21.6/libexec/src/runtime/panic.go:477 +0x34 testing.tRunner() /opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1595 +0x1b0 testing.(*T).Run.func1() /opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1648 +0x40 Previous read at 0x00c00098b198 by goroutine 569: github.com/grafana/loki/pkg/querier.(*Tailer).loop() /Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail.go:88 +0x13c github.com/grafana/loki/pkg/querier.newTailer.func1() /Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail.go:342 +0x34 Goroutine 568 (running) created at: testing.(*T).Run() /opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1648 +0x5e8 github.com/grafana/loki/pkg/querier.TestTailer() /Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail_test.go:158 +0x10dc testing.tRunner() /opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1595 +0x1b0 testing.(*T).Run.func1() /opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1648 +0x40 Goroutine 569 (running) created at: github.com/grafana/loki/pkg/querier.newTailer() /Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail.go:342 +0x300 github.com/grafana/loki/pkg/querier.TestTailer.func7() /Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail_test.go:168 +0x138 testing.tRunner() /opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1595 +0x1b0 testing.(*T).Run.func1() 
/opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1648 +0x40 ================== ``` **Which issue(s) this PR fixes**: Relates to: https://github.com/grafana/loki/issues/8586 **Special notes for your reviewer**: **Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. [Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)
1 year ago
for !tailer.stopped.Load() && entriesCount < maxEntries {
select {
case <-timeoutTicker.C:
return nil, errors.New("timeout expired while reading responses from Tailer")
case response := <-tailer.getResponseChan():
responses = append(responses, response)
entriesCount += countEntriesInStreams(response.Streams)
default:
time.Sleep(throttle)
}
}
return responses, nil
}
// waitUntilTailerOpenStreamsHaveBeenConsumed blocks until the Tailer has
// fully consumed the streams in its openStreamIterator, returning an error if
// the timeout expires first.
func waitUntilTailerOpenStreamsHaveBeenConsumed(tailer *Tailer) error {
	// Ensure we do not wait indefinitely. A one-shot Timer is the idiomatic
	// choice for a single deadline (a Ticker would needlessly keep re-firing).
	timeoutTimer := time.NewTimer(timeout)
	defer timeoutTimer.Stop()

	for {
		if isTailerOpenStreamsConsumed(tailer) {
			return nil
		}

		select {
		case <-timeoutTimer.C:
			return errors.New("timeout expired while waiting for Tailer to consume open streams")
		default:
			// Not consumed yet: throttle to avoid busy-waiting.
			time.Sleep(throttle)
		}
	}
}
// isTailerOpenStreamsConsumed returns whether the input Tailer has fully
// consumed all streams from the openStreamIterator, which means the
// Tailer.loop() is now throttling
func isTailerOpenStreamsConsumed(tailer *Tailer) bool {
tailer.streamMtx.Lock()
defer tailer.streamMtx.Unlock()
Iterators: re-implement mergeEntryIterator using loser.Tree for performance (#8637) **What this PR does / why we need it**: Building on #8351, this re-implements `mergeEntryIterator` using `loser.Tree`; the benchmark says it goes much faster but uses a bit more memory (while building the tree). ``` name old time/op new time/op delta SortIterator/merge_sort-4 10.7ms ± 4% 2.9ms ± 2% -72.74% (p=0.008 n=5+5) name old alloc/op new alloc/op delta SortIterator/merge_sort-4 11.2kB ± 0% 21.7kB ± 0% +93.45% (p=0.008 n=5+5) name old allocs/op new allocs/op delta SortIterator/merge_sort-4 6.00 ± 0% 7.00 ± 0% +16.67% (p=0.008 n=5+5) ``` The implementation is very different: rather than relying on iterators supporting `Peek()`, `mergeEntryIterator` now pulls items into its buffer until it finds one with a different timestamp or stream, and always works off what is in the buffer. The comment `"[we] pop the ones whose common value occurs most often."` did not appear to match the previous implementation, and no attempt was made to match this comment. A `Push()` function was added to `loser.Tree` to support live-streaming. This works by finding or making an empty slot, then re-running the initialize function to find the new winner. A consequence is that the previous "winner" value is lost after calling `Push()`, and users must call `Next()` to see the next item. A couple of tests had to be amended to avoid assuming particular behaviour of the implementation; I recommend that reviewers consider these closely. **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - NA Documentation added - [x] Tests updated - NA `CHANGELOG.md` updated - NA Changes that require user attention or interaction to upgrade are documented in `docs/sources/upgrading/_index.md`
2 years ago
return tailer.openStreamIterator.IsEmpty() || tailer.openStreamIterator.Peek() == time.Unix(0, 0)
}
// countEntriesInStreams returns the total number of log entries contained
// across all of the given streams.
func countEntriesInStreams(streams []logproto.Stream) int {
	total := 0
	for i := range streams {
		total += len(streams[i].Entries)
	}
	return total
}
// flattenStreamsFromResponses returns an array of streams each one containing
// one and only one entry from the input list of responses. This function is used
// to abstract away implementation details in the Tailer when testing for the output
// regardless how the responses have been generated (ie. multiple entries grouped
// into the same stream)
Loki HTTP/JSON Model Layer (#1022) * First pass data model Signed-off-by: Joe Elliott <number101010@gmail.com> * Use prom model b/c we're serializing promql objects Signed-off-by: Joe Elliott <number101010@gmail.com> * Added legacy query support and tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Added legacy label test Signed-off-by: Joe Elliott <number101010@gmail.com> * Added tail response marshalling and tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed marshallers and test Signed-off-by: Joe Elliott <number101010@gmail.com> * Expanded legacy test cases Signed-off-by: Joe Elliott <number101010@gmail.com> * Dropped streams nano test Signed-off-by: Joe Elliott <number101010@gmail.com> * First pass v1 new objects Signed-off-by: Joe Elliott <number101010@gmail.com> * Added failing tail response tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Added failing tailresponse test Signed-off-by: Joe Elliott <number101010@gmail.com> * Partial v1 tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Improved legacy labels test Signed-off-by: Joe Elliott <number101010@gmail.com> * Improved legacy query tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Moved all legacy tests to new method Signed-off-by: Joe Elliott <number101010@gmail.com> * Added v1 tests and fixed stream marshalling bug Signed-off-by: Joe Elliott <number101010@gmail.com> * First pass new Model Signed-off-by: Joe Elliott <number101010@gmail.com> * Added vector test Signed-off-by: Joe Elliott <number101010@gmail.com> * Added matrix tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Added conversions for all things except tailed responses Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed mixed case issues Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed tail marshalling Signed-off-by: Joe Elliott <number101010@gmail.com> * Removed unused testStream Signed-off-by: Joe Elliott <number101010@gmail.com> * Moved TailResponse to 
loghttp Signed-off-by: Joe Elliott <number101010@gmail.com> * Removed legacy tailresponse objects in favor of actual legacy tailresponse objects Signed-off-by: Joe Elliott <number101010@gmail.com> * Updated v1 methods to take legacy tail objects Signed-off-by: Joe Elliott <number101010@gmail.com> * Cleaned up tests. Added some comments Signed-off-by: Joe Elliott <number101010@gmail.com> * Versioned tail endpoint Signed-off-by: Joe Elliott <number101010@gmail.com> * Improved readability on loghttp packages in http.go Signed-off-by: Joe Elliott <number101010@gmail.com> * Removed new as a var name Signed-off-by: Joe Elliott <number101010@gmail.com> * Started all error messages with lowercase alerts Signed-off-by: Joe Elliott <number101010@gmail.com> * new => ret Signed-off-by: Joe Elliott <number101010@gmail.com> * Added comments on exported methods Signed-off-by: Joe Elliott <number101010@gmail.com> * Removed two personal notes Signed-off-by: Joe Elliott <number101010@gmail.com> * Changed legacy package name to loghttp Signed-off-by: Joe Elliott <number101010@gmail.com> * Moved and renamed loghttp v1 package Signed-off-by: Joe Elliott <number101010@gmail.com> * Moved marshalling code out of model Signed-off-by: Joe Elliott <number101010@gmail.com> * Added package comments Signed-off-by: Joe Elliott <number101010@gmail.com> * Added legacy testing Signed-off-by: Joe Elliott <number101010@gmail.com> * Changed DroppedStream slice to value type for consistency Signed-off-by: Joe Elliott <number101010@gmail.com> * gofmt'ed test files Signed-off-by: Joe Elliott <number101010@gmail.com> * Cleaned up linting issues Signed-off-by: Joe Elliott <number101010@gmail.com> * Minor comment cleanup Signed-off-by: Joe Elliott <number101010@gmail.com> * Adjusted GOGC to make CircleCI happy Signed-off-by: Joe Elliott <number101010@gmail.com> * Changed legacy => loghttp for consistency Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed matrix error message to be correct 
Signed-off-by: Joe Elliott <number101010@gmail.com> * Moved label query over to loghttp response Signed-off-by: Joe Elliott <number101010@gmail.com> * Added marshal loop tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Added response type test Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed tail response marshal/unmarshal Signed-off-by: Joe Elliott <number101010@gmail.com> * Passing unmarshal/marshal queryresponse tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed vector and matrix Signed-off-by: Joe Elliott <number101010@gmail.com> * Added output support for streams minus ordering Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed tailing Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed output tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Fixed query tests Signed-off-by: Joe Elliott <number101010@gmail.com> * Order log output Signed-off-by: Joe Elliott <number101010@gmail.com> * Use labels instead of stream Signed-off-by: Joe Elliott <number101010@gmail.com> * Lowered parallelization for CircleCI Signed-off-by: Joe Elliott <number101010@gmail.com>
6 years ago
func flattenStreamsFromResponses(responses []*loghttp.TailResponse) []logproto.Stream {
result := make([]logproto.Stream, 0)
for _, response := range responses {
for _, stream := range response.Streams {
for _, entry := range stream.Entries {
result = append(result, logproto.Stream{
Entries: []logproto.Entry{entry},
Labels: stream.Labels,
})
}
}
}
return result
}