Fixes all lint errors. (#2768)

Signed-off-by: Cyril Tovena <cyril.tovena@gmail.com>
pull/2694/head^2
Cyril Tovena 6 years ago committed by GitHub
parent 607ccc4d77
commit 62272cdaa1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 3
      cmd/loki/main.go
  2. 2
      pkg/canary/reader/reader.go
  3. 4
      pkg/distributor/distributor.go
  4. 2
      pkg/ingester/flush.go
  5. 2
      pkg/ingester/instance.go
  6. 2
      pkg/ingester/transfer.go
  7. 5
      pkg/logcli/output/default_test.go
  8. 2
      pkg/logcli/output/jsonl_test.go
  9. 2
      pkg/logcli/output/output.go
  10. 14
      pkg/logcli/output/output_test.go
  11. 4
      pkg/logcli/output/raw.go
  12. 2
      pkg/logcli/output/raw_test.go
  13. 6
      pkg/logcli/query/query.go
  14. 198
      pkg/logcli/query/query_test.go
  15. 2
      pkg/logcli/seriesquery/series.go
  16. 18
      pkg/logentry/stages/drop.go
  17. 2
      pkg/logentry/stages/drop_test.go
  18. 3
      pkg/logentry/stages/labeldrop_test.go
  19. 12
      pkg/logentry/stages/replace_test.go
  20. 4
      pkg/loki/modules.go
  21. 7
      pkg/lokifrontend/config.go
  22. 2
      pkg/promtail/client/client.go
  23. 2
      pkg/promtail/client/multi.go
  24. 1
      pkg/promtail/targets/file/filetarget.go
  25. 1
      pkg/promtail/targets/file/filetargetmanager.go
  26. 32
      pkg/promtail/targets/file/tailer.go
  27. 1
      pkg/promtail/targets/journal/journaltargetmanager.go
  28. 1
      pkg/promtail/targets/lokipush/pushtarget.go
  29. 3
      pkg/promtail/targets/lokipush/pushtargetmanager.go
  30. 15
      pkg/promtail/targets/stdin/stdin_target_manager.go
  31. 1
      pkg/promtail/targets/syslog/syslogtarget.go
  32. 1
      pkg/promtail/targets/syslog/syslogtargetmanager.go
  33. 1
      pkg/promtail/targets/target/target.go
  34. 2
      pkg/querier/ingester_querier_test.go
  35. 19
      pkg/ruler/manager/memstore_test.go
  36. 5
      pkg/ruler/ruler.go
  37. 3
      pkg/storage/async_store_test.go
  38. 2
      pkg/storage/hack/main.go
  39. 12
      pkg/storage/store.go
  40. 10
      pkg/storage/store_test.go
  41. 4
      pkg/storage/stores/shipper/compactor/metrics.go
  42. 10
      pkg/storage/stores/shipper/compactor/table.go
  43. 4
      pkg/storage/stores/shipper/downloads/table.go
  44. 12
      pkg/storage/stores/shipper/downloads/table_manager_test.go
  45. 2
      pkg/storage/stores/shipper/testutil/testutil.go
  46. 11
      pkg/storage/stores/shipper/uploads/table_manager_test.go
  47. 36
      pkg/storage/stores/shipper/util/queries_test.go
  48. 3
      pkg/storage/stores/shipper/util/util_test.go
  49. 3
      tools/lambda-promtail/lambda-promtail/main.go

@ -5,7 +5,6 @@ import (
"fmt"
"os"
"reflect"
"strings"
"github.com/cortexproject/cortex/pkg/util/flagext"
"github.com/go-kit/kit/log/level"
@ -28,8 +27,6 @@ func init() {
prometheus.MustRegister(version.NewCollector("loki"))
}
var lineReplacer = strings.NewReplacer("\n", "\\n ")
type Config struct {
loki.Config `yaml:",inline"`
printVersion bool

@ -434,6 +434,6 @@ func nextBackoff(w io.Writer, statusCode int, backoff *util.Backoff) time.Time {
} else {
next = time.Now().Add(backoff.NextDelay())
}
fmt.Fprintf(w, "Loki returned an error code: %v, waiting %v before next query.", statusCode, next.Sub(time.Now()))
fmt.Fprintf(w, "Loki returned an error code: %v, waiting %v before next query.", statusCode, time.Until(next))
return next
}

@ -263,8 +263,8 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
tracker := pushTracker{
done: make(chan struct{}),
err: make(chan error),
done: make(chan struct{}),
err: make(chan error),
}
tracker.samplesPending.Store(int32(len(streams)))
for ingester, samples := range samplesByIngester {

@ -307,7 +307,7 @@ func (i *Ingester) removeFlushedChunks(instance *instance, stream *stream) {
}
}
func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs labels.Labels, cs []*chunkDesc, streamsMtx *sync.RWMutex) error {
func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs labels.Labels, cs []*chunkDesc, streamsMtx sync.Locker) error {
userID, err := user.ExtractOrgID(ctx)
if err != nil {
return err

@ -531,4 +531,4 @@ func shouldConsiderStream(stream *stream, req *logproto.SeriesRequest) bool {
return true
}
return false
}
}

@ -244,7 +244,7 @@ func (i *Ingester) transferOut(ctx context.Context) error {
return err
}
chunks := make([]*logproto.Chunk, 1, 1)
chunks := make([]*logproto.Chunk, 1)
chunks[0] = &logproto.Chunk{
Data: bb,
}

@ -83,9 +83,8 @@ func TestDefaultOutput_Format(t *testing.T) {
t.Run(testName, func(t *testing.T) {
t.Parallel()
writer := &bytes.Buffer{}
out := &DefaultOutput{writer,testData.options}
out := &DefaultOutput{writer, testData.options}
out.FormatAndPrintln(testData.timestamp, testData.lbls, testData.maxLabelsLen, testData.line)
assert.Equal(t, testData.expected, writer.String())
@ -114,7 +113,7 @@ func TestDefaultOutput_FormatLabelsPadding(t *testing.T) {
maxLabelsLen := findMaxLabelsLength(labelsList)
options := &LogOutputOptions{Timezone: time.UTC, NoLabels: false}
writer := &bytes.Buffer{}
out := &DefaultOutput{writer,options}
out := &DefaultOutput{writer, options}
// Format the same log line with different labels
formattedEntries := make([]string, 0, len(labelsList))

@ -68,7 +68,7 @@ func TestJSONLOutput_Format(t *testing.T) {
t.Run(testName, func(t *testing.T) {
t.Parallel()
writer := &bytes.Buffer{}
out := &JSONLOutput{writer,testData.options}
out := &JSONLOutput{writer, testData.options}
out.FormatAndPrintln(testData.timestamp, testData.lbls, testData.maxLabelsLen, testData.line)
actual := writer.String()

@ -67,7 +67,7 @@ func NewLogOutput(w io.Writer, mode string, options *LogOutputOptions) (LogOutpu
func getColor(labels string) *color.Color {
hash := fnv.New32()
hash.Write([]byte(labels))
_, _ = hash.Write([]byte(labels))
id := hash.Sum32() % uint32(len(colorList))
color := colorList[id]
return color

@ -10,19 +10,19 @@ import (
func TestNewLogOutput(t *testing.T) {
options := &LogOutputOptions{time.UTC, false, false}
out, err := NewLogOutput(nil,"default", options)
out, err := NewLogOutput(nil, "default", options)
assert.NoError(t, err)
assert.IsType(t, &DefaultOutput{nil,options}, out)
assert.IsType(t, &DefaultOutput{nil, options}, out)
out, err = NewLogOutput(nil,"jsonl", options)
out, err = NewLogOutput(nil, "jsonl", options)
assert.NoError(t, err)
assert.IsType(t, &JSONLOutput{nil,options}, out)
assert.IsType(t, &JSONLOutput{nil, options}, out)
out, err = NewLogOutput(nil,"raw", options)
out, err = NewLogOutput(nil, "raw", options)
assert.NoError(t, err)
assert.IsType(t, &RawOutput{nil,options}, out)
assert.IsType(t, &RawOutput{nil, options}, out)
out, err = NewLogOutput(nil,"unknown", options)
out, err = NewLogOutput(nil, "unknown", options)
assert.Error(t, err)
assert.Nil(t, out)
}

@ -14,9 +14,9 @@ type RawOutput struct {
options *LogOutputOptions
}
func NewRaw (writer io.Writer, options *LogOutputOptions) LogOutput {
func NewRaw(writer io.Writer, options *LogOutputOptions) LogOutput {
return &RawOutput{
w: writer,
w: writer,
options: options,
}
}

@ -67,7 +67,7 @@ func TestRawOutput_Format(t *testing.T) {
t.Parallel()
writer := &bytes.Buffer{}
out := &RawOutput{writer,testData.options}
out := &RawOutput{writer, testData.options}
out.FormatAndPrintln(testData.timestamp, testData.lbls, testData.maxLabelsLen, testData.line)
assert.Equal(t, testData.expected, writer.String())

@ -84,7 +84,7 @@ func (q *Query) DoQuery(c client.Client, out output.LogOutput, statistics bool)
if q.Limit < q.BatchSize {
q.BatchSize = q.Limit
}
resultLength := q.BatchSize
resultLength := 0
total := 0
start := q.Start
end := q.End
@ -114,7 +114,7 @@ func (q *Query) DoQuery(c client.Client, out output.LogOutput, statistics bool)
break
}
// Also no result, wouldn't expect to hit this.
if lastEntry == nil || len(lastEntry) == 0 {
if len(lastEntry) == 0 {
break
}
// Can only happen if all the results return in one request
@ -321,7 +321,7 @@ func (q *Query) printStream(streams loghttp.Streams, out output.LogOutput, lastE
printed := 0
for _, e := range allEntries {
// Skip the last entry if it overlaps, this happens because batching includes the last entry from the last batch
if lastEntry != nil && len(lastEntry) > 0 && e.entry.Timestamp == lastEntry[0].Timestamp {
if len(lastEntry) > 0 && e.entry.Timestamp == lastEntry[0].Timestamp {
skip := false
// Because many logs can share a timestamp in the unlucky event a batch ends with a timestamp
// shared by multiple entries we have to check all that were stored to see if we've already

@ -164,12 +164,12 @@ func Test_batch(t *testing.T) {
{
name: "super simple forward",
streams: []logproto.Stream{
logproto.Stream{
{
Labels: "{test=\"simple\"}",
Entries: []logproto.Entry{
logproto.Entry{Timestamp: time.Unix(1, 0), Line: "line1"},
logproto.Entry{Timestamp: time.Unix(2, 0), Line: "line2"},
logproto.Entry{Timestamp: time.Unix(3, 0), Line: "line3"}, // End timestamp is exclusive
{Timestamp: time.Unix(1, 0), Line: "line1"},
{Timestamp: time.Unix(2, 0), Line: "line2"},
{Timestamp: time.Unix(3, 0), Line: "line3"}, // End timestamp is exclusive
},
},
},
@ -188,12 +188,12 @@ func Test_batch(t *testing.T) {
{
name: "super simple backward",
streams: []logproto.Stream{
logproto.Stream{
{
Labels: "{test=\"simple\"}",
Entries: []logproto.Entry{
logproto.Entry{Timestamp: time.Unix(1, 0), Line: "line1"},
logproto.Entry{Timestamp: time.Unix(2, 0), Line: "line2"},
logproto.Entry{Timestamp: time.Unix(3, 0), Line: "line3"}, // End timestamp is exclusive
{Timestamp: time.Unix(1, 0), Line: "line1"},
{Timestamp: time.Unix(2, 0), Line: "line2"},
{Timestamp: time.Unix(3, 0), Line: "line3"}, // End timestamp is exclusive
},
},
},
@ -212,19 +212,19 @@ func Test_batch(t *testing.T) {
{
name: "single stream forward batch",
streams: []logproto.Stream{
logproto.Stream{
{
Labels: "{test=\"simple\"}",
Entries: []logproto.Entry{
logproto.Entry{Timestamp: time.Unix(1, 0), Line: "line1"},
logproto.Entry{Timestamp: time.Unix(2, 0), Line: "line2"},
logproto.Entry{Timestamp: time.Unix(3, 0), Line: "line3"},
logproto.Entry{Timestamp: time.Unix(4, 0), Line: "line4"},
logproto.Entry{Timestamp: time.Unix(5, 0), Line: "line5"},
logproto.Entry{Timestamp: time.Unix(6, 0), Line: "line6"},
logproto.Entry{Timestamp: time.Unix(7, 0), Line: "line7"},
logproto.Entry{Timestamp: time.Unix(8, 0), Line: "line8"},
logproto.Entry{Timestamp: time.Unix(9, 0), Line: "line9"},
logproto.Entry{Timestamp: time.Unix(10, 0), Line: "line10"},
{Timestamp: time.Unix(1, 0), Line: "line1"},
{Timestamp: time.Unix(2, 0), Line: "line2"},
{Timestamp: time.Unix(3, 0), Line: "line3"},
{Timestamp: time.Unix(4, 0), Line: "line4"},
{Timestamp: time.Unix(5, 0), Line: "line5"},
{Timestamp: time.Unix(6, 0), Line: "line6"},
{Timestamp: time.Unix(7, 0), Line: "line7"},
{Timestamp: time.Unix(8, 0), Line: "line8"},
{Timestamp: time.Unix(9, 0), Line: "line9"},
{Timestamp: time.Unix(10, 0), Line: "line10"},
},
},
},
@ -252,19 +252,19 @@ func Test_batch(t *testing.T) {
{
name: "single stream backward batch",
streams: []logproto.Stream{
logproto.Stream{
{
Labels: "{test=\"simple\"}",
Entries: []logproto.Entry{
logproto.Entry{Timestamp: time.Unix(1, 0), Line: "line1"},
logproto.Entry{Timestamp: time.Unix(2, 0), Line: "line2"},
logproto.Entry{Timestamp: time.Unix(3, 0), Line: "line3"},
logproto.Entry{Timestamp: time.Unix(4, 0), Line: "line4"},
logproto.Entry{Timestamp: time.Unix(5, 0), Line: "line5"},
logproto.Entry{Timestamp: time.Unix(6, 0), Line: "line6"},
logproto.Entry{Timestamp: time.Unix(7, 0), Line: "line7"},
logproto.Entry{Timestamp: time.Unix(8, 0), Line: "line8"},
logproto.Entry{Timestamp: time.Unix(9, 0), Line: "line9"},
logproto.Entry{Timestamp: time.Unix(10, 0), Line: "line10"},
{Timestamp: time.Unix(1, 0), Line: "line1"},
{Timestamp: time.Unix(2, 0), Line: "line2"},
{Timestamp: time.Unix(3, 0), Line: "line3"},
{Timestamp: time.Unix(4, 0), Line: "line4"},
{Timestamp: time.Unix(5, 0), Line: "line5"},
{Timestamp: time.Unix(6, 0), Line: "line6"},
{Timestamp: time.Unix(7, 0), Line: "line7"},
{Timestamp: time.Unix(8, 0), Line: "line8"},
{Timestamp: time.Unix(9, 0), Line: "line9"},
{Timestamp: time.Unix(10, 0), Line: "line10"},
},
},
},
@ -282,34 +282,34 @@ func Test_batch(t *testing.T) {
{
name: "two streams forward batch",
streams: []logproto.Stream{
logproto.Stream{
{
Labels: "{test=\"one\"}",
Entries: []logproto.Entry{
logproto.Entry{Timestamp: time.Unix(1, 0), Line: "line1"},
logproto.Entry{Timestamp: time.Unix(2, 0), Line: "line2"},
logproto.Entry{Timestamp: time.Unix(3, 0), Line: "line3"},
logproto.Entry{Timestamp: time.Unix(4, 0), Line: "line4"},
logproto.Entry{Timestamp: time.Unix(5, 0), Line: "line5"},
logproto.Entry{Timestamp: time.Unix(6, 0), Line: "line6"},
logproto.Entry{Timestamp: time.Unix(7, 0), Line: "line7"},
logproto.Entry{Timestamp: time.Unix(8, 0), Line: "line8"},
logproto.Entry{Timestamp: time.Unix(9, 0), Line: "line9"},
logproto.Entry{Timestamp: time.Unix(10, 0), Line: "line10"},
{Timestamp: time.Unix(1, 0), Line: "line1"},
{Timestamp: time.Unix(2, 0), Line: "line2"},
{Timestamp: time.Unix(3, 0), Line: "line3"},
{Timestamp: time.Unix(4, 0), Line: "line4"},
{Timestamp: time.Unix(5, 0), Line: "line5"},
{Timestamp: time.Unix(6, 0), Line: "line6"},
{Timestamp: time.Unix(7, 0), Line: "line7"},
{Timestamp: time.Unix(8, 0), Line: "line8"},
{Timestamp: time.Unix(9, 0), Line: "line9"},
{Timestamp: time.Unix(10, 0), Line: "line10"},
},
},
logproto.Stream{
{
Labels: "{test=\"two\"}",
Entries: []logproto.Entry{
logproto.Entry{Timestamp: time.Unix(1, 1000), Line: "s2line1"},
logproto.Entry{Timestamp: time.Unix(2, 1000), Line: "s2line2"},
logproto.Entry{Timestamp: time.Unix(3, 1000), Line: "s2line3"},
logproto.Entry{Timestamp: time.Unix(4, 1000), Line: "s2line4"},
logproto.Entry{Timestamp: time.Unix(5, 1000), Line: "s2line5"},
logproto.Entry{Timestamp: time.Unix(6, 1000), Line: "s2line6"},
logproto.Entry{Timestamp: time.Unix(7, 1000), Line: "s2line7"},
logproto.Entry{Timestamp: time.Unix(8, 1000), Line: "s2line8"},
logproto.Entry{Timestamp: time.Unix(9, 1000), Line: "s2line9"},
logproto.Entry{Timestamp: time.Unix(10, 1000), Line: "s2line10"},
{Timestamp: time.Unix(1, 1000), Line: "s2line1"},
{Timestamp: time.Unix(2, 1000), Line: "s2line2"},
{Timestamp: time.Unix(3, 1000), Line: "s2line3"},
{Timestamp: time.Unix(4, 1000), Line: "s2line4"},
{Timestamp: time.Unix(5, 1000), Line: "s2line5"},
{Timestamp: time.Unix(6, 1000), Line: "s2line6"},
{Timestamp: time.Unix(7, 1000), Line: "s2line7"},
{Timestamp: time.Unix(8, 1000), Line: "s2line8"},
{Timestamp: time.Unix(9, 1000), Line: "s2line9"},
{Timestamp: time.Unix(10, 1000), Line: "s2line10"},
},
},
},
@ -334,34 +334,34 @@ func Test_batch(t *testing.T) {
{
name: "two streams backward batch",
streams: []logproto.Stream{
logproto.Stream{
{
Labels: "{test=\"one\"}",
Entries: []logproto.Entry{
logproto.Entry{Timestamp: time.Unix(1, 0), Line: "line1"},
logproto.Entry{Timestamp: time.Unix(2, 0), Line: "line2"},
logproto.Entry{Timestamp: time.Unix(3, 0), Line: "line3"},
logproto.Entry{Timestamp: time.Unix(4, 0), Line: "line4"},
logproto.Entry{Timestamp: time.Unix(5, 0), Line: "line5"},
logproto.Entry{Timestamp: time.Unix(6, 0), Line: "line6"},
logproto.Entry{Timestamp: time.Unix(7, 0), Line: "line7"},
logproto.Entry{Timestamp: time.Unix(8, 0), Line: "line8"},
logproto.Entry{Timestamp: time.Unix(9, 0), Line: "line9"},
logproto.Entry{Timestamp: time.Unix(10, 0), Line: "line10"},
{Timestamp: time.Unix(1, 0), Line: "line1"},
{Timestamp: time.Unix(2, 0), Line: "line2"},
{Timestamp: time.Unix(3, 0), Line: "line3"},
{Timestamp: time.Unix(4, 0), Line: "line4"},
{Timestamp: time.Unix(5, 0), Line: "line5"},
{Timestamp: time.Unix(6, 0), Line: "line6"},
{Timestamp: time.Unix(7, 0), Line: "line7"},
{Timestamp: time.Unix(8, 0), Line: "line8"},
{Timestamp: time.Unix(9, 0), Line: "line9"},
{Timestamp: time.Unix(10, 0), Line: "line10"},
},
},
logproto.Stream{
{
Labels: "{test=\"two\"}",
Entries: []logproto.Entry{
logproto.Entry{Timestamp: time.Unix(1, 1000), Line: "s2line1"},
logproto.Entry{Timestamp: time.Unix(2, 1000), Line: "s2line2"},
logproto.Entry{Timestamp: time.Unix(3, 1000), Line: "s2line3"},
logproto.Entry{Timestamp: time.Unix(4, 1000), Line: "s2line4"},
logproto.Entry{Timestamp: time.Unix(5, 1000), Line: "s2line5"},
logproto.Entry{Timestamp: time.Unix(6, 1000), Line: "s2line6"},
logproto.Entry{Timestamp: time.Unix(7, 1000), Line: "s2line7"},
logproto.Entry{Timestamp: time.Unix(8, 1000), Line: "s2line8"},
logproto.Entry{Timestamp: time.Unix(9, 1000), Line: "s2line9"},
logproto.Entry{Timestamp: time.Unix(10, 1000), Line: "s2line10"},
{Timestamp: time.Unix(1, 1000), Line: "s2line1"},
{Timestamp: time.Unix(2, 1000), Line: "s2line2"},
{Timestamp: time.Unix(3, 1000), Line: "s2line3"},
{Timestamp: time.Unix(4, 1000), Line: "s2line4"},
{Timestamp: time.Unix(5, 1000), Line: "s2line5"},
{Timestamp: time.Unix(6, 1000), Line: "s2line6"},
{Timestamp: time.Unix(7, 1000), Line: "s2line7"},
{Timestamp: time.Unix(8, 1000), Line: "s2line8"},
{Timestamp: time.Unix(9, 1000), Line: "s2line9"},
{Timestamp: time.Unix(10, 1000), Line: "s2line10"},
},
},
},
@ -379,20 +379,20 @@ func Test_batch(t *testing.T) {
{
name: "single stream forward batch identical timestamps",
streams: []logproto.Stream{
logproto.Stream{
{
Labels: "{test=\"simple\"}",
Entries: []logproto.Entry{
logproto.Entry{Timestamp: time.Unix(1, 0), Line: "line1"},
logproto.Entry{Timestamp: time.Unix(2, 0), Line: "line2"},
logproto.Entry{Timestamp: time.Unix(3, 0), Line: "line3"},
logproto.Entry{Timestamp: time.Unix(4, 0), Line: "line4"},
logproto.Entry{Timestamp: time.Unix(5, 0), Line: "line5"},
logproto.Entry{Timestamp: time.Unix(6, 0), Line: "line6"},
logproto.Entry{Timestamp: time.Unix(6, 0), Line: "line6a"},
logproto.Entry{Timestamp: time.Unix(7, 0), Line: "line7"},
logproto.Entry{Timestamp: time.Unix(8, 0), Line: "line8"},
logproto.Entry{Timestamp: time.Unix(9, 0), Line: "line9"},
logproto.Entry{Timestamp: time.Unix(10, 0), Line: "line10"},
{Timestamp: time.Unix(1, 0), Line: "line1"},
{Timestamp: time.Unix(2, 0), Line: "line2"},
{Timestamp: time.Unix(3, 0), Line: "line3"},
{Timestamp: time.Unix(4, 0), Line: "line4"},
{Timestamp: time.Unix(5, 0), Line: "line5"},
{Timestamp: time.Unix(6, 0), Line: "line6"},
{Timestamp: time.Unix(6, 0), Line: "line6a"},
{Timestamp: time.Unix(7, 0), Line: "line7"},
{Timestamp: time.Unix(8, 0), Line: "line8"},
{Timestamp: time.Unix(9, 0), Line: "line9"},
{Timestamp: time.Unix(10, 0), Line: "line10"},
},
},
},
@ -415,21 +415,21 @@ func Test_batch(t *testing.T) {
{
name: "single stream backward batch identical timestamps",
streams: []logproto.Stream{
logproto.Stream{
{
Labels: "{test=\"simple\"}",
Entries: []logproto.Entry{
logproto.Entry{Timestamp: time.Unix(1, 0), Line: "line1"},
logproto.Entry{Timestamp: time.Unix(2, 0), Line: "line2"},
logproto.Entry{Timestamp: time.Unix(3, 0), Line: "line3"},
logproto.Entry{Timestamp: time.Unix(4, 0), Line: "line4"},
logproto.Entry{Timestamp: time.Unix(5, 0), Line: "line5"},
logproto.Entry{Timestamp: time.Unix(6, 0), Line: "line6"},
logproto.Entry{Timestamp: time.Unix(6, 0), Line: "line6a"},
logproto.Entry{Timestamp: time.Unix(6, 0), Line: "line6b"},
logproto.Entry{Timestamp: time.Unix(7, 0), Line: "line7"},
logproto.Entry{Timestamp: time.Unix(8, 0), Line: "line8"},
logproto.Entry{Timestamp: time.Unix(9, 0), Line: "line9"},
logproto.Entry{Timestamp: time.Unix(10, 0), Line: "line10"},
{Timestamp: time.Unix(1, 0), Line: "line1"},
{Timestamp: time.Unix(2, 0), Line: "line2"},
{Timestamp: time.Unix(3, 0), Line: "line3"},
{Timestamp: time.Unix(4, 0), Line: "line4"},
{Timestamp: time.Unix(5, 0), Line: "line5"},
{Timestamp: time.Unix(6, 0), Line: "line6"},
{Timestamp: time.Unix(6, 0), Line: "line6a"},
{Timestamp: time.Unix(6, 0), Line: "line6b"},
{Timestamp: time.Unix(7, 0), Line: "line7"},
{Timestamp: time.Unix(8, 0), Line: "line8"},
{Timestamp: time.Unix(9, 0), Line: "line9"},
{Timestamp: time.Unix(10, 0), Line: "line10"},
},
},
},

@ -43,7 +43,7 @@ func (q *SeriesQuery) DoSeries(c client.Client) {
labelMap[labelName] = &labelDetails{
name: labelName,
inStreams: 1,
uniqueVals: map[string]struct{}{labelValue: struct{}{}},
uniqueVals: map[string]struct{}{labelValue: {}},
}
}
}

@ -182,12 +182,12 @@ func (m *dropStage) Process(labels model.LabelSet, extracted map[string]interfac
level.Debug(m.logger).Log("msg", fmt.Sprintf("line will not be dropped, the provided regular expression did not match the value found in the extracted map for source key: %v", *m.cfg.Source))
}
return
} else {
// regex match, will be dropped
if Debug {
level.Debug(m.logger).Log("msg", "line met drop criteria, regex matched the value in the extracted map source key")
}
}
// regex match, will be dropped
if Debug {
level.Debug(m.logger).Log("msg", "line met drop criteria, regex matched the value in the extracted map source key")
}
} else {
// Not found in extract map, don't drop
if Debug {
@ -204,11 +204,11 @@ func (m *dropStage) Process(labels model.LabelSet, extracted map[string]interfac
level.Debug(m.logger).Log("msg", "line will not be dropped, the provided regular expression did not match the log line")
}
return
} else {
if Debug {
level.Debug(m.logger).Log("msg", "line met drop criteria, the provided regular expression matched the log line")
}
}
if Debug {
level.Debug(m.logger).Log("msg", "line met drop criteria, the provided regular expression matched the log line")
}
} else {
// Not a match or entry was nil, do not drop
if Debug {

@ -38,7 +38,7 @@ pipeline_stages:
func Test_dropStage_Process(t *testing.T) {
// Enable debug logging
cfg := &ww.Config{}
cfg.LogLevel.Set("debug")
require.Nil(t, cfg.LogLevel.Set("debug"))
util.InitLogger(cfg)
Debug = true

@ -6,13 +6,14 @@ import (
"github.com/cortexproject/cortex/pkg/util"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
ww "github.com/weaveworks/common/server"
)
func Test_dropLabelStage_Process(t *testing.T) {
// Enable debug logging
cfg := &ww.Config{}
cfg.LogLevel.Set("debug")
require.Nil(t, cfg.LogLevel.Set("debug"))
util.InitLogger(cfg)
Debug = true

@ -108,12 +108,12 @@ func TestPipeline_Replace(t *testing.T) {
map[string]interface{}{},
`11.11.11.11 - FRANK [25/JAN/2000:14:00:01 -0500] "GET /1986.JS HTTP/1.1" HttpStatusOk 932 "-" "MOZILLA/5.0 (WINDOWS; U; WINDOWS NT 5.1; DE; RV:1.9.1.7) GECKO/20091221 FIREFOX/3.5.7 GTB6"`,
},
"successfully run a pipeline with empty replace value": {
testReplaceYamlWithEmptyReplace,
testReplaceLogLine,
map[string]interface{}{},
`11.11.11.11 - [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"`,
},
"successfully run a pipeline with empty replace value": {
testReplaceYamlWithEmptyReplace,
testReplaceLogLine,
map[string]interface{}{},
`11.11.11.11 - [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"`,
},
}
for testName, testData := range tests {

@ -361,12 +361,12 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
).Wrap(t.frontend.Handler())
var defaultHandler http.Handler
if t.cfg.Frontend.TailProxyUrl != "" {
if t.cfg.Frontend.TailProxyURL != "" {
httpMiddleware := middleware.Merge(
t.httpAuthMiddleware,
queryrange.StatsHTTPMiddleware,
)
tailURL, err := url.Parse(t.cfg.Frontend.TailProxyUrl)
tailURL, err := url.Parse(t.cfg.Frontend.TailProxyURL)
if err != nil {
return nil, err
}

@ -2,16 +2,17 @@ package lokifrontend
import (
"flag"
"github.com/cortexproject/cortex/pkg/querier/frontend"
)
type Config struct {
frontend.Config `yaml:",inline"`
TailProxyUrl string `yaml:"tail_proxy_url"`
TailProxyURL string `yaml:"tail_proxy_url"`
}
// RegisterFlags adds the flags required to config this to the given FlagSet.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.Config.RegisterFlags(f)
f.StringVar(&cfg.TailProxyUrl, "frontend.tail-proxy-url", "", "URL of querier for tail proxy.")
}
f.StringVar(&cfg.TailProxyURL, "frontend.tail-proxy-url", "", "URL of querier for tail proxy.")
}

@ -274,7 +274,7 @@ func (c *client) sendBatch(tenantID string, batch *batch) {
}
}
if lblSet != nil {
streamLag.With(lblSet).Set(time.Now().Sub(s.Entries[len(s.Entries)-1].Timestamp).Seconds())
streamLag.With(lblSet).Set(time.Since(s.Entries[len(s.Entries)-1].Timestamp).Seconds())
}
}
return

@ -28,7 +28,7 @@ func NewMulti(logger log.Logger, externalLabels flagext.LabelSet, cfgs ...Config
// which exist in both the command line arguments as well as the yaml, and while this is
// not typically the order of precedence, the assumption here is someone providing a specific config in
// yaml is doing so explicitly to make a key specific to a client.
cfg.ExternalLabels = flagext.LabelSet{externalLabels.Merge(cfg.ExternalLabels.LabelSet)}
cfg.ExternalLabels = flagext.LabelSet{LabelSet: externalLabels.Merge(cfg.ExternalLabels.LabelSet)}
client, err := New(cfg, logger)
if err != nil {
return nil, err

@ -74,6 +74,7 @@ func (cfg *Config) RegisterFlags(flags *flag.FlagSet) {
}
// FileTarget describes a particular set of logs.
// nolint:golint
type FileTarget struct {
logger log.Logger

@ -46,6 +46,7 @@ var (
)
// FileTargetManager manages a set of targets.
// nolint:golint
type FileTargetManager struct {
log log.Logger
quit context.CancelFunc

@ -133,25 +133,24 @@ func (t *tailer) readLines() {
}()
for {
select {
case line, ok := <-t.tail.Lines:
if !ok {
level.Info(t.logger).Log("msg", "tail routine: tail channel closed, stopping tailer", "path", t.path)
return
}
line, ok := <-t.tail.Lines
if !ok {
level.Info(t.logger).Log("msg", "tail routine: tail channel closed, stopping tailer", "path", t.path)
return
}
// Note currently the tail implementation hardcodes Err to nil, this should never hit.
if line.Err != nil {
level.Error(t.logger).Log("msg", "tail routine: error reading line", "path", t.path, "error", line.Err)
continue
}
// Note currently the tail implementation hardcodes Err to nil, this should never hit.
if line.Err != nil {
level.Error(t.logger).Log("msg", "tail routine: error reading line", "path", t.path, "error", line.Err)
continue
}
readLines.WithLabelValues(t.path).Inc()
logLengthHistogram.WithLabelValues(t.path).Observe(float64(len(line.Text)))
if err := t.handler.Handle(model.LabelSet{}, line.Time, line.Text); err != nil {
level.Error(t.logger).Log("msg", "tail routine: error handling line", "path", t.path, "error", err)
}
readLines.WithLabelValues(t.path).Inc()
logLengthHistogram.WithLabelValues(t.path).Observe(float64(len(line.Text)))
if err := t.handler.Handle(model.LabelSet{}, line.Time, line.Text); err != nil {
level.Error(t.logger).Log("msg", "tail routine: error handling line", "path", t.path, "error", err)
}
}
}
@ -204,7 +203,6 @@ func (t *tailer) stop() {
<-t.done
level.Info(t.logger).Log("msg", "stopped tailing file", "path", t.path)
})
return
}
func (t *tailer) isRunning() bool {

@ -13,6 +13,7 @@ import (
)
// JournalTargetManager manages a series of JournalTargets.
// nolint:golint
type JournalTargetManager struct{}
// NewJournalTargetManager returns nil as JournalTargets are not supported

@ -165,7 +165,6 @@ func (t *PushTarget) handle(w http.ResponseWriter, r *http.Request) {
}
w.WriteHeader(http.StatusNoContent)
return
}
// Type returns PushTargetType.

@ -66,9 +66,8 @@ func validateJobName(scrapeConfigs []scrapeconfig.Config) error {
if _, ok := jobNames[cfg.JobName]; ok {
return fmt.Errorf("`job_name` must be unique for each `push` scrape_config, "+
"a duplicate `job_name` of %s was found", cfg.JobName)
} else {
jobNames[cfg.JobName] = struct{}{}
}
jobNames[cfg.JobName] = struct{}{}
scrapeConfigs[i].JobName = strings.Replace(cfg.JobName, " ", "_", -1)
}

@ -50,17 +50,18 @@ type Shutdownable interface {
Shutdown()
}
type stdinTargetManager struct {
// nolint:golint
type StdinTargetManager struct {
*readerTarget
app Shutdownable
}
func NewStdinTargetManager(log log.Logger, app Shutdownable, client api.EntryHandler, configs []scrapeconfig.Config) (*stdinTargetManager, error) {
func NewStdinTargetManager(log log.Logger, app Shutdownable, client api.EntryHandler, configs []scrapeconfig.Config) (*StdinTargetManager, error) {
reader, err := newReaderTarget(log, stdIn, client, getStdinConfig(log, configs))
if err != nil {
return nil, err
}
stdinManager := &stdinTargetManager{
stdinManager := &StdinTargetManager{
readerTarget: reader,
app: app,
}
@ -84,12 +85,12 @@ func getStdinConfig(log log.Logger, configs []scrapeconfig.Config) scrapeconfig.
return cfg
}
func (t *stdinTargetManager) Ready() bool {
func (t *StdinTargetManager) Ready() bool {
return t.ctx.Err() == nil
}
func (t *stdinTargetManager) Stop() { t.cancel() }
func (t *stdinTargetManager) ActiveTargets() map[string][]target.Target { return nil }
func (t *stdinTargetManager) AllTargets() map[string][]target.Target { return nil }
func (t *StdinTargetManager) Stop() { t.cancel() }
func (t *StdinTargetManager) ActiveTargets() map[string][]target.Target { return nil }
func (t *StdinTargetManager) AllTargets() map[string][]target.Target { return nil }
type readerTarget struct {
in *bufio.Reader

@ -48,6 +48,7 @@ var (
)
// SyslogTarget listens to syslog messages.
// nolint:golint
type SyslogTarget struct {
logger log.Logger
handler api.EntryHandler

@ -12,6 +12,7 @@ import (
)
// SyslogTargetManager manages a series of SyslogTargets.
// nolint:golint
type SyslogTargetManager struct {
logger log.Logger
targets map[string]*SyslogTarget

@ -5,6 +5,7 @@ import (
)
// TargetType is the type of target
// nolint:golint
type TargetType string
const (

@ -2,11 +2,11 @@ package querier
import (
"context"
"github.com/prometheus/prometheus/pkg/labels"
"testing"
"time"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

@ -19,6 +19,8 @@ var (
NilLogger = log.NewNopLogger()
)
const ruleName = "testrule"
func labelsToMatchers(ls labels.Labels) (res []*labels.Matcher) {
for _, l := range ls {
res = append(res, labels.MustNewMatcher(labels.MatchEqual, l.Name, l.Value))
@ -30,13 +32,12 @@ type MockRuleIter []*rules.AlertingRule
func (xs MockRuleIter) AlertingRules() []*rules.AlertingRule { return xs }
func testStore(queryFunc rules.QueryFunc, itv time.Duration) *MemStore {
return NewMemStore("test", queryFunc, NilMetrics, itv, NilLogger)
func testStore(queryFunc rules.QueryFunc) *MemStore {
return NewMemStore("test", queryFunc, NilMetrics, time.Minute, NilLogger)
}
func TestSelectRestores(t *testing.T) {
ruleName := "testrule"
ars := []*rules.AlertingRule{
rules.NewAlertingRule(
ruleName,
@ -79,7 +80,7 @@ func TestSelectRestores(t *testing.T) {
}, nil
})
store := testStore(fn, time.Minute)
store := testStore(fn)
store.Start(MockRuleIter(ars))
now := util.TimeToMillis(time.Now())
@ -135,7 +136,6 @@ func TestSelectRestores(t *testing.T) {
}
func TestMemstoreStart(t *testing.T) {
ruleName := "testrule"
ars := []*rules.AlertingRule{
rules.NewAlertingRule(
ruleName,
@ -153,13 +153,13 @@ func TestMemstoreStart(t *testing.T) {
return nil, nil
})
store := testStore(fn, time.Minute)
store := testStore(fn)
store.Start(MockRuleIter(ars))
}
func TestMemStoreStopBeforeStart(t *testing.T) {
store := testStore(nil, time.Minute)
store := testStore(nil)
done := make(chan struct{})
go func() {
store.Stop()
@ -173,7 +173,6 @@ func TestMemStoreStopBeforeStart(t *testing.T) {
}
func TestMemstoreBlocks(t *testing.T) {
ruleName := "testrule"
ars := []*rules.AlertingRule{
rules.NewAlertingRule(
ruleName,
@ -191,11 +190,11 @@ func TestMemstoreBlocks(t *testing.T) {
return nil, nil
})
store := testStore(fn, time.Minute)
store := testStore(fn)
done := make(chan struct{})
go func() {
store.Querier(context.Background(), 0, 1)
_, _ = store.Querier(context.Background(), 0, 1)
done <- struct{}{}
}()

@ -6,10 +6,11 @@ import (
"github.com/cortexproject/cortex/pkg/ruler"
cRules "github.com/cortexproject/cortex/pkg/ruler/rules"
"github.com/go-kit/kit/log"
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/ruler/manager"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/ruler/manager"
)
type Config struct {

@ -6,11 +6,12 @@ import (
"time"
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/grafana/loki/pkg/util"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/util"
)
// storeMock is a mockable version of Loki's storage, used in querier unit tests

@ -51,7 +51,7 @@ func getStore() (lstore.Store, error) {
}
schemaCfg := lstore.SchemaConfig{
chunk.SchemaConfig{
SchemaConfig: chunk.SchemaConfig{
Configs: []chunk.PeriodConfig{
{
From: chunk.DayTime{Time: start},

@ -25,9 +25,9 @@ import (
)
var (
currentBoltdbShipperNon24HoursErr = errors.New("boltdb-shipper works best with 24h periodic index config. Either add a new config with future date set to 24h to retain the existing index or change the existing config to use 24h period")
upcomingBoltdbShipperNon24HoursErr = errors.New("boltdb-shipper with future date must always have periodic config for index set to 24h")
zeroLengthConfigError = errors.New("Must specify at least one schema configuration.")
errCurrentBoltdbShipperNon24Hours = errors.New("boltdb-shipper works best with 24h periodic index config. Either add a new config with future date set to 24h to retain the existing index or change the existing config to use 24h period")
errUpcomingBoltdbShipperNon24Hours = errors.New("boltdb-shipper with future date must always have periodic config for index set to 24h")
errZeroLengthConfig = errors.New("must specify at least one schema configuration")
)
// Config is the loki storage configuration
@ -52,18 +52,18 @@ type SchemaConfig struct {
// Validate the schema config and returns an error if the validation doesn't pass
func (cfg *SchemaConfig) Validate() error {
if len(cfg.Configs) == 0 {
return zeroLengthConfigError
return errZeroLengthConfig
}
activePCIndex := ActivePeriodConfig((*cfg).Configs)
// if current index type is boltdb-shipper and there are no upcoming index types then it should be set to 24 hours.
if cfg.Configs[activePCIndex].IndexType == shipper.BoltDBShipperType && cfg.Configs[activePCIndex].IndexTables.Period != 24*time.Hour && len(cfg.Configs)-1 == activePCIndex {
return currentBoltdbShipperNon24HoursErr
return errCurrentBoltdbShipperNon24Hours
}
// if upcoming index type is boltdb-shipper, it should always be set to 24 hours.
if len(cfg.Configs)-1 > activePCIndex && (cfg.Configs[activePCIndex+1].IndexType == shipper.BoltDBShipperType && cfg.Configs[activePCIndex+1].IndexTables.Period != 24*time.Hour) {
return upcomingBoltdbShipperNon24HoursErr
return errUpcomingBoltdbShipperNon24Hours
}
return cfg.SchemaConfig.Validate()

@ -810,7 +810,7 @@ func TestStore_MultipleBoltDBShippersInConfig(t *testing.T) {
nil,
cortex_util.Logger,
)
require.NoError(t, err)
store, err := NewStore(config, schemaConfig, chunkStore, nil)
require.NoError(t, err)
@ -955,7 +955,7 @@ func TestSchemaConfig_Validate(t *testing.T) {
{
name: "empty",
configs: []chunk.PeriodConfig{},
err: zeroLengthConfigError,
err: errZeroLengthConfig,
},
{
name: "NOT using boltdb-shipper",
@ -978,7 +978,7 @@ func TestSchemaConfig_Validate(t *testing.T) {
Period: 7 * 24 * time.Hour,
},
}},
err: currentBoltdbShipperNon24HoursErr,
err: errCurrentBoltdbShipperNon24Hours,
},
{
name: "current config boltdb-shipper with 1 day periodic config, without future index type changes",
@ -1026,7 +1026,7 @@ func TestSchemaConfig_Validate(t *testing.T) {
Period: 7 * 24 * time.Hour,
},
}},
err: upcomingBoltdbShipperNon24HoursErr,
err: errUpcomingBoltdbShipperNon24Hours,
},
{
name: "current config NOT boltdb-shipper, upcoming config boltdb-shipper with 7 days periodic config",
@ -1045,7 +1045,7 @@ func TestSchemaConfig_Validate(t *testing.T) {
Period: 7 * 24 * time.Hour,
},
}},
err: upcomingBoltdbShipperNon24HoursErr,
err: errUpcomingBoltdbShipperNon24Hours,
},
} {
t.Run(tc.name, func(t *testing.T) {

@ -11,13 +11,13 @@ const (
)
type metrics struct {
compactTablesOperationTotal *prometheus.CounterVec
compactTablesOperationTotal *prometheus.CounterVec
compactTablesOperationDurationSeconds prometheus.Gauge
}
func newMetrics(r prometheus.Registerer) *metrics {
m := metrics{
compactTablesOperationTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
compactTablesOperationTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Namespace: "loki_boltdb_shipper",
Name: "compact_tables_operation_total",
Help: "Total number of tables compaction done by status",

@ -157,12 +157,10 @@ func (t *table) compact() error {
// read all the errors
for i := 0; i < n; i++ {
select {
case err := <-errChan:
if err != nil && firstErr == nil {
firstErr = err
close(t.quit)
}
err := <-errChan
if err != nil && firstErr == nil {
firstErr = err
close(t.quit)
}
}

@ -15,6 +15,7 @@ import (
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"go.etcd.io/bbolt"
@ -99,7 +100,7 @@ func NewTable(spanCtx context.Context, name, cacheLocation string, storageClient
// init downloads all the db files for the table from object storage.
// it assumes the locking of mutex is taken care of by the caller.
func (t *Table) init(ctx context.Context, spanLogger *spanlogger.SpanLogger) (err error) {
func (t *Table) init(ctx context.Context, spanLogger log.Logger) (err error) {
defer func() {
status := statusSuccess
if err != nil {
@ -454,7 +455,6 @@ func (t *Table) doParallelDownload(ctx context.Context, objects []chunk.StorageO
}
incomingErrors <- err
return
}()
}

@ -9,12 +9,12 @@ import (
"time"
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/local"
"github.com/grafana/loki/pkg/storage/stores/shipper/testutil"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/storage/stores/shipper/testutil"
)
func buildTestTableManager(t *testing.T, path string) (*TableManager, *local.BoltIndexClient, stopFunc) {
func buildTestTableManager(t *testing.T, path string) (*TableManager, stopFunc) {
boltDBIndexClient, fsObjectClient := buildTestClients(t, path)
cachePath := filepath.Join(path, cacheDirName)
@ -26,7 +26,7 @@ func buildTestTableManager(t *testing.T, path string) (*TableManager, *local.Bol
tableManager, err := NewTableManager(cfg, boltDBIndexClient, fsObjectClient, nil)
require.NoError(t, err)
return tableManager, boltDBIndexClient, func() {
return tableManager, func() {
tableManager.Stop()
boltDBIndexClient.Stop()
}
@ -79,7 +79,7 @@ func TestTableManager_QueryPages(t *testing.T) {
testutil.SetupDBTablesAtPath(t, name, objectStoragePath, dbs, true)
}
tableManager, _, stopFunc := buildTestTableManager(t, tempDir)
tableManager, stopFunc := buildTestTableManager(t, tempDir)
defer stopFunc()
testutil.TestMultiTableQuery(t, queries, tableManager, 0, 60)
@ -93,7 +93,7 @@ func TestTableManager_cleanupCache(t *testing.T) {
require.NoError(t, os.RemoveAll(tempDir))
}()
tableManager, _, stopFunc := buildTestTableManager(t, tempDir)
tableManager, stopFunc := buildTestTableManager(t, tempDir)
defer stopFunc()
// one table that would expire and other one won't

@ -38,8 +38,6 @@ func AddRecordsToBatch(batch chunk.WriteBatch, tableName string, start, numRecor
rec := []byte(strconv.Itoa(start + i))
batch.Add(tableName, "", rec, rec)
}
return
}
type SingleTableQuerier interface {

@ -10,8 +10,9 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/local"
"github.com/grafana/loki/pkg/storage/stores/shipper/testutil"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/storage/stores/shipper/testutil"
)
func buildTestTableManager(t *testing.T, testDir string) (*TableManager, *local.BoltIndexClient, stopFunc) {
@ -56,11 +57,11 @@ func TestLoadTables(t *testing.T) {
// table1 with 2 dbs
testutil.SetupDBTablesAtPath(t, "table1", indexPath, map[string]testutil.DBRecords{
"db1": testutil.DBRecords{
"db1": {
Start: 10,
NumRecords: 10,
},
"db2": testutil.DBRecords{
"db2": {
Start: 20,
NumRecords: 10,
},
@ -68,11 +69,11 @@ func TestLoadTables(t *testing.T) {
// table2 with 2 dbs
testutil.SetupDBTablesAtPath(t, "table2", indexPath, map[string]testutil.DBRecords{
"db1": testutil.DBRecords{
"db1": {
Start: 30,
NumRecords: 10,
},
"db2": testutil.DBRecords{
"db2": {
Start: 40,
NumRecords: 10,
},

@ -2,12 +2,13 @@ package util
import (
"context"
"github.com/cortexproject/cortex/pkg/chunk"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
"github.com/stretchr/testify/require"
"strconv"
"sync"
"testing"
"github.com/cortexproject/cortex/pkg/chunk"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
"github.com/stretchr/testify/require"
)
type mockTableQuerier struct {
@ -19,43 +20,43 @@ func (m *mockTableQuerier) MultiQueries(ctx context.Context, queries []chunk.Ind
m.Lock()
defer m.Unlock()
for _, query := range queries{
for _, query := range queries {
m.queries[query.HashValue] = query
}
return nil
}
func (m *mockTableQuerier) hasQueries(t *testing.T, count int) {
func (m *mockTableQuerier) hasQueries(t *testing.T, count int) {
require.Len(t, m.queries, count)
for i := 0; i < count; i++ {
idx := strconv.Itoa(i)
require.Equal(t, m.queries[idx], chunk.IndexQuery{
HashValue: idx,
HashValue: idx,
ValueEqual: []byte(idx),
})
}
}
func TestDoParallelQueries(t *testing.T) {
for _, tc := range []struct{
name string
for _, tc := range []struct {
name string
queryCount int
}{
{
name: "queries < maxQueriesPerGoroutine",
queryCount: maxQueriesPerGoroutine/2,
name: "queries < maxQueriesPerGoroutine",
queryCount: maxQueriesPerGoroutine / 2,
},
{
name: "queries = maxQueriesPerGoroutine",
name: "queries = maxQueriesPerGoroutine",
queryCount: maxQueriesPerGoroutine,
},
{
name: "queries > maxQueriesPerGoroutine",
queryCount: maxQueriesPerGoroutine*2,
name: "queries > maxQueriesPerGoroutine",
queryCount: maxQueriesPerGoroutine * 2,
},
}{
} {
t.Run(tc.name, func(t *testing.T) {
queries := buildQueries(tc.queryCount)
@ -72,18 +73,17 @@ func TestDoParallelQueries(t *testing.T) {
})
}
}
func buildQueries(n int) []chunk.IndexQuery {
queries := make([]chunk.IndexQuery, 0, n)
for i := 0; i < n; i ++ {
for i := 0; i < n; i++ {
idx := strconv.Itoa(i)
queries = append(queries, chunk.IndexQuery{
HashValue: idx,
HashValue: idx,
ValueEqual: []byte(idx),
})
}
return queries
}

@ -8,8 +8,9 @@ import (
"testing"
"github.com/cortexproject/cortex/pkg/chunk/local"
"github.com/grafana/loki/pkg/storage/stores/shipper/testutil"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/storage/stores/shipper/testutil"
)
func Test_GetFileFromStorage(t *testing.T) {

@ -16,8 +16,9 @@ import (
"github.com/cortexproject/cortex/pkg/util"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/grafana/loki/pkg/logproto"
"github.com/prometheus/common/model"
"github.com/grafana/loki/pkg/logproto"
)
const (

Loading…
Cancel
Save