chore: remove copied loop vars (#14383)

pull/14394/head
Trevor Whitney (8 months ago), committed by GitHub
parent 833bf0def6
commit b5462b6639
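
Context (not part of the original commit message): Go 1.22 changed for-loop semantics so that each iteration declares a fresh loop variable. Defensive copies such as "tt := tt", which guarded closures (parallel subtests, goroutines, stored pointers) against capturing a single shared variable, therefore no longer do anything, and this commit deletes them across the repository. It also swaps the now-deprecated exportloopref linter for copyloopvar, which flags such redundant copies. A minimal, hypothetical table-driven test (not taken from this diff) showing the before/after pattern:

package example

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

// Hypothetical test illustrating the pattern this commit removes repo-wide.
func TestUpper(t *testing.T) {
	tests := map[string]struct {
		input, expected string
	}{
		"lower": {input: "loki", expected: "LOKI"},
		"mixed": {input: "Loki", expected: "LOKI"},
	}
	for name, tc := range tests {
		// Before Go 1.22 the loop variable was shared across iterations, so a
		// copy was needed before capturing it in a parallel subtest:
		//
		//	tc := tc
		//
		// With per-iteration loop variables (Go 1.22+) that copy is redundant,
		// and t.Run can capture tc directly.
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			require.Equal(t, tc.expected, strings.ToUpper(tc.input))
		})
	}
}
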
92 changed files (changed lines per file in parentheses):

1. .golangci.yml (2)
2. clients/pkg/logentry/metric/counters_test.go (1)
3. clients/pkg/logentry/stages/decolorize_test.go (2)
4. clients/pkg/logentry/stages/eventlogmessage_test.go (4)
5. clients/pkg/logentry/stages/extensions_test.go (2)
6. clients/pkg/logentry/stages/json_test.go (4)
7. clients/pkg/logentry/stages/labels_test.go (2)
8. clients/pkg/logentry/stages/logfmt_test.go (4)
9. clients/pkg/logentry/stages/metrics_test.go (1)
10. clients/pkg/logentry/stages/output_test.go (2)
11. clients/pkg/logentry/stages/pipeline_test.go (3)
12. clients/pkg/logentry/stages/regex_test.go (4)
13. clients/pkg/logentry/stages/replace_test.go (3)
14. clients/pkg/logentry/stages/template_test.go (2)
15. clients/pkg/logentry/stages/tenant_test.go (4)
16. clients/pkg/logentry/stages/timestamp_test.go (4)
17. clients/pkg/logentry/stages/util_test.go (4)
18. clients/pkg/promtail/client/batch_test.go (4)
19. clients/pkg/promtail/client/config_test.go (1)
20. clients/pkg/promtail/config/config_test.go (1)
21. clients/pkg/promtail/targets/kafka/target_syncer_test.go (1)
22. clients/pkg/promtail/targets/kafka/topics_test.go (2)
23. clients/pkg/promtail/targets/syslog/syslogtarget_test.go (3)
24. pkg/bloombuild/builder/batch_test.go (2)
25. pkg/chunkenc/memchunk_test.go (13)
26. pkg/compactor/retention/retention_test.go (2)
27. pkg/compactor/table_test.go (1)
28. pkg/compression/pool_test.go (1)
29. pkg/distributor/distributor_test.go (4)
30. pkg/distributor/ingestion_rate_strategy_test.go (2)
31. pkg/ingester/index/bitprefix_test.go (1)
32. pkg/ingester/index/index_test.go (1)
33. pkg/ingester/limiter_test.go (4)
34. pkg/iter/entry_iterator_test.go (2)
35. pkg/logcli/output/default_test.go (4)
36. pkg/logcli/output/jsonl_test.go (2)
37. pkg/logcli/output/raw_test.go (2)
38. pkg/logcli/query/query_test.go (2)
39. pkg/loghttp/params.go (2)
40. pkg/loghttp/params_test.go (6)
41. pkg/loghttp/query_test.go (1)
42. pkg/logql/engine_test.go (4)
43. pkg/logql/log/parser_hints_test.go (1)
44. pkg/logql/log/parser_test.go (1)
45. pkg/logql/log/pattern/lexer_test.go (1)
46. pkg/logql/log/pattern/parser_test.go (1)
47. pkg/logql/log/pattern/pattern_test.go (1)
48. pkg/logql/rangemapper_test.go (3)
49. pkg/logql/sketch/topk_slow_test.go (1)
50. pkg/logql/syntax/ast_test.go (2)
51. pkg/logql/syntax/walk_test.go (2)
52. pkg/loki/modules.go (2)
53. pkg/pattern/drain/drain_test.go (3)
54. pkg/pattern/ingester_querier.go (2)
55. pkg/pattern/iter/iterator_test.go (1)
56. pkg/pattern/iter/merge_test.go (1)
57. pkg/querier-rf1/wal/chunks.go (2)
58. pkg/querier-rf1/wal/querier.go (2)
59. pkg/querier/ingester_querier_test.go (4)
60. pkg/querier/querier_test.go (2)
61. pkg/querier/queryrange/codec_test.go (1)
62. pkg/querier/queryrange/prometheus_test.go (1)
63. pkg/querier/queryrange/queryrangebase/promql_test.go (1)
64. pkg/querier/queryrange/split_by_range_test.go (2)
65. pkg/queue/queue_test.go (2)
66. pkg/ruler/base/ruler_test.go (1)
67. pkg/storage/batch_test.go (3)
68. pkg/storage/bucket/azure/config_test.go (2)
69. pkg/storage/bucket/client_test.go (2)
70. pkg/storage/bucket/http/config_test.go (2)
71. pkg/storage/bucket/s3/config_test.go (2)
72. pkg/storage/chunk/client/aws/dynamodb_index_reader.go (3)
73. pkg/storage/chunk/client/aws/s3_storage_client_test.go (1)
74. pkg/storage/chunk/client/azure/blob_storage_client_test.go (1)
75. pkg/storage/chunk/client/gcp/gcs_object_client_test.go (1)
76. pkg/storage/chunk/client/openstack/swift_object_client_test.go (1)
77. pkg/storage/config/schema_config_test.go (2)
78. pkg/storage/lazy_chunk_test.go (2)
79. pkg/storage/store.go (1)
80. pkg/storage/store_test.go (1)
81. pkg/storage/stores/shipper/bloomshipper/fetcher_test.go (1)
82. pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index_test.go (1)
83. pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator_test.go (2)
84. pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor_test.go (1)
85. pkg/storage/stores/shipper/indexshipper/downloads/table_manager_test.go (1)
86. pkg/storage/wal/segment.go (2)
87. pkg/storage/wal/segment_test.go (1)
88. pkg/util/marshal/marshal_test.go (1)
89. pkg/util/query_string_builder_test.go (2)
90. pkg/util/string_test.go (2)
91. tools/querytee/proxy_endpoint.go (3)
92. tools/querytee/proxy_endpoint_test.go (2)

@ -63,7 +63,7 @@ linters:
- govet
- typecheck
- depguard
- exportloopref
- copyloopvar
- gofmt
- goimports
- gosimple
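
For context on the linter swap above (behaviour as described in the golangci-lint docs, summarised rather than quoted): exportloopref reported pointers to the shared loop variable escaping the loop body, a bug class that Go 1.22 eliminated, and it has since been deprecated; copyloopvar is its replacement and instead reports loop-variable copies that are no longer needed. A small hypothetical sketch of the code each linter cares about:

package example

// Pre-Go 1.22, appending &v without first copying v was the classic bug that
// exportloopref flagged: every element of out would point at the same variable.
// With Go 1.22 per-iteration variables the loop below is already correct, and
// copyloopvar instead flags the leftover defensive copy as deletable.
func pointers(values []int) []*int {
	out := make([]*int, 0, len(values))
	for _, v := range values {
		v := v // copyloopvar: redundant under Go 1.22+ loop semantics
		out = append(out, &v)
	}
	return out
}
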

@ -78,7 +78,6 @@ func Test_validateCounterConfig(t *testing.T) {
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
err := validateCounterConfig(&tt.config)

@ -36,8 +36,6 @@ func TestPipeline_Decolorize(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()

@ -106,7 +106,6 @@ func TestEventLogMessage_simple(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
testData.extractedValues[testData.sourcekey] = testData.msgdata
t.Run(testName, func(t *testing.T) {
@ -151,7 +150,6 @@ func TestEventLogMessageConfig_validate(t *testing.T) {
},
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
_, err := newEventLogMessageStage(util_log.Logger, tt.config)
if tt.err != nil {
@ -262,7 +260,6 @@ func TestEventLogMessage_Real(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
testData.extractedValues[testData.sourcekey] = testData.msgdata
t.Run(testName, func(t *testing.T) {
@ -318,7 +315,6 @@ func TestEventLogMessage_invalid(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
testData.extractedValues[testData.sourcekey] = testData.msgdata
t.Run(testName, func(t *testing.T) {

@ -65,7 +65,6 @@ func TestNewDocker(t *testing.T) {
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
t.Parallel()
p, err := NewDocker(util_log.Logger, prometheus.DefaultRegisterer)
@ -268,7 +267,6 @@ func TestNewCri(t *testing.T) {
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
t.Parallel()
cfg := map[string]interface{}{}

@ -78,8 +78,6 @@ func TestPipeline_JSON(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()
@ -179,7 +177,6 @@ func TestJSONConfig_validate(t *testing.T) {
},
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
c, err := parseJSONConfig(tt.config)
assert.NoError(t, err, "failed to create config: %s", err)
@ -339,7 +336,6 @@ func TestJSONParser_Parse(t *testing.T) {
},
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
t.Parallel()
p, err := New(util_log.Logger, nil, StageTypeJSON, tt.config, nil)

@ -115,7 +115,6 @@ func TestLabels(t *testing.T) {
},
}
for name, test := range tests {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
err := validateLabelsConfig(test.config)
@ -176,7 +175,6 @@ func TestLabelStage_Process(t *testing.T) {
},
}
for name, test := range tests {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
st, err := newLabelStage(util_log.Logger, test.config)

@ -64,8 +64,6 @@ func TestPipeline_Logfmt(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()
@ -153,7 +151,6 @@ func TestLogfmtConfig_validate(t *testing.T) {
},
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
c, err := parseLogfmtConfig(tt.config)
assert.NoError(t, err)
@ -281,7 +278,6 @@ func TestLogfmtParser_Parse(t *testing.T) {
},
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
t.Parallel()
p, err := New(util_log.Logger, nil, StageTypeLogfmt, tt.config, nil)

@ -415,7 +415,6 @@ func TestValidateMetricsConfig(t *testing.T) {
}
for name, test := range tests {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
err := validateMetricsConfig(test.config)

@ -86,7 +86,6 @@ func TestOutputValidation(t *testing.T) {
},
}
for name, test := range tests {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
err := validateOutputConfig(test.config)
@ -120,7 +119,6 @@ func TestOutputStage_Process(t *testing.T) {
},
}
for name, test := range tests {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
st, err := newOutputStage(util_log.Logger, test.config)

@ -194,8 +194,6 @@ func TestPipeline_Process(t *testing.T) {
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
var config map[string]interface{}
@ -304,7 +302,6 @@ func TestPipeline_Wrap(t *testing.T) {
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
t.Parallel()
c := fake.New(func() {})

@ -102,8 +102,6 @@ func TestPipeline_Regex(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()
@ -204,7 +202,6 @@ func TestRegexConfig_validate(t *testing.T) {
},
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
c, err := parseRegexConfig(tt.config)
if err != nil {
@ -322,7 +319,6 @@ func TestRegexParser_Parse(t *testing.T) {
},
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
t.Parallel()
p, err := New(util_log.Logger, nil, StageTypeRegex, tt.config, nil)

@ -161,8 +161,6 @@ func TestPipeline_Replace(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()
@ -252,7 +250,6 @@ func TestReplaceConfig_validate(t *testing.T) {
},
}
for tName, tt := range tests {
tt := tt
t.Run(tName, func(t *testing.T) {
c, err := parseReplaceConfig(tt.config)
if err != nil {

@ -105,7 +105,6 @@ func TestTemplateValidation(t *testing.T) {
},
}
for name, test := range tests {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
_, err := validateTemplateConfig(test.config)
@ -375,7 +374,6 @@ func TestTemplateStage_Process(t *testing.T) {
},
}
for name, test := range tests {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
st, err := newTemplateStage(util_log.Logger, test.config)

@ -126,8 +126,6 @@ func TestTenantStage_Validation(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
stage, err := newTenantStage(util_log.Logger, testData.config)
@ -202,8 +200,6 @@ func TestTenantStage_Process(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
stage, err := newTenantStage(util_log.Logger, testData.config)
require.NoError(t, err)

@ -174,7 +174,6 @@ func TestTimestampValidation(t *testing.T) {
},
}
for name, test := range tests {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
parser, err := validateTimestampConfig(test.config)
@ -295,7 +294,6 @@ func TestTimestampStage_Process(t *testing.T) {
},
}
for name, test := range tests {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
st, err := newTimestampStage(util_log.Logger, test.config)
@ -431,8 +429,6 @@ func TestTimestampStage_ProcessActionOnFailure(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()

@ -145,8 +145,6 @@ func TestConvertDateLayout(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()
@ -224,8 +222,6 @@ func TestParseTimestampWithoutYear(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()

@ -73,8 +73,6 @@ func TestBatch_add(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
b := newBatch(0)
@ -123,8 +121,6 @@ func TestBatch_encode(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()

@ -97,7 +97,6 @@ func Test_Config(t *testing.T) {
},
}
for _, tc := range tests {
tc := tc
err := yaml.Unmarshal([]byte(tc.configValues), &clientConfig)
if tc.expectedErr != nil {

@ -175,7 +175,6 @@ func TestConfig_Setup(t *testing.T) {
},
},
} {
tt := tt
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
tt.in.Setup(log.NewNopLogger())
require.Equal(t, tt.expected, tt.in)

@ -195,7 +195,6 @@ func Test_validateConfig(t *testing.T) {
}
for i, tt := range tests {
tt := tt
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
err := validateConfig(tt.cfg)
if (err != nil) != tt.wantErr {

@ -49,7 +49,6 @@ func Test_NewTopicManager(t *testing.T) {
false,
},
} {
tt := tt
t.Run(strings.Join(tt.in, ","), func(t *testing.T) {
t.Parallel()
_, err := newTopicManager(&mockKafkaClient{}, tt.in)
@ -86,7 +85,6 @@ func Test_Topics(t *testing.T) {
false,
},
} {
tt := tt
t.Run("", func(t *testing.T) {
t.Parallel()

@ -305,7 +305,6 @@ func Benchmark_SyslogTarget(b *testing.B) {
{"tcp", protocolTCP, fmtOctetCounting},
{"udp", protocolUDP, fmtOctetCounting},
} {
tt := tt
b.Run(tt.name, func(b *testing.B) {
client := fake.New(func() {})
@ -366,7 +365,6 @@ func TestSyslogTarget(t *testing.T) {
{"udp newline separated", protocolUDP, fmtNewline},
{"udp octetcounting", protocolUDP, fmtOctetCounting},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
w := log.NewSyncWriter(os.Stderr)
logger := log.NewLogfmtLogger(w)
@ -481,7 +479,6 @@ func TestSyslogTarget_RFC5424Messages(t *testing.T) {
{"tcp newline separated", protocolTCP, fmtNewline},
{"tcp octetcounting", protocolTCP, fmtOctetCounting},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
w := log.NewSyncWriter(os.Stderr)
logger := log.NewLogfmtLogger(w)

@ -106,7 +106,6 @@ func TestBatchedLoader(t *testing.T) {
inputs: [][]int{{0}},
},
} {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
fetchers := make([]Fetcher[int, int], 0, len(tc.inputs))
for range tc.inputs {
@ -193,7 +192,6 @@ func TestOverlappingBlocksIter(t *testing.T) {
exp: 2,
},
} {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
it := overlappingBlocksIter(tc.inp)
var overlapping [][]bloomshipper.BlockRef

@ -86,7 +86,6 @@ const (
func TestBlocksInclusive(t *testing.T) {
for _, enc := range testEncodings {
enc := enc
for _, format := range allPossibleFormats {
chunkfmt, headfmt := format.chunkFormat, format.headBlockFmt
chk := NewMemChunk(chunkfmt, enc, headfmt, testBlockSize, testTargetSize)
@ -105,7 +104,6 @@ func TestBlocksInclusive(t *testing.T) {
func TestBlock(t *testing.T) {
for _, enc := range testEncodings {
enc := enc
for _, format := range allPossibleFormats {
chunkFormat, headBlockFmt := format.chunkFormat, format.headBlockFmt
t.Run(fmt.Sprintf("encoding:%v chunkFormat:%v headBlockFmt:%v", enc, chunkFormat, headBlockFmt), func(t *testing.T) {
@ -260,7 +258,6 @@ func TestBlock(t *testing.T) {
func TestCorruptChunk(t *testing.T) {
for _, enc := range testEncodings {
enc := enc
for _, format := range allPossibleFormats {
chunkfmt, headfmt := format.chunkFormat, format.headBlockFmt
@ -337,7 +334,6 @@ func TestReadFormatV1(t *testing.T) {
func TestRoundtripV2(t *testing.T) {
for _, testData := range allPossibleFormats {
for _, enc := range testEncodings {
enc := enc
t.Run(testNameWithFormats(enc, testData.chunkFormat, testData.headBlockFmt), func(t *testing.T) {
t.Parallel()
@ -397,7 +393,6 @@ func testNameWithFormats(enc compression.Codec, chunkFormat byte, headBlockFmt H
func TestRoundtripV3(t *testing.T) {
for _, enc := range testEncodings {
enc := enc
for _, format := range allPossibleFormats {
chunkfmt, headfmt := format.chunkFormat, format.headBlockFmt
t.Run(fmt.Sprintf("%v-%v", format, enc), func(t *testing.T) {
@ -422,10 +417,8 @@ func TestRoundtripV3(t *testing.T) {
func TestSerialization(t *testing.T) {
for _, testData := range allPossibleFormats {
for _, enc := range testEncodings {
enc := enc
// run tests with and without structured metadata since it is optional
for _, appendWithStructuredMetadata := range []bool{false, true} {
appendWithStructuredMetadata := appendWithStructuredMetadata
testName := testNameWithFormats(enc, testData.chunkFormat, testData.headBlockFmt)
if appendWithStructuredMetadata {
testName = fmt.Sprintf("%s - append structured metadata", testName)
@ -511,7 +504,6 @@ func TestSerialization(t *testing.T) {
func TestChunkFilling(t *testing.T) {
for _, testData := range allPossibleFormats {
for _, enc := range testEncodings {
enc := enc
t.Run(testNameWithFormats(enc, testData.chunkFormat, testData.headBlockFmt), func(t *testing.T) {
t.Parallel()
@ -676,8 +668,6 @@ func TestMemChunk_AppendOutOfOrder(t *testing.T) {
for _, f := range HeadBlockFmts {
for testName, tester := range tests {
tester := tester
t.Run(testName, func(t *testing.T) {
t.Parallel()
@ -1117,7 +1107,6 @@ func TestMemChunk_IteratorBounds(t *testing.T) {
t.Run(
fmt.Sprintf("mint:%d,maxt:%d,direction:%s", tt.mint.UnixNano(), tt.maxt.UnixNano(), tt.direction),
func(t *testing.T) {
tt := tt
c := createChunk()
noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{})
@ -1143,7 +1132,6 @@ func TestMemChunk_IteratorBounds(t *testing.T) {
func TestMemchunkLongLine(t *testing.T) {
for _, enc := range testEncodings {
enc := enc
t.Run(enc.String(), func(t *testing.T) {
t.Parallel()
@ -1777,7 +1765,6 @@ func TestMemChunk_SpaceFor(t *testing.T) {
func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) {
for _, enc := range testEncodings {
enc := enc
t.Run(enc.String(), func(t *testing.T) {
streamLabels := labels.Labels{
{Name: "job", Value: "fake"},

@ -154,7 +154,6 @@ func Test_Retention(t *testing.T) {
},
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
// insert in the store.
var (
@ -566,7 +565,6 @@ func TestChunkRewriter(t *testing.T) {
},
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
store := newTestStore(t)
require.NoError(t, store.Put(context.TODO(), []chunk.Chunk{tt.chunk}))

@ -350,7 +350,6 @@ func TestTable_CompactionRetention(t *testing.T) {
}),
},
} {
tt := tt
commonDBsConfig := IndexesConfig{
NumCompactedFiles: tt.dbsSetup.numCompactedDBs,
NumUnCompactedFiles: tt.dbsSetup.numUnCompactedCommonDBs,

@ -16,7 +16,6 @@ import (
func TestPool(t *testing.T) {
for _, enc := range supportedCodecs {
enc := enc
t.Run(enc.String(), func(t *testing.T) {
var wg sync.WaitGroup

@ -401,8 +401,6 @@ func Test_IncrementTimestamp(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
ing := &mockIngester{}
distributors, _ := prepare(t, 1, 3, testData.limits, func(_ string) (ring_client.PoolClient, error) { return ing, nil })
@ -1216,8 +1214,6 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
limits := &validation.Limits{}
flagext.DefaultValues(limits)

@ -63,8 +63,6 @@ func TestIngestionRateStrategy(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
var strategy limiter.RateLimiterStrategy

@ -38,7 +38,6 @@ func Test_BitPrefixGetShards(t *testing.T) {
{8, true, logql.NewPowerOfTwoShard(index.ShardAnnotation{Shard: 4, Of: 16}).Ptr(), []uint32{2}},
{8, true, logql.NewPowerOfTwoShard(index.ShardAnnotation{Shard: 15, Of: 16}).Ptr(), []uint32{7}},
} {
tt := tt
t.Run(tt.shard.String()+fmt.Sprintf("_total_%d", tt.total), func(t *testing.T) {
ii, err := NewBitPrefixWithShards(tt.total)
require.Nil(t, err)

@ -32,7 +32,6 @@ func Test_GetShards(t *testing.T) {
{32, &index.ShardAnnotation{Shard: 15, Of: 16}, []uint32{15, 31}},
{64, &index.ShardAnnotation{Shard: 15, Of: 16}, []uint32{15, 31, 47, 63}},
} {
tt := tt
t.Run(tt.shard.String()+fmt.Sprintf("_total_%d", tt.total), func(t *testing.T) {
ii := NewWithShards(tt.total)
res := ii.getShards(tt.shard)

@ -119,8 +119,6 @@ func TestStreamCountLimiter_AssertNewStreamAllowed(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
// Mock limits
limits, err := validation.NewOverrides(validation.Limits{
@ -183,8 +181,6 @@ func TestLimiter_minNonZero(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
limiter := NewLimiter(nil, NilMetrics, nil)
assert.Equal(t, testData.expected, limiter.minNonZero(testData.first, testData.second))

@ -178,8 +178,6 @@ func TestMergeIteratorPrefetch(t *testing.T) {
}
for testName, testFunc := range tests {
testFunc := testFunc
t.Run(testName, func(t *testing.T) {
t.Parallel()

@ -79,8 +79,6 @@ func TestDefaultOutput_Format(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()
writer := &bytes.Buffer{}
@ -168,8 +166,6 @@ func TestColorForLabels(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()
labelsColor := getColor(testData.labels.String())

@ -63,8 +63,6 @@ func TestJSONLOutput_Format(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()
writer := &bytes.Buffer{}

@ -61,8 +61,6 @@ func TestRawOutput_Format(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()

@ -887,8 +887,6 @@ func TestParallelJobs(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(
tt.name,
func(t *testing.T) {

@ -49,7 +49,7 @@ func lineLimit(r *http.Request) (uint32, error) {
func detectedFieldsLimit(r *http.Request) (uint32, error) {
limit := r.Form.Get("limit")
if limit == "" {
// for backwards compatability
// for backwards compatibility
limit = r.Form.Get("field_limit")
}

@ -39,8 +39,6 @@ func TestHttp_defaultQueryRangeStep(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
assert.Equal(t, testData.expected, defaultQueryRangeStep(testData.start, testData.end))
})
@ -123,8 +121,6 @@ func TestHttp_ParseRangeQuery_Step(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
req := httptest.NewRequest("GET", testData.reqPath, nil)
err := req.ParseForm()
@ -176,8 +172,6 @@ func Test_interval(t *testing.T) {
},
}
for _, testData := range tests {
testData := testData
t.Run(testData.name, func(t *testing.T) {
req := httptest.NewRequest("GET", testData.reqPath, nil)
err := req.ParseForm()

@ -280,7 +280,6 @@ func Test_QueryResponseUnmarshal(t *testing.T) {
},
},
} {
tt := tt
t.Run("", func(t *testing.T) {
b, err := jsoniter.Marshal(tt)
require.Nil(t, err)

@ -147,7 +147,6 @@ func TestEngine_LogsRateUnwrap(t *testing.T) {
promql.Vector{promql.Sample{T: 60 * 1000, F: 0.46666766666666665, Metric: labels.FromStrings("app", "foo")}},
},
} {
test := test
t.Run(fmt.Sprintf("%s %s", test.qs, test.direction), func(t *testing.T) {
t.Parallel()
@ -954,7 +953,6 @@ func TestEngine_InstantQuery(t *testing.T) {
},
},
} {
test := test
t.Run(fmt.Sprintf("%s %s", test.qs, test.direction), func(t *testing.T) {
eng := NewEngine(EngineOpts{}, newQuerierRecorder(t, test.data, test.params), NoLimits, log.NewNopLogger())
@ -2256,7 +2254,6 @@ func TestEngine_RangeQuery(t *testing.T) {
},
},
} {
test := test
t.Run(fmt.Sprintf("%s %s", test.qs, test.direction), func(t *testing.T) {
t.Parallel()
@ -2425,7 +2422,6 @@ func TestStepEvaluator_Error(t *testing.T) {
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
eng := NewEngine(EngineOpts{}, tc.querier, NoLimits, log.NewNopLogger())

@ -225,7 +225,6 @@ func Test_ParserHints(t *testing.T) {
`{app="nginx", message_message="foo"}`,
},
} {
tt := tt
t.Run(tt.expr, func(t *testing.T) {
t.Parallel()
expr, err := syntax.ParseSampleExpr(tt.expr)

@ -1406,7 +1406,6 @@ func Test_PatternParser(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.pattern, func(t *testing.T) {
t.Parallel()
b := NewBaseLabelsBuilder().ForLabels(tt.lbs, tt.lbs.Hash())

@ -20,7 +20,6 @@ func Test_Lex(t *testing.T) {
{`<1foo>`, []int{LITERAL, LITERAL, LITERAL, LITERAL, LITERAL, LITERAL}},
{``, []int{LITERAL}},
} {
tc := tc
t.Run(tc.input, func(t *testing.T) {
actual := []int{}
l := newLexer()

@ -53,7 +53,6 @@ func Test_Parse(t *testing.T) {
nil,
},
} {
tc := tc
actual, err := parseExpr(tc.input)
if tc.err != nil || err != nil {
require.Equal(t, tc.err, err)

@ -176,7 +176,6 @@ func Test_BytesIndexUnicode(t *testing.T) {
func Test_matcher_Matches(t *testing.T) {
for _, tt := range fixtures {
tt := tt
t.Run(tt.expr, func(t *testing.T) {
t.Parallel()
m, err := New(tt.expr)

@ -75,7 +75,6 @@ func Test_SplitRangeInterval(t *testing.T) {
2,
},
} {
tc := tc
t.Run(tc.expr, func(t *testing.T) {
t.Parallel()
@ -1811,7 +1810,6 @@ func Test_SplitRangeVectorMapping(t *testing.T) {
3,
},
} {
tc := tc
t.Run(tc.expr, func(t *testing.T) {
t.Parallel()
@ -2002,7 +2000,6 @@ func Test_SplitRangeVectorMapping_Noop(t *testing.T) {
`vector(0.000000)`,
},
} {
tc := tc
t.Run(tc.expr, func(t *testing.T) {
t.Parallel()

@ -135,7 +135,6 @@ func TestCMSTopk(t *testing.T) {
}
for _, tc := range testcases {
tc := tc
t.Run(fmt.Sprintf("num_streams/%d_k/%d_iterations/%d", tc.numStreams, tc.k, tc.iterations), func(t *testing.T) {
t.Parallel()
missing := 0

@ -53,7 +53,6 @@ func Test_logSelectorExpr_String(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.selector, func(t *testing.T) {
t.Parallel()
expr, err := ParseLogSelector(tt.selector, true)
@ -588,7 +587,6 @@ func Test_FilterMatcher(t *testing.T) {
[]linecheck{{"counter=1", false}, {"counter=0", false}, {"counter=-1", true}, {"counter=-2", true}},
},
} {
tt := tt
t.Run(tt.q, func(t *testing.T) {
t.Parallel()
expr, err := ParseLogSelector(tt.q, true)

@ -26,7 +26,6 @@ func Test_Walkable(t *testing.T) {
},
}
for _, test := range tests {
test := test
t.Run(test.desc, func(t *testing.T) {
expr, err := ParseExpr(test.expr)
require.Nil(t, err)
@ -72,7 +71,6 @@ func Test_AppendMatchers(t *testing.T) {
},
}
for _, test := range tests {
test := test
t.Run(test.desc, func(t *testing.T) {
expr, err := ParseExpr(test.expr)
require.NoError(t, err)

@ -1484,8 +1484,6 @@ func (t *Loki) initIndexGateway() (services.Service, error) {
var indexClients []indexgateway.IndexClientWithRange
for i, period := range t.Cfg.SchemaConfig.Configs {
period := period
if period.IndexType != types.BoltDBShipperType {
continue
}

@ -426,7 +426,6 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.inputFile, func(t *testing.T) {
file, err := os.Open(tt.inputFile)
require.NoError(t, err)
@ -529,7 +528,6 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T)
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
for _, line := range tt.inputLines {
tt.drain.Train(line, 0)
@ -630,7 +628,6 @@ func TestDrain_PruneTreeClearsOldBranches(t *testing.T) {
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
now := time.Now()
for i, line := range tt.inputLines {

@ -146,8 +146,6 @@ func (q *IngesterQuerier) forGivenIngesters(ctx context.Context, replicationSet
responses := make([]ResponseFromIngesters, len(replicationSet.Instances))
for i, ingester := range replicationSet.Instances {
ingester := ingester
i := i
g.Go(func() error {
client, err := q.ringClient.GetClientFor(ingester.Addr)
if err != nil {
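
The same reasoning applies to goroutines started inside a loop, as in this hunk and the errgroup-based hunks that follow: the index and value no longer have to be rebound before g.Go captures them. A minimal sketch with hypothetical names, assuming golang.org/x/sync/errgroup:

package example

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// Hypothetical example of the errgroup pattern being simplified: with Go 1.22
// per-iteration loop variables, capturing i and addr inside g.Go is safe
// without the "i := i" / "addr := addr" copies this commit removes.
func pingAll(ctx context.Context, addrs []string) ([]string, error) {
	results := make([]string, len(addrs))
	g, ctx := errgroup.WithContext(ctx)
	for i, addr := range addrs {
		g.Go(func() error {
			if err := ctx.Err(); err != nil {
				return err
			}
			results[i] = "pinged " + addr
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	return results, nil
}
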

@ -48,7 +48,6 @@ func TestSliceIterator(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
got := slice(NewSlice(tt.pattern, tt.samples))
require.Equal(t, tt.want, got)

@ -62,7 +62,6 @@ func TestMerge(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
it := NewMerge(tt.iterators...)
defer it.Close()

@ -285,8 +285,6 @@ func downloadChunks(ctx context.Context, storage BlockStorage, chks []ChunkData)
g, ctx := errgroup.WithContext(ctx)
g.SetLimit(64)
for i, chunk := range chks {
chunk := chunk
i := i
g.Go(func() error {
chunkData, err := readChunkData(ctx, storage, chunk)
if err != nil {

@ -178,8 +178,6 @@ func (q *Querier) forIndices(ctx context.Context, req *metastorepb.ListBlocksFor
g, ctx := errgroup.WithContext(ctx)
g.SetLimit(32)
for _, meta := range metas {
meta := meta
g.Go(func() error {
reader, err := q.blockStorage.GetObjectRange(ctx, wal.Dir+meta.Id, meta.IndexRef.Offset, meta.IndexRef.Length)
if err != nil {

@ -72,7 +72,6 @@ func TestIngesterQuerier_earlyExitOnQuorum(t *testing.T) {
for testName, testData := range tests {
for _, retErr := range []bool{true, false} {
testName, testData, retErr := testName, testData, retErr
if retErr {
testName += " call should return early on breaching max errors"
} else {
@ -168,7 +167,6 @@ func TestIngesterQuerier_earlyExitOnQuorum(t *testing.T) {
for testName, testData := range tests {
for _, retErr := range []bool{true, false} {
testName, testData, retErr := testName, testData, retErr
if retErr {
testName += " call should not return early on breaching max errors"
} else {
@ -277,8 +275,6 @@ func TestQuerier_tailDisconnectedIngesters(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
req := logproto.TailRequest{
Query: "{type=\"test\"}",

@ -529,8 +529,6 @@ func TestQuerier_concurrentTailLimits(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
// For this test's purpose, whenever a new ingester client needs to
// be created, the factory will always return the same mock instance

@ -837,7 +837,6 @@ func Test_codec_DecodeProtobufResponseParity(t *testing.T) {
}
codec := RequestProtobufCodec{}
for i, queryTest := range queryTests {
i := i
t.Run(queryTest.name, func(t *testing.T) {
params := url.Values{
"query": []string{`{app="foo"}`},

@ -269,7 +269,6 @@ func Test_encodePromResponse(t *testing.T) {
}`,
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
r, err := tt.resp.encode(context.Background())
require.NoError(t, err)

@ -314,7 +314,6 @@ func Test_PromQL(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.normalQuery, func(t *testing.T) {
baseQuery, err := engine.NewRangeQuery(context.Background(), shardAwareQueryable, nil, tt.normalQuery, start, end, step)

@ -256,7 +256,6 @@ func Test_RangeVectorSplitAlign(t *testing.T) {
expected: expectedMergedResponseWithTime(1+2+3+4, twelve34),
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
srm := NewSplitByRangeMiddleware(log.NewNopLogger(), testEngineOpts, fakeLimits{
maxSeries: 10000,
@ -408,7 +407,6 @@ func Test_RangeVectorSplit(t *testing.T) {
expected: expectedMergedResponse(1 + 2 + 3),
},
} {
tc := tc
t.Run(tc.in.GetQuery(), func(t *testing.T) {
resp, err := srm.Wrap(queryrangebase.HandlerFunc(
func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {

@ -44,8 +44,6 @@ func BenchmarkGetNextRequest(b *testing.B) {
}
for _, benchCase := range benchCases {
benchCase := benchCase
b.Run(benchCase.name, func(b *testing.B) {
queues := make([]*RequestQueue, 0, b.N)

@ -1763,7 +1763,6 @@ func TestSendAlerts(t *testing.T) {
}
for i, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
if len(tc.in) == 0 {

@ -100,7 +100,6 @@ func Test_newLogBatchChunkIterator(t *testing.T) {
var tests map[string]testCase
for _, periodConfig := range periodConfigs {
periodConfig := periodConfig
chunkfmt, headfmt, err := periodConfig.ChunkFormat()
require.NoError(t, err)
@ -1000,7 +999,6 @@ func Test_newLogBatchChunkIterator(t *testing.T) {
for _, schemaConfig := range schemaConfigs {
s := schemaConfig
for name, tt := range tests {
tt := tt
t.Run(name, func(t *testing.T) {
it, err := newLogBatchIterator(context.Background(), s, NilMetrics, tt.chunks, tt.batchSize, newMatchers(tt.matchers), log.NewNoopPipeline(), tt.direction, tt.start, tt.end, nil)
require.NoError(t, err)
@ -1416,7 +1414,6 @@ func Test_newSampleBatchChunkIterator(t *testing.T) {
}
for name, tt := range tests {
tt := tt
t.Run(name, func(t *testing.T) {
ex, err := log.NewLineSampleExtractor(log.CountExtractor, nil, nil, false, false)
require.NoError(t, err)

@ -86,8 +86,6 @@ http:
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
cfg := Config{}
flagext.DefaultValues(&cfg)

@ -69,8 +69,6 @@ func TestNewClient(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
// Load config
cfg := Config{}

@ -65,8 +65,6 @@ max_connections_per_host: 8
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
cfg := Config{}
flagext.DefaultValues(&cfg)

@ -111,8 +111,6 @@ http:
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
cfg := Config{}
flagext.DefaultValues(&cfg)

@ -80,8 +80,7 @@ func (r *dynamodbIndexReader) ReadIndexEntries(ctx context.Context, tableName st
var readerGroup errgroup.Group
// Start a goroutine for each processor
for i, processor := range processors {
segment, processor := i, processor // https://golang.org/doc/faq#closures_and_goroutines
for segment, processor := range processors {
readerGroup.Go(func() error {
input := &dynamodb.ScanInput{
TableName: aws.String(tableName),

@ -272,7 +272,6 @@ func Test_Hedging(t *testing.T) {
},
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
count := atomic.NewInt32(0)

@ -118,7 +118,6 @@ func Test_Hedging(t *testing.T) {
},
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
count := atomic.NewInt32(0)
// hijack the client to count the number of calls

@ -55,7 +55,6 @@ func Test_Hedging(t *testing.T) {
},
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
count := atomic.NewInt32(0)
server := fakeServer(t, 200*time.Millisecond, count)

@ -58,7 +58,6 @@ func Test_Hedging(t *testing.T) {
},
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
count := atomic.NewInt32(0)
// hijack the transport to count the number of calls

@ -384,8 +384,6 @@ func TestSchemaConfig_Validate(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
actual := testData.config.Validate()
assert.ErrorIs(t, actual, testData.err)

@ -38,8 +38,6 @@ func TestLazyChunkIterator(t *testing.T) {
}
for _, periodConfig := range periodConfigs {
periodConfig := periodConfig
chunkfmt, headfmt, err := periodConfig.ChunkFormat()
require.NoError(t, err)

@ -194,7 +194,6 @@ func NewStore(cfg Config, storeCfg config.ChunkStoreConfig, schemaCfg config.Sch
func (s *LokiStore) init() error {
for i, p := range s.schemaCfg.Configs {
p := p
chunkClient, err := s.chunkClientForPeriod(p)
if err != nil {
return err

@ -1771,7 +1771,6 @@ func Test_GetSeries(t *testing.T) {
[]logproto.SeriesIdentifier{},
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
if tt.req.Selector != "" {
tt.req.Plan = &plan.QueryPlan{

@ -176,7 +176,6 @@ func TestFetcher_DownloadQueue(t *testing.T) {
size: 1, workers: 0, err: "queue requires at least 1 worker",
},
} {
tc := tc
t.Run(tc.err, func(t *testing.T) {
_, err := newDownloadQueue[bool, bool](
tc.size,

@ -23,7 +23,6 @@ import (
func TestCompactedIndex_IndexProcessor(t *testing.T) {
for _, tt := range allSchemas {
tt := tt
t.Run(tt.schema, func(t *testing.T) {
cm := storage.NewClientMetrics()
defer cm.Unregister()

@ -24,7 +24,6 @@ import (
func Test_ChunkIterator(t *testing.T) {
for _, tt := range allSchemas {
tt := tt
t.Run(tt.schema, func(t *testing.T) {
cm := storage.NewClientMetrics()
defer cm.Unregister()
@ -108,7 +107,6 @@ func Test_ChunkIteratorContextCancelation(t *testing.T) {
func Test_SeriesCleaner(t *testing.T) {
for _, tt := range allSchemas {
tt := tt
t.Run(tt.schema, func(t *testing.T) {
cm := storage.NewClientMetrics()
defer cm.Unregister()

@ -368,7 +368,6 @@ func TestTable_RecreateCompactedDB(t *testing.T) {
shouldRecreateCompactedDB: true,
},
} {
tt := tt
t.Run(name, func(t *testing.T) {
if !tt.compactedDBMtime.IsZero() {
require.Equal(t, 1, tt.dbCount)

@ -319,7 +319,6 @@ func TestTableManager_ensureQueryReadiness(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
tc := tc // just to make the linter happy
resetTables()
tableManager.cfg.QueryReadyNumDays = tc.queryReadyNumDaysCfg
tableManager.cfg.Limits = &tc.queryReadinessLimits

@ -231,7 +231,6 @@ func (b *SegmentWriter) Meta(id string) *metastorepb.BlockMeta {
}
result := make([]*metastorepb.TenantStreams, 0, len(tenants))
for _, tenant := range tenants {
tenant := tenant
result = append(result, tenant)
}
sort.Slice(result, func(i, j int) bool {
@ -379,7 +378,6 @@ func (b *SegmentWriter) Reset() {
b.firstAppend = time.Time{}
b.lastAppend = time.Time{}
for _, s := range b.streams {
s := s
s.Reset()
streamSegmentPool.Put(s)
}

@ -101,7 +101,6 @@ func TestWalSegmentWriter_Append(t *testing.T) {
// Run the test cases
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
// Create a new WalSegmentWriter

@ -1125,7 +1125,6 @@ func Test_WriteQueryPatternsResponseJSON(t *testing.T) {
`{"status":"success","data":[{"pattern":"foo <*> bar","samples":[]},{"pattern":"foo <*> buzz","samples":[]}]}`,
},
} {
tc := tc
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
var b bytes.Buffer
err := WriteQueryPatternsResponseJSON(tc.input, &b)

@ -33,8 +33,6 @@ func TestQueryStringBuilder(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
params := NewQueryStringBuilder()

@ -27,8 +27,6 @@ func TestStringsContain(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
t.Parallel()

@ -109,9 +109,6 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back
wg.Add(len(p.backends))
for i, b := range p.backends {
i := i
b := b
go func() {
defer wg.Done()
var (

@ -95,8 +95,6 @@ func Test_ProxyEndpoint_waitBackendResponseForDownstream(t *testing.T) {
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
endpoint := NewProxyEndpoint(testData.backends, "test", NewProxyMetrics(nil), log.NewNopLogger(), nil, false)
