chore: Linting update for new golangci (#16572)

pull/16575/head
Authored by Paul Rogers 10 months ago, committed by GitHub
parent a38bba9d90
commit 27431b7e7e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. clients/pkg/logentry/stages/labelallow.go (2 lines changed)
  2. clients/pkg/logentry/stages/labeldrop.go (2 lines changed)
  3. clients/pkg/promtail/client/client_writeto_test.go (4 lines changed)
  4. clients/pkg/promtail/wal/timer.go (26 lines changed)
  5. clients/pkg/promtail/wal/timer_test.go (12 lines changed)
  6. pkg/analytics/seed.go (2 lines changed)
  7. pkg/analytics/stats.go (24 lines changed)
  8. pkg/blockbuilder/scheduler/status.go (2 lines changed)
  9. pkg/bloombuild/builder/batch_test.go (4 lines changed)
  10. pkg/bloombuild/planner/plannertest/utils.go (8 lines changed)
  11. pkg/bloombuild/planner/tableIterator.go (4 lines changed)
  12. pkg/bloombuild/planner/versioned_range_test.go (4 lines changed)
  13. pkg/bloomgateway/util.go (8 lines changed)
  14. pkg/chunkenc/util_test.go (8 lines changed)
  15. pkg/compactor/generationnumber/gennumber_loader.go (10 lines changed)
  16. pkg/dataobj/internal/dataset/column_test.go (2 lines changed)
  17. pkg/distributor/tee.go (8 lines changed)
  18. pkg/ingester/checkpoint_test.go (6 lines changed)
  19. pkg/ingester/owned_streams.go (2 lines changed)
  20. pkg/iter/cache.go (8 lines changed)
  21. pkg/iter/v2/iter.go (4 lines changed)
  22. pkg/loghttp/push/push.go (12 lines changed)
  23. pkg/logql/log/filter.go (10 lines changed)
  24. pkg/logql/range_vector.go (16 lines changed)
  25. pkg/logql/sketch/cms.go (22 lines changed)
  26. pkg/logql/sketch/cms_test.go (8 lines changed)
  27. pkg/logql/sketch/topk_test.go (18 lines changed)
  28. pkg/logql/syntax/query_scanner.go (4 lines changed)
  29. pkg/pattern/iter/merge.go (4 lines changed)
  30. pkg/querier/queryrange/limits.go (4 lines changed)
  31. pkg/querier/queryrange/limits_test.go (8 lines changed)
  32. pkg/storage/bloom/v1/bounds.go (8 lines changed)
  33. pkg/storage/bloom/v1/builder_test.go (10 lines changed)
  34. pkg/storage/bloom/v1/filter/buckets_test.go (4 lines changed)
  35. pkg/storage/chunk/client/aws/dynamodb_storage_client.go (16 lines changed)
  36. pkg/storage/chunk/client/hedging/hedging.go (4 lines changed)
  37. pkg/storage/stores/shipper/bloomshipper/cache.go (4 lines changed)
  38. pkg/storage/stores/shipper/indexshipper/tsdb/index/postings.go (6 lines changed)
  39. pkg/storage/stores/shipper/indexshipper/tsdb/index/postingsstats.go (6 lines changed)
  40. pkg/storage/stores/shipper/indexshipper/tsdb/index/postingsstats_test.go (10 lines changed)
  41. pkg/storage/stores/shipper/indexshipper/tsdb/sharding/sharding_test.go (6 lines changed)
  42. pkg/tool/rules/compare.go (6 lines changed)
  43. pkg/util/loser/tree.go (4 lines changed)

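Every hunk below follows the same pattern: local names that shadow Go predeclared identifiers (min, max, new, close, cap, len, error) are renamed to non-shadowing ones (maxVal, newTee, closeFunc, capacity, errorStr, and so on). The commit doesn't say which golangci-lint check prompted this, but it matches a shadowing check such as predeclared, and the renames matter more since Go 1.21 promoted min and max to builtin functions. A minimal before/after illustration (hypothetical identifiers, not code from this repository):

package main

import "fmt"

// Before: `max` shadows the predeclared identifier, a builtin function
// since Go 1.21, so the builtin cannot be called anywhere in this scope.
func largestShadowing(xs []int) int {
	max := xs[0] // flagged: shadows predeclared `max`
	for _, x := range xs[1:] {
		if x > max {
			max = x
		}
	}
	return max
}

// After: renaming to maxVal (the convention used throughout this commit)
// keeps the builtin usable and satisfies the linter.
func largest(xs []int) int {
	maxVal := xs[0]
	for _, x := range xs[1:] {
		maxVal = max(maxVal, x) // builtin max, Go 1.21+
	}
	return maxVal
}

func main() {
	fmt.Println(largestShadowing([]int{3, 1, 4}), largest([]int{3, 1, 4}))
}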
@@ -17,7 +17,7 @@ const (
 type LabelAllowConfig []string
 func validateLabelAllowConfig(c LabelAllowConfig) error {
-if c == nil || len(c) < 1 {
+if len(c) < 1 {
 return errors.New(ErrEmptyLabelAllowStageConfig)
 }

@@ -17,7 +17,7 @@ const (
 type LabelDropConfig []string
 func validateLabelDropConfig(c LabelDropConfig) error {
-if c == nil || len(c) < 1 {
+if len(c) < 1 {
 return errors.New(ErrEmptyLabelDropStageConfig)
 }

@@ -222,9 +222,9 @@ func bench(numWriters, totalLines int, b *testing.B) {
 // 4. After all are written, call a SeriesReset. This will block the entire series map and will hopefully block
 // some other writing routine.
 func startWriter(segmentNum, seriesToReset int, target *clientWriteTo, lines int, series record.RefSeries, maxInitialSleep time.Duration) {
-randomSleepMax := func(max time.Duration) {
+randomSleepMax := func(maxVal time.Duration) {
 // random sleep to add some jitter
-s := int64(rand.Uint64()) % int64(max)
+s := int64(rand.Uint64()) % int64(maxVal)
 time.Sleep(time.Duration(s))
 }
 // random sleep to add some jitter

@@ -5,34 +5,34 @@ import "time"
 // backoffTimer is a time.Timer that allows one to move between a minimum and maximum interval, using an exponential backoff
 // strategy. It safely re-uses just one time.Timer instance internally.
 type backoffTimer struct {
-timer *time.Timer
-curr, min, max time.Duration
-C <-chan time.Time
+timer *time.Timer
+curr, minVal, maxVal time.Duration
+C <-chan time.Time
 }
-func newBackoffTimer(min, max time.Duration) *backoffTimer {
+func newBackoffTimer(minVal, maxVal time.Duration) *backoffTimer {
 // note that the first timer created will be stopped without ever consuming it, since it's once we can omit it
 // since the timer is recycled, we can keep the channel
-t := time.NewTimer(min)
+t := time.NewTimer(minVal)
 return &backoffTimer{
-timer: t,
-min: min,
-max: max,
-curr: min,
-C: t.C,
+timer: t,
+minVal: minVal,
+maxVal: maxVal,
+curr: minVal,
+C: t.C,
 }
 }
 func (bt *backoffTimer) backoff() {
 bt.curr = bt.curr * 2
-if bt.curr > bt.max {
-bt.curr = bt.max
+if bt.curr > bt.maxVal {
+bt.curr = bt.maxVal
 }
 bt.recycle()
 }
 func (bt *backoffTimer) reset() {
-bt.curr = bt.min
+bt.curr = bt.minVal
 bt.recycle()
 }

@@ -12,23 +12,23 @@ const (
 )
 func TestBackoffTimer(t *testing.T) {
-var min = time.Millisecond * 300
-var max = time.Second
-timer := newBackoffTimer(min, max)
+var minVal = time.Millisecond * 300
+var maxVal = time.Second
+timer := newBackoffTimer(minVal, maxVal)
 now := time.Now()
 <-timer.C
-require.WithinDuration(t, now.Add(min), time.Now(), delta, "expected backing off timer to fire in the minimum")
+require.WithinDuration(t, now.Add(minVal), time.Now(), delta, "expected backing off timer to fire in the minimum")
 // backoff, and expect it will take twice the time
 now = time.Now()
 timer.backoff()
 <-timer.C
-require.WithinDuration(t, now.Add(min*2), time.Now(), delta, "expected backing off timer to fire in the twice the minimum")
+require.WithinDuration(t, now.Add(minVal*2), time.Now(), delta, "expected backing off timer to fire in the twice the minimum")
 // backoff capped, backoff will actually be 1200ms, but capped at 1000
 now = time.Now()
 timer.backoff()
 <-timer.C
-require.WithinDuration(t, now.Add(max), time.Now(), delta, "expected backing off timer to fire in the max")
+require.WithinDuration(t, now.Add(maxVal), time.Now(), delta, "expected backing off timer to fire in the max")
 }

@@ -20,7 +20,7 @@ type ClusterSeed struct {
 // Merge implements the memberlist.Mergeable interface.
 // It allow to merge the content of two different seeds.
-func (c *ClusterSeed) Merge(mergeable memberlist.Mergeable, _ bool) (change memberlist.Mergeable, error error) {
+func (c *ClusterSeed) Merge(mergeable memberlist.Mergeable, _ bool) (change memberlist.Mergeable, err error) {
 if mergeable == nil {
 return nil, nil
 }

@@ -312,17 +312,17 @@ func (s *Statistics) String() string {
 func (s *Statistics) Value() map[string]interface{} {
 stdvar := s.value.Load() / float64(s.count.Load())
 stddev := math.Sqrt(stdvar)
-min := s.min.Load()
-max := s.max.Load()
+minVal := s.min.Load()
+maxVal := s.max.Load()
 result := map[string]interface{}{
 "avg": s.avg.Load(),
 "count": s.count.Load(),
 }
-if !math.IsInf(min, 0) {
-result["min"] = min
+if !math.IsInf(minVal, 0) {
+result["min"] = minVal
 }
-if !math.IsInf(max, 0) {
-result["max"] = s.max.Load()
+if !math.IsInf(maxVal, 0) {
+result["max"] = maxVal
 }
 if !math.IsNaN(stddev) {
 result["stddev"] = stddev
@@ -335,20 +335,20 @@ func (s *Statistics) Value() map[string]interface{} {
 func (s *Statistics) Record(v float64) {
 for {
-min := s.min.Load()
-if min <= v {
+minVal := s.min.Load()
+if minVal <= v {
 break
 }
-if s.min.CompareAndSwap(min, v) {
+if s.min.CompareAndSwap(minVal, v) {
 break
 }
 }
 for {
-max := s.max.Load()
-if max >= v {
+maxVal := s.max.Load()
+if maxVal >= v {
 break
 }
-if s.max.CompareAndSwap(max, v) {
+if s.max.CompareAndSwap(maxVal, v) {
 break
 }
 }

@@ -15,7 +15,7 @@ import (
 var defaultPageContent string
 var defaultPageTemplate = template.Must(template.New("webpage").Funcs(template.FuncMap{
 "durationSince": func(t time.Time) string { return time.Since(t).Truncate(time.Second).String() },
-"offsetsLen": func(min, max int64) int64 { return max - min },
+"offsetsLen": func(minVal, maxVal int64) int64 { return maxVal - minVal },
 "humanize": humanize.Comma,
 }).Parse(defaultPageContent))

@@ -209,8 +209,8 @@ func TestOverlappingBlocksIter(t *testing.T) {
 }
 }
-func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef {
-bounds := v1.NewBounds(min, max)
+func genBlockRef(minVal, maxVal model.Fingerprint) bloomshipper.BlockRef {
+bounds := v1.NewBounds(minVal, maxVal)
 return bloomshipper.BlockRef{
 Ref: bloomshipper.Ref{
 Bounds: bounds,

@@ -24,13 +24,13 @@ func TsdbID(n int) tsdb.SingleTenantTSDBIdentifier {
 }
 }
-func GenMeta(min, max model.Fingerprint, sources []int, blocks []bloomshipper.BlockRef) bloomshipper.Meta {
+func GenMeta(minVal, maxVal model.Fingerprint, sources []int, blocks []bloomshipper.BlockRef) bloomshipper.Meta {
 m := bloomshipper.Meta{
 MetaRef: bloomshipper.MetaRef{
 Ref: bloomshipper.Ref{
 TenantID: "fakeTenant",
 TableName: TestTable.Addr(),
-Bounds: v1.NewBounds(min, max),
+Bounds: v1.NewBounds(minVal, maxVal),
 },
 },
 Blocks: blocks,
@@ -41,13 +41,13 @@ func GenMeta(min, max model.Fingerprint, sources []int, blocks []bloomshipper.Bl
 return m
 }
-func GenBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef {
+func GenBlockRef(minVal, maxVal model.Fingerprint) bloomshipper.BlockRef {
 startTS, endTS := TestDay.Bounds()
 return bloomshipper.BlockRef{
 Ref: bloomshipper.Ref{
 TenantID: "fakeTenant",
 TableName: TestTable.Addr(),
-Bounds: v1.NewBounds(min, max),
+Bounds: v1.NewBounds(minVal, maxVal),
 StartTimestamp: startTS,
 EndTimestamp: endTS,
 Checksum: 0,

@@ -13,8 +13,8 @@ type dayRangeIterator struct {
 err error
 }
-func newDayRangeIterator(min, max config.DayTime, schemaCfg config.SchemaConfig) *dayRangeIterator {
-return &dayRangeIterator{min: min, max: max, cur: min.Dec(), schemaCfg: schemaCfg}
+func newDayRangeIterator(minVal, maxVal config.DayTime, schemaCfg config.SchemaConfig) *dayRangeIterator {
+return &dayRangeIterator{min: minVal, max: maxVal, cur: minVal.Dec(), schemaCfg: schemaCfg}
 }
 func (r *dayRangeIterator) TotalDays() int {

@@ -20,8 +20,8 @@ func Test_TsdbTokenRange(t *testing.T) {
 added bool
 err bool
 }
-mk := func(version int, min, max model.Fingerprint) addition {
-return addition{version, v1.FingerprintBounds{Min: min, Max: max}}
+mk := func(version int, minVal, maxVal model.Fingerprint) addition {
+return addition{version, v1.FingerprintBounds{Min: minVal, Max: maxVal}}
 }
 tok := func(version int, through model.Fingerprint) tsdbToken {
 return tsdbToken{version: version, through: through}

@@ -57,20 +57,20 @@ func partitionTasksByBlock(tasks []Task, blocks []bloomshipper.BlockRef) []block
 for _, task := range tasks {
 refs := task.series
-min := sort.Search(len(refs), func(i int) bool {
+minVal := sort.Search(len(refs), func(i int) bool {
 return block.Cmp(refs[i].Fingerprint) > v1.Before
 })
-max := sort.Search(len(refs), func(i int) bool {
+maxVal := sort.Search(len(refs), func(i int) bool {
 return block.Cmp(refs[i].Fingerprint) == v1.After
 })
 // All fingerprints fall outside of the consumer's range
-if min == len(refs) || max == 0 || min == max {
+if minVal == len(refs) || maxVal == 0 || minVal == maxVal {
 continue
 }
-bounded.tasks = append(bounded.tasks, task.Copy(refs[min:max]))
+bounded.tasks = append(bounded.tasks, task.Copy(refs[minVal:maxVal]))
 }
 if len(bounded.tasks) > 0 {

@@ -48,7 +48,7 @@ func fillChunk(c Chunk) int64 {
 return fillChunkClose(c, true)
 }
-func fillChunkClose(c Chunk, close bool) int64 {
+func fillChunkClose(c Chunk, doClose bool) int64 {
 i := int64(0)
 inserted := int64(0)
 entry := &logproto.Entry{
@@ -73,13 +73,13 @@ func fillChunkClose(c Chunk, close bool) int64 {
 entry.Line = testdata.LogString(i)
 }
-if close {
+if doClose {
 _ = c.Close()
 }
 return inserted
 }
-func fillChunkRandomOrder(c Chunk, close bool) {
+func fillChunkRandomOrder(c Chunk, doClose bool) {
 ub := int64(1 << 30)
 i := int64(0)
 random := rand.New(rand.NewSource(42))
@@ -98,7 +98,7 @@ func fillChunkRandomOrder(c Chunk, close bool) {
 entry.Line = testdata.LogString(i)
 }
-if close {
+if doClose {
 _ = c.Close()
 }
 }

@@ -100,7 +100,7 @@ func (l *GenNumberLoader) GetResultsCacheGenNumber(tenantIDs []string) string {
 }
 func (l *GenNumberLoader) getCacheGenNumbersPerTenants(tenantIDs []string) string {
-var max int
+var maxVal int
 for _, tenantID := range tenantIDs {
 genNumber := l.getCacheGenNumber(tenantID)
 if genNumber == "" {
@@ -112,15 +112,15 @@ func (l *GenNumberLoader) getCacheGenNumbersPerTenants(tenantIDs []string) strin
 level.Error(log.Logger).Log("msg", "error parsing resultsCacheGenNumber", "user", tenantID, "err", err)
 }
-if number > max {
-max = number
+if number > maxVal {
+maxVal = number
 }
 }
-if max == 0 {
+if maxVal == 0 {
 return ""
 }
-return fmt.Sprint(max)
+return fmt.Sprint(maxVal)
 }
 func (l *GenNumberLoader) getCacheGenNumber(userID string) string {

@@ -198,7 +198,7 @@ func TestColumnBuilder_Cardinality(t *testing.T) {
 require.Equal(t, uint64(3), col.Info.Statistics.CardinalityCount)
 }
-func getMinMax(t *testing.T, stats *datasetmd.Statistics) (min, max Value) {
+func getMinMax(t *testing.T, stats *datasetmd.Statistics) (minVal, maxVal Value) {
 t.Helper()
 require.NotNil(t, stats)

@@ -6,14 +6,14 @@ type Tee interface {
 }
 // WrapTee wraps a new Tee around an existing Tee.
-func WrapTee(existing, new Tee) Tee {
+func WrapTee(existing, newTee Tee) Tee {
 if existing == nil {
-return new
+return newTee
 }
 if multi, ok := existing.(*multiTee); ok {
-return &multiTee{append(multi.tees, new)}
+return &multiTee{append(multi.tees, newTee)}
 }
-return &multiTee{tees: []Tee{existing, new}}
+return &multiTee{tees: []Tee{existing, newTee}}
 }
 type multiTee struct {

@@ -332,18 +332,18 @@ func TestIngesterWALBackpressureCheckpoint(t *testing.T) {
 require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
 }
-func expectCheckpoint(t *testing.T, walDir string, shouldExist bool, max time.Duration) {
+func expectCheckpoint(t *testing.T, walDir string, shouldExist bool, maxVal time.Duration) {
 once := make(chan struct{}, 1)
 once <- struct{}{}
-deadline := time.After(max)
+deadline := time.After(maxVal)
 for {
 select {
 case <-deadline:
 require.Fail(t, "timeout while waiting for checkpoint existence:", shouldExist)
 case <-once: // Trick to ensure we check immediately before deferring to ticker.
 default:
-<-time.After(max / 10) // check 10x over the duration
+<-time.After(maxVal / 10) // check 10x over the duration
 }
 fs, err := os.ReadDir(walDir)

@@ -43,7 +43,7 @@ func (s *ownedStreamService) getOwnedStreamCount() int {
 return int(s.ownedStreamCount.Load())
 }
-func (s *ownedStreamService) updateFixedLimit() (old, new int32) {
+func (s *ownedStreamService) updateFixedLimit() (old, newVal int32) {
 newLimit, _, _, _ := s.limiter.GetStreamCountLimit(s.tenantID)
 return s.fixedLimit.Swap(int32(newLimit)), int32(newLimit)

@@ -23,10 +23,10 @@ type cachedIterator struct {
 // NewCachedIterator creates an iterator that cache iteration result and can be iterated again
 // after closing it without re-using the underlaying iterator `it`.
-func NewCachedIterator(it EntryIterator, cap int) CacheEntryIterator {
+func NewCachedIterator(it EntryIterator, capacity int) CacheEntryIterator {
 c := &cachedIterator{
 wrapped: it,
-cache: make([]entryWithLabels, 0, cap),
+cache: make([]entryWithLabels, 0, capacity),
 curr: -1,
 }
 return c
@@ -120,10 +120,10 @@ type cachedSampleIterator struct {
 // NewCachedSampleIterator creates an iterator that cache iteration result and can be iterated again
 // after closing it without re-using the underlaying iterator `it`.
-func NewCachedSampleIterator(it SampleIterator, cap int) CacheSampleIterator {
+func NewCachedSampleIterator(it SampleIterator, capacity int) CacheSampleIterator {
 c := &cachedSampleIterator{
 wrapped: it,
-cache: make([]sampleWithLabels, 0, cap),
+cache: make([]sampleWithLabels, 0, capacity),
 curr: -1,
 }
 return c

@@ -227,10 +227,10 @@ func (it *CounterIter[T]) Count() int {
 return it.count
 }
-func WithClose[T any](itr Iterator[T], close func() bool) *CloseIter[T] {
+func WithClose[T any](itr Iterator[T], closeFunc func() bool) *CloseIter[T] {
 return &CloseIter[T]{
 Iterator: itr,
-close: close,
+close: closeFunc,
 }
 }

@@ -95,7 +95,7 @@ func (EmptyLimits) PolicyFor(_ string, _ labels.Labels) string {
 }
 // StreamResolver is a request-scoped interface that provides retention period and policy for a given stream.
-// The values returned by the resolver will not chance throught the handling of the request
+// The values returned by the resolver will not chance thought the handling of the request
 type StreamResolver interface {
 RetentionPeriodFor(lbs labels.Labels) time.Duration
 RetentionHoursFor(lbs labels.Labels) string
@@ -105,7 +105,7 @@ type StreamResolver interface {
 type (
 RequestParser func(userID string, r *http.Request, limits Limits, tracker UsageTracker, streamResolver StreamResolver, logPushRequestStreams bool, logger log.Logger) (*logproto.PushRequest, *Stats, error)
 RequestParserWrapper func(inner RequestParser) RequestParser
-ErrorWriter func(w http.ResponseWriter, error string, code int, logger log.Logger)
+ErrorWriter func(w http.ResponseWriter, errorStr string, code int, logger log.Logger)
 )
 type PolicyWithRetentionWithBytes map[string]map[time.Duration]int64
@@ -376,7 +376,7 @@ func RetentionPeriodToString(retentionPeriod time.Duration) string {
 // > 503 Service Unavailable
 // > 504 Gateway Timeout
 // In loki, we expect clients to retry on 500 errors, so we map 500 errors to 503.
-func OTLPError(w http.ResponseWriter, error string, code int, logger log.Logger) {
+func OTLPError(w http.ResponseWriter, errorStr string, code int, logger log.Logger) {
 // Map 500 errors to 503. 500 errors are never retried on the client side, but 503 are.
 if code == http.StatusInternalServerError {
 code = http.StatusServiceUnavailable
@@ -386,7 +386,7 @@ func OTLPError(w http.ResponseWriter, error string, code int, logger log.Logger)
 w.WriteHeader(code)
 // Status 0 because we omit the Status.code field.
-status := grpcstatus.New(0, error).Proto()
+status := grpcstatus.New(0, errorStr).Proto()
 respBytes, err := proto.Marshal(status)
 if err != nil {
 level.Error(logger).Log("msg", "failed to marshal error response", "error", err)
@@ -411,8 +411,8 @@ func OTLPError(w http.ResponseWriter, error string, code int, logger log.Logger)
 var _ ErrorWriter = OTLPError
-func HTTPError(w http.ResponseWriter, error string, code int, _ log.Logger) {
-http.Error(w, error, code)
+func HTTPError(w http.ResponseWriter, errorStr string, code int, _ log.Logger) {
+http.Error(w, errorStr, code)
 }
 var _ ErrorWriter = HTTPError

@@ -322,16 +322,16 @@ func newOrFilter(left MatcherFilterer, right MatcherFilterer) MatcherFilterer {
 }
 // ChainOrMatcherFilterer is a syntax sugar to chain multiple `or` filters. (1 or many)
-func ChainOrMatcherFilterer(curr, new MatcherFilterer) MatcherFilterer {
+func ChainOrMatcherFilterer(curr, newFilterer MatcherFilterer) MatcherFilterer {
 if curr == nil {
-return new
+return newFilterer
 }
-return newOrFilter(curr, new)
+return newOrFilter(curr, newFilterer)
 }
 // ChainOrFilter is a syntax sugar to chain multiple `or` filters. (1 or many)
-func ChainOrFilter(curr, new Filterer) Filterer {
-return ChainOrMatcherFilterer(WrapFilterer(curr), WrapFilterer(new))
+func ChainOrFilter(curr, newFilterer Filterer) Filterer {
+return ChainOrMatcherFilterer(WrapFilterer(curr), WrapFilterer(newFilterer))
 }
 func (a orFilter) Filter(line []byte) bool {

@@ -403,23 +403,23 @@ func avgOverTime(samples []promql.FPoint) float64 {
 }
 func maxOverTime(samples []promql.FPoint) float64 {
-max := samples[0].F
+maxVal := samples[0].F
 for _, v := range samples {
-if v.F > max || math.IsNaN(max) {
-max = v.F
+if v.F > maxVal || math.IsNaN(maxVal) {
+maxVal = v.F
 }
 }
-return max
+return maxVal
 }
 func minOverTime(samples []promql.FPoint) float64 {
-min := samples[0].F
+minVal := samples[0].F
 for _, v := range samples {
-if v.F < min || math.IsNaN(min) {
-min = v.F
+if v.F < minVal || math.IsNaN(minVal) {
+minVal = v.F
 }
 }
-return min
+return minVal
 }
 // stdvarOverTime calculates the variance using Welford's online algorithm.

@@ -72,26 +72,26 @@ func (s *CountMinSketch) Increment(event []byte) {
 func (s *CountMinSketch) ConservativeAdd(event []byte, count float64) (float64, uint32, uint32) {
 s.HyperLogLog.Insert(event)
-min := float64(math.MaxUint64)
+minVal := float64(math.MaxUint64)
 h1, h2 := hashn(event)
 // inline Count to save time/memory
 var pos uint32
 for i := uint32(0); i < s.Depth; i++ {
 pos = s.getPos(h1, h2, i)
-if s.Counters[i][pos] < min {
-min = s.Counters[i][pos]
+if s.Counters[i][pos] < minVal {
+minVal = s.Counters[i][pos]
 }
 }
-min += count
+minVal += count
 for i := uint32(0); i < s.Depth; i++ {
 pos = s.getPos(h1, h2, i)
 v := s.Counters[i][pos]
-if v < min {
-s.Counters[i][pos] = min
+if v < minVal {
+s.Counters[i][pos] = minVal
 }
 }
-return min, h1, h2
+return minVal, h1, h2
 }
 func (s *CountMinSketch) ConservativeIncrement(event []byte) (float64, uint32, uint32) {
@@ -100,17 +100,17 @@ func (s *CountMinSketch) ConservativeIncrement(event []byte) (float64, uint32, u
 // Count returns the approximate min count for the given input.
 func (s *CountMinSketch) Count(event []byte) float64 {
-min := float64(math.MaxUint64)
+minVal := float64(math.MaxUint64)
 h1, h2 := hashn(event)
 var pos uint32
 for i := uint32(0); i < s.Depth; i++ {
 pos = s.getPos(h1, h2, i)
-if s.Counters[i][pos] < min {
-min = s.Counters[i][pos]
+if s.Counters[i][pos] < minVal {
+minVal = s.Counters[i][pos]
 }
 }
-return min
+return minVal
 }
 // Merge the given sketch into this one.
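ConservativeAdd above is the conservative-update variant of a count-min sketch increment: it reads the minimum estimate across all rows first, then raises only the counters that fall below estimate+count, which trims the overestimation that plain per-row increments accumulate from hash collisions. A standalone sketch of just that rule, with a hypothetical hashAt standing in for Loki's hashn/getPos pair:

package main

import (
	"fmt"
	"hash/fnv"
	"math"
)

// hashAt is a stand-in per-row hash mapping an event to a column index.
func hashAt(row int, event []byte, width int) int {
	h := fnv.New64a()
	h.Write([]byte{byte(row)})
	h.Write(event)
	return int(h.Sum64() % uint64(width))
}

// conservativeAdd raises each of the event's cells only up to the new
// minimum estimate; cells already at or above it are left untouched.
func conservativeAdd(counters [][]float64, event []byte, count float64) float64 {
	width := len(counters[0])
	est := math.MaxFloat64
	for row := range counters {
		if v := counters[row][hashAt(row, event, width)]; v < est {
			est = v
		}
	}
	est += count
	for row := range counters {
		pos := hashAt(row, event, width)
		if counters[row][pos] < est {
			counters[row][pos] = est
		}
	}
	return est
}

func main() {
	counters := make([][]float64, 4)
	for i := range counters {
		counters[i] = make([]float64, 64)
	}
	fmt.Println(conservativeAdd(counters, []byte("stream-a"), 1)) // 1
}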

@@ -18,13 +18,13 @@ func TestCMS(_ *testing.T) {
 numStreams := 10
 maxPerStream := 100
 events := make([]event, 0)
-max := int64(0)
+maxVal := int64(0)
 for j := 0; j < numStreams-k; j++ {
 num := int64(maxPerStream)
 n := rand.Int63n(num) + 1
-if n > max {
-max = n
+if n > maxVal {
+maxVal = n
 }
 for z := 0; z < int(n); z++ {
 events = append(events, event{name: strconv.Itoa(j), count: 1})
@@ -32,7 +32,7 @@ }
 }
 // then another set of things more than the max of the previous entries
 for z := numStreams - k; z < numStreams; z++ {
-n := rand.Int63n(int64(maxPerStream)) + 1 + max
+n := rand.Int63n(int64(maxPerStream)) + 1 + maxVal
 for x := 0; x < int(n); x++ {
 events = append(events, event{name: strconv.Itoa(z), count: 1})
 }

@@ -21,20 +21,20 @@ type event struct {
 }
 func TestTopkCardinality(t *testing.T) {
-max := 1000000
+maxVal := 1000000
 topk, err := newCMSTopK(100, 10, 10)
 assert.NoError(t, err)
-for i := 0; i < max; i++ {
+for i := 0; i < maxVal; i++ {
 topk.Observe(strconv.Itoa(i))
 }
 c, bigEnough := topk.Cardinality()
 // hll has a typical error accuracy of 2%
-assert.True(t, (c >= uint64(float64(max)*0.98)) && (c <= uint64(float64(max)*1.02)))
+assert.True(t, (c >= uint64(float64(maxVal)*0.98)) && (c <= uint64(float64(maxVal)*1.02)))
 assert.False(t, bigEnough)
-topk, err = NewCMSTopkForCardinality(nil, 100, max)
+topk, err = NewCMSTopkForCardinality(nil, 100, maxVal)
 assert.NoError(t, err)
-for i := 0; i < max; i++ {
+for i := 0; i < maxVal; i++ {
 topk.Observe(strconv.Itoa(i))
 }
 c, bigEnough = topk.Cardinality()
@@ -47,14 +47,14 @@ func TestTopK_Merge(t *testing.T) {
 k := 1
 maxPerStream := 1000
 events := make([]event, 0)
-max := int64(0)
+maxVal := int64(0)
 r := rand.New(rand.NewSource(99))
 for i := 0; i < nStreams-k; i++ {
 num := int64(maxPerStream)
 n := r.Int63n(num) + 1
-if n > max {
-max = n
+if n > maxVal {
+maxVal = n
 }
 for j := 0; j < int(n); j++ {
 events = append(events, event{name: strconv.Itoa(i), count: 1})
@@ -62,7 +62,7 @@ }
 }
 // then another set of things more than the max of the previous entries
 for i := nStreams - k; i < nStreams; i++ {
-n := rand.Int63n(int64(maxPerStream)) + 1 + max
+n := rand.Int63n(int64(maxPerStream)) + 1 + maxVal
 for j := 0; j < int(n); j++ {
 events = append(events, event{name: strconv.Itoa(i), count: 1})
 }

@@ -364,12 +364,12 @@ func isHex(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= lower(ch)
 func (s *Scanner) digits(ch0 rune, base int, invalid *rune) (ch rune, digsep int) {
 ch = ch0
 if base <= 10 {
-max := rune('0' + base)
+maxVal := rune('0' + base)
 for isDecimal(ch) || ch == '_' {
 ds := 1
 if ch == '_' {
 ds = 2
-} else if ch >= max && *invalid == 0 {
+} else if ch >= maxVal && *invalid == 0 {
 *invalid = ch
 }
 digsep |= ds

@@ -19,13 +19,13 @@ type patternSample struct {
 sample logproto.PatternSample
 }
-var max = patternSample{
+var maxSample = patternSample{
 pattern: "",
 sample: logproto.PatternSample{Timestamp: math.MaxInt64},
 }
 func NewMerge(iters ...Iterator) Iterator {
-tree := loser.New(iters, max, func(s Iterator) patternSample {
+tree := loser.New(iters, maxSample, func(s Iterator) patternSample {
 return patternSample{
 pattern: s.Pattern(),
 sample: s.At(),

@@ -460,9 +460,9 @@ type SemaphoreWithTiming struct {
 sem *semaphore.Weighted
 }
-func NewSemaphoreWithTiming(max int64) *SemaphoreWithTiming {
+func NewSemaphoreWithTiming(maxVal int64) *SemaphoreWithTiming {
 return &SemaphoreWithTiming{
-sem: semaphore.NewWeighted(max),
+sem: semaphore.NewWeighted(maxVal),
 }
 }

@@ -231,11 +231,11 @@ func Test_MaxQueryParallelism(t *testing.T) {
 maxQueryParallelism := 2
 var count atomic.Int32
-var max atomic.Int32
+var maxVal atomic.Int32
 h := base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) {
 cur := count.Inc()
-if cur > max.Load() {
-max.Store(cur)
+if cur > maxVal.Load() {
+maxVal.Store(cur)
 }
 defer count.Dec()
 // simulate some work
@@ -261,7 +261,7 @@ })
 })
 }),
 ).Do(ctx, &LokiRequest{})
-maxFound := int(max.Load())
+maxFound := int(maxVal.Load())
 require.LessOrEqual(t, maxFound, maxQueryParallelism, "max query parallelism: ", maxFound, " went over the configured one:", maxQueryParallelism)
 }

@@ -59,8 +59,8 @@ func ParseBoundsFromParts(a, b string) (FingerprintBounds, error) {
 return NewBounds(minFingerprint, maxFingerprint), nil
 }
-func NewBounds(min, max model.Fingerprint) FingerprintBounds {
-return FingerprintBounds{Min: min, Max: max}
+func NewBounds(minVal, maxVal model.Fingerprint) FingerprintBounds {
+return FingerprintBounds{Min: minVal, Max: maxVal}
 }
 func (b FingerprintBounds) Hash(h hash.Hash32) error {
@@ -120,8 +120,8 @@ func (b FingerprintBounds) Bounds() (model.Fingerprint, model.Fingerprint) {
 }
 // Slice returns a new fingerprint bounds clipped to the target bounds or nil if there is no overlap
-func (b FingerprintBounds) Slice(min, max model.Fingerprint) *FingerprintBounds {
-return b.Intersection(FingerprintBounds{Min: min, Max: max})
+func (b FingerprintBounds) Slice(minVal, maxVal model.Fingerprint) *FingerprintBounds {
+return b.Intersection(FingerprintBounds{Min: minVal, Max: maxVal})
 }
 // Within returns whether the fingerprint is fully within the target bounds

@@ -221,10 +221,10 @@ func TestMergeBuilder(t *testing.T) {
 indexBuf := bytes.NewBuffer(nil)
 bloomsBuf := bytes.NewBuffer(nil)
-min := i * numSeries / nBlocks
-max := (i + 2) * numSeries / nBlocks // allow some overlap
-if max > len(data) {
-max = len(data)
+minVal := i * numSeries / nBlocks
+maxVal := (i + 2) * numSeries / nBlocks // allow some overlap
+if maxVal > len(data) {
+maxVal = len(data)
 }
 writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
@@ -236,7 +236,7 @@ )
 )
 require.Nil(t, err)
-itr := iter.NewSliceIter(data[min:max])
+itr := iter.NewSliceIter(data[minVal:maxVal])
 _, err = builder.BuildFrom(itr)
 require.Nil(t, err)
 blocks = append(blocks, iter.NewPeekIter(NewBlockQuerier(NewBlock(reader, NewMetrics(nil)), &mempool.SimpleHeapAllocator{}, DefaultMaxPageSize).Iter()))

@@ -22,8 +22,8 @@ import (
 func TestMaxBucketValue(t *testing.T) {
 b := NewBuckets(10, 2)
-if max := b.MaxBucketValue(); max != 3 {
-t.Errorf("Expected 3, got %d", max)
+if maxVal := b.MaxBucketValue(); maxVal != 3 {
+t.Errorf("Expected 3, got %d", maxVal)
 }
 }

@@ -691,12 +691,12 @@ func (b dynamoDBWriteBatch) Delete(tableName, hashValue string, rangeValue []byt
 })
 }
-// Fill 'b' with WriteRequests from 'from' until 'b' has at most max requests. Remove those requests from 'from'.
-func (b dynamoDBWriteBatch) TakeReqs(from dynamoDBWriteBatch, max int) {
+// Fill 'b' with WriteRequests from 'from' until 'b' has at most maxVal requests. Remove those requests from 'from'.
+func (b dynamoDBWriteBatch) TakeReqs(from dynamoDBWriteBatch, maxVal int) {
 outLen, inLen := b.Len(), from.Len()
 toFill := inLen
-if max > 0 {
-toFill = min(inLen, max-outLen)
+if maxVal > 0 {
+toFill = min(inLen, maxVal-outLen)
 }
 for toFill > 0 {
 for tableName, fromReqs := range from {
@@ -738,12 +738,12 @@ func (b dynamoDBReadRequest) Add(tableName, hashValue string, rangeValue []byte)
 })
 }
-// Fill 'b' with ReadRequests from 'from' until 'b' has at most max requests. Remove those requests from 'from'.
-func (b dynamoDBReadRequest) TakeReqs(from dynamoDBReadRequest, max int) {
+// Fill 'b' with ReadRequests from 'from' until 'b' has at most maxVal requests. Remove those requests from 'from'.
+func (b dynamoDBReadRequest) TakeReqs(from dynamoDBReadRequest, maxVal int) {
 outLen, inLen := b.Len(), from.Len()
 toFill := inLen
-if max > 0 {
-toFill = min(inLen, max-outLen)
+if maxVal > 0 {
+toFill = min(inLen, maxVal-outLen)
 }
 for toFill > 0 {
 for tableName, fromReqs := range from {

@@ -118,10 +118,10 @@ type limitedHedgingRoundTripper struct {
 limiter *rate.Limiter
 }
-func newLimitedHedgingRoundTripper(max int, next http.RoundTripper) *limitedHedgingRoundTripper {
+func newLimitedHedgingRoundTripper(maxVal int, next http.RoundTripper) *limitedHedgingRoundTripper {
 return &limitedHedgingRoundTripper{
 next: next,
-limiter: rate.NewLimiter(rate.Limit(max), max),
+limiter: rate.NewLimiter(rate.Limit(maxVal), maxVal),
 }
 }

@@ -167,13 +167,13 @@ func (b *BlockDirectory) resolveSize() error {
 // The passed function `close` is called when the the returned querier is closed.
 func (b BlockDirectory) BlockQuerier(
 alloc mempool.Allocator,
-close func() error,
+closeFunc func() error,
 maxPageSize int,
 metrics *v1.Metrics,
 ) *CloseableBlockQuerier {
 return &CloseableBlockQuerier{
 BlockQuerier: v1.NewBlockQuerier(b.Block(metrics), alloc, maxPageSize),
 BlockRef: b.BlockRef,
-close: close,
+close: closeFunc,
 }
 }

@@ -853,11 +853,11 @@ type ShardedPostings struct {
 // ---[shard0]--- # Shard membership
 // -[--shard0--]- # Series returned by shardedPostings
 func NewShardedPostings(p Postings, fpFilter FingerprintFilter, offsets FingerprintOffsets) *ShardedPostings {
-min, max := offsets.Range(fpFilter)
+minVal, maxVal := offsets.Range(fpFilter)
 return &ShardedPostings{
 p: p,
-minOffset: min,
-maxOffset: max,
+minOffset: minVal,
+maxOffset: maxVal,
 }
 }

@@ -31,10 +31,10 @@ type maxHeap struct {
 Items []Stat
 }
-func (m *maxHeap) init(len int) {
-m.maxLength = len
+func (m *maxHeap) init(lenVal int) {
+m.maxLength = lenVal
 m.minValue = math.MaxUint64
-m.Items = make([]Stat, 0, len)
+m.Items = make([]Stat, 0, lenVal)
 }
 func (m *maxHeap) push(item Stat) {

@@ -20,10 +20,10 @@ import (
 func TestPostingsStats(t *testing.T) {
 stats := &maxHeap{}
-max := 3000000
+maxVal := 3000000
 heapLength := 10
 stats.init(heapLength)
-for i := 0; i < max; i++ {
+for i := 0; i < maxVal; i++ {
 item := Stat{
 Name: "Label-da",
 Count: uint64(i),
@@ -35,7 +35,7 @@ func TestPostingsStats(t *testing.T) {
 data := stats.get()
 require.Equal(t, 10, len(data))
 for i := 0; i < heapLength; i++ {
-require.Equal(t, uint64(max-i), data[i].Count)
+require.Equal(t, uint64(maxVal-i), data[i].Count)
 }
 }
@@ -57,12 +57,12 @@ func TestPostingsStats2(t *testing.T) {
 func BenchmarkPostingStatsMaxHep(b *testing.B) {
 stats := &maxHeap{}
-max := 9000000
+maxVal := 9000000
 heapLength := 10
 b.ResetTimer()
 for n := 0; n < b.N; n++ {
 stats.init(heapLength)
-for i := 0; i < max; i++ {
+for i := 0; i < maxVal; i++ {
 item := Stat{
 Name: "Label-da",
 Count: uint64(i),

@@ -36,11 +36,11 @@ func TestSizedFPs_Sort(t *testing.T) {
 }
 func TestSizedFPs_ShardsFor(t *testing.T) {
-mkShard := func(min, max model.Fingerprint, streams, chks, entries, bytes uint64) logproto.Shard {
+mkShard := func(minVal, maxVal model.Fingerprint, streams, chks, entries, bytes uint64) logproto.Shard {
 return logproto.Shard{
 Bounds: logproto.FPBounds{
-Min: min,
-Max: max,
+Min: minVal,
+Max: maxVal,
 },
 Stats: &stats.Stats{
 Streams: streams,

@@ -131,9 +131,9 @@ func rulesEqual(a, b *rulefmt.RuleNode) bool {
 // CompareNamespaces returns the differences between the two provided
 // namespaces
-func CompareNamespaces(original, new RuleNamespace) NamespaceChange {
+func CompareNamespaces(original, newNamespace RuleNamespace) NamespaceChange {
 result := NamespaceChange{
-Namespace: new.Namespace,
+Namespace: newNamespace.Namespace,
 State: Unchanged,
 GroupsUpdated: []UpdatedRuleGroup{},
 GroupsCreated: []rwrulefmt.RuleGroup{},
@@ -145,7 +145,7 @@ func CompareNamespaces(original, new RuleNamespace) NamespaceChange {
 origMap[g.Name] = g
 }
-for _, newGroup := range new.Groups {
+for _, newGroup := range newNamespace.Groups {
 origGroup, found := origMap[newGroup.Name]
 if !found {
 result.State = Updated

@@ -6,13 +6,13 @@ type Sequence interface {
 Next() bool // Advances and returns true if there is a value at this new position.
 }
-func New[E any, S Sequence](sequences []S, maxVal E, at func(S) E, less func(E, E) bool, close func(S)) *Tree[E, S] {
+func New[E any, S Sequence](sequences []S, maxVal E, at func(S) E, less func(E, E) bool, closeFunc func(S)) *Tree[E, S] {
 nSequences := len(sequences)
 t := Tree[E, S]{
 maxVal: maxVal,
 at: at,
 less: less,
-close: close,
+close: closeFunc,
 nodes: make([]node[E, S], nSequences*2),
 }
 for i, s := range sequences {
