Rename fudge_duplicate_timestamp to be increment_duplicate_timestamp (#6120)

* Rename fudge_duplicate_timestamp to be increment_duplicate_timestamp

* run `gofmt -d -w pkg/validation/limits.go`

Co-authored-by: Christian Simon <simon@swine.de>
pull/6207/head
Karen Miller 4 years ago committed by GitHub
parent 3c334001cd
commit b90c460769
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 19
      docs/sources/configuration/_index.md
  2. 6
      pkg/distributor/distributor.go
  3. 44
      pkg/distributor/distributor_test.go
  4. 2
      pkg/distributor/limits.go
  5. 22
      pkg/distributor/validator.go
  6. 32
      pkg/validation/limits.go

@ -2163,15 +2163,16 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# CLI flag: -distributor.max-line-size-truncate # CLI flag: -distributor.max-line-size-truncate
[max_line_size_truncate: <boolean> | default = false ] [max_line_size_truncate: <boolean> | default = false ]
-# Fudge the log line timestamp during ingestion when it's the same as the previous entry for the same stream
-# When enabled, if a log line in a push request has the same timestamp as the previous line
-# for the same stream, one nanosecond is added to the log line. This will preserve the received
-# order of log lines with the exact same timestamp when they are queried by slightly altering
-# their stored timestamp. NOTE: this is imperfect because Loki accepts out of order writes
-# and another push request for the same stream could contain duplicate timestamps to existing
-# entries and they will not be fudged.
-# CLI flag: -validation.fudge-duplicate-timestamps
-[fudge_duplicate_timestamp: <boolean> | default = false ]
+# Alter the log line timestamp during ingestion when the timestamp is the same as the
+# previous entry for the same stream. When enabled, if a log line in a push request has
+# the same timestamp as the previous line for the same stream, one nanosecond is added
+# to the log line. This will preserve the received order of log lines with the exact
+# same timestamp when they are queried, by slightly altering their stored timestamp.
+# NOTE: This is imperfect, because Loki accepts out of order writes, and another push
+# request for the same stream could contain duplicate timestamps to existing
+# entries and they will not be incremented.
+# CLI flag: -validation.increment-duplicate-timestamps
+[increment_duplicate_timestamp: <boolean> | default = false ]
# Maximum number of log entries that will be returned for a query. # Maximum number of log entries that will be returned for a query.
# CLI flag: -validation.max-entries-limit # CLI flag: -validation.max-entries-limit

@ -273,13 +273,13 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
 stream.Entries[n] = entry
-// If configured for this tenant, fudge duplicate timestamps. Note, this is imperfect
+// If configured for this tenant, increment duplicate timestamps. Note, this is imperfect
 // since Loki will accept out of order writes it doesn't account for separate
 // pushes with overlapping time ranges having entries with duplicate timestamps
-if validationContext.fudgeDuplicateTimestamps && n != 0 && stream.Entries[n-1].Timestamp.Equal(entry.Timestamp) {
+if validationContext.incrementDuplicateTimestamps && n != 0 && stream.Entries[n-1].Timestamp.Equal(entry.Timestamp) {
 // Traditional logic for Loki is that 2 lines with the same timestamp and
 // exact same content will be de-duplicated, (i.e. only one will be stored, others dropped)
-// To maintain this behavior, only fudge the timestamp if the log content is different
+// To maintain this behavior, only increment the timestamp if the log content is different
 if stream.Entries[n-1].Line != entry.Line {
 stream.Entries[n].Timestamp = entry.Timestamp.Add(1 * time.Nanosecond)
 }

@ -100,23 +100,23 @@ func TestDistributor(t *testing.T) {
 }
 }
-func Test_FudgeTimestamp(t *testing.T) {
-fudgingDisabled := &validation.Limits{}
-flagext.DefaultValues(fudgingDisabled)
-fudgingDisabled.RejectOldSamples = false
-fudgingEnabled := &validation.Limits{}
-flagext.DefaultValues(fudgingEnabled)
-fudgingEnabled.RejectOldSamples = false
-fudgingEnabled.FudgeDuplicateTimestamp = true
+func Test_IncrementTimestamp(t *testing.T) {
+incrementingDisabled := &validation.Limits{}
+flagext.DefaultValues(incrementingDisabled)
+incrementingDisabled.RejectOldSamples = false
+incrementingEnabled := &validation.Limits{}
+flagext.DefaultValues(incrementingEnabled)
+incrementingEnabled.RejectOldSamples = false
+incrementingEnabled.IncrementDuplicateTimestamp = true
 tests := map[string]struct {
 limits *validation.Limits
 push *logproto.PushRequest
 expectedPush *logproto.PushRequest
 }{
-"fudging disabled, no dupes": {
-limits: fudgingDisabled,
+"incrementing disabled, no dupes": {
+limits: incrementingDisabled,
 push: &logproto.PushRequest{
 Streams: []logproto.Stream{
 {
@ -140,8 +140,8 @@ func Test_FudgeTimestamp(t *testing.T) {
 },
 },
 },
-"fudging disabled, with dupe timestamp different entry": {
-limits: fudgingDisabled,
+"incrementing disabled, with dupe timestamp different entry": {
+limits: incrementingDisabled,
 push: &logproto.PushRequest{
 Streams: []logproto.Stream{
 {
@ -165,8 +165,8 @@ func Test_FudgeTimestamp(t *testing.T) {
 },
 },
 },
-"fudging disabled, with dupe timestamp same entry": {
-limits: fudgingDisabled,
+"incrementing disabled, with dupe timestamp same entry": {
+limits: incrementingDisabled,
 push: &logproto.PushRequest{
 Streams: []logproto.Stream{
 {
@ -190,8 +190,8 @@ func Test_FudgeTimestamp(t *testing.T) {
 },
 },
 },
-"fudging enabled, no dupes": {
-limits: fudgingEnabled,
+"incrementing enabled, no dupes": {
+limits: incrementingEnabled,
 push: &logproto.PushRequest{
 Streams: []logproto.Stream{
 {
@ -215,8 +215,8 @@ func Test_FudgeTimestamp(t *testing.T) {
 },
 },
 },
-"fudging enabled, with dupe timestamp different entry": {
-limits: fudgingEnabled,
+"incrementing enabled, with dupe timestamp different entry": {
+limits: incrementingEnabled,
 push: &logproto.PushRequest{
 Streams: []logproto.Stream{
 {
@ -240,8 +240,8 @@ func Test_FudgeTimestamp(t *testing.T) {
 },
 },
 },
-"fudging enabled, with dupe timestamp same entry": {
-limits: fudgingEnabled,
+"incrementing enabled, with dupe timestamp same entry": {
+limits: incrementingEnabled,
 push: &logproto.PushRequest{
 Streams: []logproto.Stream{
 {
@ -265,8 +265,8 @@ func Test_FudgeTimestamp(t *testing.T) {
 },
 },
 },
-"fudging enabled, multiple subsequent fudges": {
-limits: fudgingEnabled,
+"incrementing enabled, multiple subsequent increments": {
+limits: incrementingEnabled,
 push: &logproto.PushRequest{
 Streams: []logproto.Stream{
 {

@ -15,5 +15,5 @@ type Limits interface {
 RejectOldSamples(userID string) bool
 RejectOldSamplesMaxAge(userID string) time.Duration
-FudgeDuplicateTimestamps(userID string) bool
+IncrementDuplicateTimestamps(userID string) bool
 }

@ -40,23 +40,23 @@ type validationContext struct {
 maxLabelNameLength int
 maxLabelValueLength int
-fudgeDuplicateTimestamps bool
+incrementDuplicateTimestamps bool
 userID string
 }
 func (v Validator) getValidationContextForTime(now time.Time, userID string) validationContext {
 return validationContext{
 userID: userID,
 rejectOldSample: v.RejectOldSamples(userID),
 rejectOldSampleMaxAge: now.Add(-v.RejectOldSamplesMaxAge(userID)).UnixNano(),
 creationGracePeriod: now.Add(v.CreationGracePeriod(userID)).UnixNano(),
 maxLineSize: v.MaxLineSize(userID),
 maxLineSizeTruncate: v.MaxLineSizeTruncate(userID),
 maxLabelNamesPerSeries: v.MaxLabelNamesPerSeries(userID),
 maxLabelNameLength: v.MaxLabelNameLength(userID),
 maxLabelValueLength: v.MaxLabelValueLength(userID),
-fudgeDuplicateTimestamps: v.FudgeDuplicateTimestamps(userID),
+incrementDuplicateTimestamps: v.IncrementDuplicateTimestamps(userID),
 }
 }

@ -46,19 +46,19 @@ const (
 // to support user-friendly duration format (e.g: "1h30m45s") in JSON value.
 type Limits struct {
 // Distributor enforced limits.
 IngestionRateStrategy string `yaml:"ingestion_rate_strategy" json:"ingestion_rate_strategy"`
 IngestionRateMB float64 `yaml:"ingestion_rate_mb" json:"ingestion_rate_mb"`
 IngestionBurstSizeMB float64 `yaml:"ingestion_burst_size_mb" json:"ingestion_burst_size_mb"`
 MaxLabelNameLength int `yaml:"max_label_name_length" json:"max_label_name_length"`
 MaxLabelValueLength int `yaml:"max_label_value_length" json:"max_label_value_length"`
 MaxLabelNamesPerSeries int `yaml:"max_label_names_per_series" json:"max_label_names_per_series"`
 RejectOldSamples bool `yaml:"reject_old_samples" json:"reject_old_samples"`
 RejectOldSamplesMaxAge model.Duration `yaml:"reject_old_samples_max_age" json:"reject_old_samples_max_age"`
 CreationGracePeriod model.Duration `yaml:"creation_grace_period" json:"creation_grace_period"`
 EnforceMetricName bool `yaml:"enforce_metric_name" json:"enforce_metric_name"`
 MaxLineSize flagext.ByteSize `yaml:"max_line_size" json:"max_line_size"`
 MaxLineSizeTruncate bool `yaml:"max_line_size_truncate" json:"max_line_size_truncate"`
-FudgeDuplicateTimestamp bool `yaml:"fudge_duplicate_timestamp" json:"fudge_duplicate_timestamp"`
+IncrementDuplicateTimestamp bool `yaml:"increment_duplicate_timestamp" json:"increment_duplicate_timestamp"`
 // Ingester enforced limits.
 MaxLocalStreamsPerUser int `yaml:"max_streams_per_user" json:"max_streams_per_user"`
@ -136,7 +136,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value. This setting also applies to the metric name")
 f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 30, "Maximum number of label names per series.")
 f.BoolVar(&l.RejectOldSamples, "validation.reject-old-samples", true, "Reject old samples.")
-f.BoolVar(&l.FudgeDuplicateTimestamp, "validation.fudge-duplicate-timestamps", false, "Fudge the timestamp of a log line by one nanosecond in the future from a previous entry for the same stream with the same timestamp, guarantees sort order at query time.")
+f.BoolVar(&l.IncrementDuplicateTimestamp, "validation.increment-duplicate-timestamps", false, "Increment the timestamp of a log line by one nanosecond in the future from a previous entry for the same stream with the same timestamp; guarantees sort order at query time.")
 _ = l.RejectOldSamplesMaxAge.Set("7d")
 f.Var(&l.RejectOldSamplesMaxAge, "validation.reject-old-samples.max-age", "Maximum accepted sample age before rejecting.")
@ -539,8 +539,8 @@ func (o *Overrides) PerStreamRateLimit(userID string) RateLimit {
 }
 }
-func (o *Overrides) FudgeDuplicateTimestamps(userID string) bool {
-return o.getOverridesForUser(userID).FudgeDuplicateTimestamp
+func (o *Overrides) IncrementDuplicateTimestamps(userID string) bool {
+return o.getOverridesForUser(userID).IncrementDuplicateTimestamp
 }
 func (o *Overrides) getOverridesForUser(userID string) *Limits {

Loading…
Cancel
Save