Compare commits

...

3 Commits

Author SHA1 Message Date
Arve Knudsen 9c791faade
OTLP receiver: Don't append _total suffix to non-monotonic OTel sums (#16776) 1 day ago
Owen Williams aa1d46a9da
scrape: set validation and escaping defaults in default config vars (#16751) 1 day ago
George Krajcsovits 5b7ff92d95
fix(promql): histogram_quantile and histogram_fraction NaN observed in native histogram (#16724) 2 days ago
  1. 28
      config/config.go
  2. 128
      config/config_test.go
  3. 15
      docs/querying/functions.md
  4. 8
      promql/functions.go
  5. 45
      promql/promqltest/testdata/native_histograms.test
  6. 104
      promql/quantile.go
  7. 2
      scrape/scrape.go
  8. 2
      storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
  9. 418
      storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
  10. 24
      util/annotations/annotations.go

@ -172,6 +172,8 @@ var (
ScrapeProtocols: DefaultScrapeProtocols,
ConvertClassicHistogramsToNHCB: false,
AlwaysScrapeClassicHistograms: false,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
}
DefaultRuntimeConfig = RuntimeConfig{
@ -179,7 +181,10 @@ var (
GoGC: getGoGC(),
}
// DefaultScrapeConfig is the default scrape configuration.
// DefaultScrapeConfig is the default scrape configuration. Users of this
// default MUST call Validate() on the config after creation, even if it's
// used unaltered, to check for parameter correctness and fill out default
// values that can't be set inline in this declaration.
DefaultScrapeConfig = ScrapeConfig{
// ScrapeTimeout, ScrapeInterval, ScrapeProtocols, AlwaysScrapeClassicHistograms, and ConvertClassicHistogramsToNHCB default to the configured globals.
MetricsPath: "/metrics",
@ -940,6 +945,12 @@ func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
// ToValidationScheme returns the validation scheme for the given string config value.
func ToValidationScheme(s string) (validationScheme model.ValidationScheme, err error) {
switch s {
case "":
// This is a workaround for third party exporters that don't set the validation scheme.
if DefaultGlobalConfig.MetricNameValidationScheme == "" {
return model.UTF8Validation, errors.New("global metric name validation scheme is not set")
}
return ToValidationScheme(DefaultGlobalConfig.MetricNameValidationScheme)
case UTF8ValidationConfig:
validationScheme = model.UTF8Validation
case LegacyValidationConfig:
@ -951,6 +962,21 @@ func ToValidationScheme(s string) (validationScheme model.ValidationScheme, err
return validationScheme, nil
}
// ToEscapingScheme wraps the equivalent common library function with the
// desired default behavior based on the given validation scheme. This is a
// workaround for third party exporters that don't set the escaping scheme.
func ToEscapingScheme(s string, v model.ValidationScheme) (model.EscapingScheme, error) {
	// An explicitly configured scheme always wins; defer to the common
	// library for parsing it.
	if s != "" {
		return model.ToEscapingScheme(s)
	}
	// No scheme configured: derive a sensible default from the validation
	// scheme instead.
	switch v {
	case model.UTF8Validation:
		return model.NoEscaping, nil
	case model.LegacyValidation:
		return model.UnderscoreEscaping, nil
	default:
		// Unrecognized validation scheme: fall back to the common library's
		// handling of the empty string.
		return model.ToEscapingScheme(s)
	}
}
// ConvertClassicHistogramsToNHCBEnabled returns whether to convert classic histograms to NHCB.
func (c *ScrapeConfig) ConvertClassicHistogramsToNHCBEnabled() bool {
return c.ConvertClassicHistogramsToNHCB != nil && *c.ConvertClassicHistogramsToNHCB

@ -223,8 +223,8 @@ var expectedConf = &Config{
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFallbackProtocol: PrometheusText0_0_4,
ScrapeFailureLogFile: "testdata/fail_prom.log",
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -340,8 +340,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: 210,
ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4},
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -442,8 +442,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -502,8 +502,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -540,8 +540,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -584,8 +584,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -628,8 +628,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -662,8 +662,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -704,8 +704,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -743,8 +743,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -789,8 +789,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -825,8 +825,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -864,8 +864,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -896,8 +896,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -931,8 +931,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -966,8 +966,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1001,8 +1001,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1033,8 +1033,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1073,8 +1073,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1112,8 +1112,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1148,8 +1148,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1183,8 +1183,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1222,8 +1222,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1264,8 +1264,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1325,8 +1325,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1357,8 +1357,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1400,8 +1400,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1449,8 +1449,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1488,8 +1488,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1528,8 +1528,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1563,8 +1563,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@ -1600,8 +1600,8 @@ var expectedConf = &Config{
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

@ -293,6 +293,10 @@ boundaries, the function uses interpolation to estimate the fraction. With the
resulting uncertainty, it becomes irrelevant if the boundaries are inclusive or
exclusive.
Special case for native histograms with standard exponential buckets:
`NaN` observations are considered outside of any buckets in this case.
`histogram_fraction(-Inf, +Inf, b)` effectively returns the fraction of
non-`NaN` observations and may therefore be less than 1.
## `histogram_quantile()`
@ -385,9 +389,16 @@ Special cases for classic histograms:
is applied within that bucket. Otherwise, the upper bound of the lowest
bucket is returned for quantiles located in the lowest bucket.
Special cases for native histograms (relevant for the exact interpolation
happening within the zero bucket):
Special cases for native histograms:
* If a native histogram with standard exponential buckets has `NaN`
observations and the quantile falls into one of the existing exponential
buckets, the result is skewed towards higher values due to `NaN`
observations being treated as `+Inf`. This is flagged with an info level
annotation.
* If a native histogram with standard exponential buckets has `NaN`
observations and the quantile falls above all of the existing exponential
buckets, `NaN` is returned. This is flagged with an info level annotation.
* A zero bucket with finite width is assumed to contain no negative
observations if the histogram has observations in positive buckets, but none
in negative buckets.

@ -1364,9 +1364,11 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
}
hf, hfAnnos := HistogramFraction(lower, upper, sample.H, sample.Metric.Get(model.MetricNameLabel), args[0].PositionRange())
annos.Merge(hfAnnos)
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
F: HistogramFraction(lower, upper, sample.H),
F: hf,
DropName: true,
})
}
@ -1410,9 +1412,11 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
}
hq, hqAnnos := HistogramQuantile(q, sample.H, sample.Metric.Get(model.MetricNameLabel), args[0].PositionRange())
annos.Merge(hqAnnos)
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
F: HistogramQuantile(q, sample.H),
F: hq,
DropName: true,
})
}

@ -44,6 +44,7 @@ eval instant at 1m histogram_fraction(1, 2, single_histogram)
# We expect all values to fall in the range 0 < x <= 8.
eval instant at 1m histogram_fraction(0, 8, single_histogram)
expect no_info
{} 1
# Median is 1.414213562373095 (2**2**-1, or sqrt(2)) due to
@ -51,6 +52,7 @@ eval instant at 1m histogram_fraction(0, 8, single_histogram)
# 2 is assumed where the bucket boundary would be if we increased the
# resolution of the histogram by one step.
eval instant at 1m histogram_quantile(0.5, single_histogram)
expect no_info
{} 1.414213562373095
clear
@ -1328,3 +1330,46 @@ eval range from 0s to 60s step 15s last_over_time({__name__="http_request_durati
{__name__="http_request_duration_seconds", pod="nginx-1"} {{count:3 sum:14 buckets:[1 2]}}x4
clear
# Test native histogram quantile and fraction when the native histogram with exponential
# buckets has NaN observations.
load 1m
histogram_nan{case="100% NaNs"} {{schema:0 count:0 sum:0}} {{schema:0 count:3 sum:NaN}}
histogram_nan{case="20% NaNs"} {{schema:0 count:0 sum:0}} {{schema:0 count:15 sum:NaN buckets:[12]}}
eval instant at 1m histogram_quantile(1, histogram_nan)
expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan"
{case="100% NaNs"} NaN
{case="20% NaNs"} NaN
eval instant at 1m histogram_quantile(0.81, histogram_nan)
expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan"
{case="100% NaNs"} NaN
{case="20% NaNs"} NaN
eval instant at 1m histogram_quantile(0.8, histogram_nan{case="100% NaNs"})
expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan"
{case="100% NaNs"} NaN
eval instant at 1m histogram_quantile(0.8, histogram_nan{case="20% NaNs"})
expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is skewed higher for metric name "histogram_nan"
{case="20% NaNs"} 1
eval instant at 1m histogram_quantile(0.4, histogram_nan{case="100% NaNs"})
expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan"
{case="100% NaNs"} NaN
# histogram_quantile and histogram_fraction equivalence if quantile is not NaN
eval instant at 1m histogram_quantile(0.4, histogram_nan{case="20% NaNs"})
expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is skewed higher for metric name "histogram_nan"
{case="20% NaNs"} 0.7071067811865475
eval instant at 1m histogram_fraction(-Inf, 0.7071067811865475, histogram_nan)
expect info msg: PromQL info: input to histogram_fraction has NaN observations, which are excluded from all fractions for metric name "histogram_nan"
{case="100% NaNs"} 0.0
{case="20% NaNs"} 0.4
eval instant at 1m histogram_fraction(-Inf, +Inf, histogram_nan)
expect info msg: PromQL info: input to histogram_fraction has NaN observations, which are excluded from all fractions for metric name "histogram_nan"
{case="100% NaNs"} 0.0
{case="20% NaNs"} 0.8

@ -20,7 +20,9 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/util/almost"
"github.com/prometheus/prometheus/util/annotations"
)
// smallDeltaTolerance is the threshold for relative deltas between classic
@ -195,24 +197,34 @@ func BucketQuantile(q float64, buckets Buckets) (float64, bool, bool) {
//
// If q is NaN, NaN is returned.
//
// If the native histogram has NaN observations and the quantile falls into
// an existing bucket, then an additional info level annotation is returned
// informing the user about possible skew to higher values as NaNs are
// considered +Inf in this case.
//
// If the native histogram has NaN observations and the quantile falls above
// all existing buckets, then NaN is returned along with an additional info
// level annotation informing the user that this has happened.
//
// HistogramQuantile is for calculating the histogram_quantile() of native
// histograms. See also: BucketQuantile for classic histograms.
//
// HistogramQuantile is exported as it may be used by other PromQL engine
// implementations.
func HistogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
func HistogramQuantile(q float64, h *histogram.FloatHistogram, metricName string, pos posrange.PositionRange) (float64, annotations.Annotations) {
if q < 0 {
return math.Inf(-1)
return math.Inf(-1), nil
}
if q > 1 {
return math.Inf(+1)
return math.Inf(+1), nil
}
if h.Count == 0 || math.IsNaN(q) {
return math.NaN()
return math.NaN(), nil
}
var (
annos annotations.Annotations
bucket histogram.Bucket[float64]
count float64
it histogram.BucketIterator[float64]
@ -255,12 +267,12 @@ func HistogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
if bucket.Lower == math.Inf(-1) {
// first bucket, with lower bound -Inf
if bucket.Upper <= 0 {
return bucket.Upper
return bucket.Upper, annos
}
bucket.Lower = 0
} else if bucket.Upper == math.Inf(1) {
// last bucket, with upper bound +Inf
return bucket.Lower
return bucket.Lower, annos
}
}
// Due to numerical inaccuracies, we could end up with a higher count
@ -270,21 +282,43 @@ func HistogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
}
// We could have hit the highest bucket without even reaching the rank
// (this should only happen if the histogram contains observations of
// the value NaN), in which case we simply return the upper limit of the
// highest explicit bucket.
// the value NaN, in which case Sum is also NaN). In that case we simply
// return NaN.
// See https://github.com/prometheus/prometheus/issues/16578
if count < rank {
return bucket.Upper
if math.IsNaN(h.Sum) {
return math.NaN(), annos.Add(annotations.NewNativeHistogramQuantileNaNResultInfo(metricName, pos))
}
// This should not happen. Either NaNs are in the +Inf bucket (NHCB) and
// then count >= rank, or Sum is set to NaN. Might be a precision issue
// or something wrong with the histogram, fall back to returning the
// upper bound of the highest explicit bucket.
return bucket.Upper, annos
}
// NaN observations increase h.Count but not the total number of
// observations in the buckets. Therefore, we have to use the forward
// iterator to find percentiles. We recognize histograms containing NaN
// observations by checking if their h.Sum is NaN.
// iterator to find percentiles.
if math.IsNaN(h.Sum) || q < 0.5 {
rank -= count - bucket.Count
} else {
rank = count - rank
}
// We recognize histograms containing NaN observations by checking if their
// h.Sum is NaN and h.Count is higher than the total number of observations
// counted in the buckets. If the difference is lost in precision, then the
// skew isn't worth reporting anyway. If h.Count is not greater, then the
// histogram observed -Inf and +Inf at some point, which made the Sum == NaN.
if math.IsNaN(h.Sum) {
// Detect if h.Count is greater than sum of buckets.
for it.Next() {
bucket = it.At()
count += bucket.Count
}
if count < h.Count {
annos.Add(annotations.NewNativeHistogramQuantileNaNSkewInfo(metricName, pos))
}
}
// The fraction of how far we are into the current bucket.
fraction := rank / bucket.Count
@ -292,7 +326,7 @@ func HistogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
// Return linear interpolation for custom buckets and for quantiles that
// end up in the zero bucket.
if h.UsesCustomBuckets() || (bucket.Lower <= 0 && bucket.Upper >= 0) {
return bucket.Lower + (bucket.Upper-bucket.Lower)*fraction
return bucket.Lower + (bucket.Upper-bucket.Lower)*fraction, annos
}
// For exponential buckets, we interpolate on a logarithmic scale. On a
@ -305,10 +339,10 @@ func HistogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
logLower := math.Log2(math.Abs(bucket.Lower))
logUpper := math.Log2(math.Abs(bucket.Upper))
if bucket.Lower > 0 { // Positive bucket.
return math.Exp2(logLower + (logUpper-logLower)*fraction)
return math.Exp2(logLower + (logUpper-logLower)*fraction), annos
}
// Otherwise, we are in a negative bucket and have to mirror things.
return -math.Exp2(logUpper + (logLower-logUpper)*(1-fraction))
return -math.Exp2(logUpper + (logLower-logUpper)*(1-fraction)), annos
}
// HistogramFraction calculates the fraction of observations between the
@ -343,23 +377,29 @@ func HistogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
//
// If lower >= upper and the histogram has at least 1 observation, zero is returned.
//
// If the histogram has NaN observations, these are not considered in any bucket
// thus histogram_fraction(-Inf, +Inf, v) might be less than 1.0. The function
// returns an info level annotation in this case.
//
// HistogramFraction is exported as it may be used by other PromQL engine
// implementations.
func HistogramFraction(lower, upper float64, h *histogram.FloatHistogram) float64 {
func HistogramFraction(lower, upper float64, h *histogram.FloatHistogram, metricName string, pos posrange.PositionRange) (float64, annotations.Annotations) {
if h.Count == 0 || math.IsNaN(lower) || math.IsNaN(upper) {
return math.NaN()
return math.NaN(), nil
}
if lower >= upper {
return 0
return 0, nil
}
var (
rank, lowerRank, upperRank float64
lowerSet, upperSet bool
it = h.AllBucketIterator()
count, rank, lowerRank, upperRank float64
lowerSet, upperSet bool
it = h.AllBucketIterator()
annos annotations.Annotations
)
for it.Next() {
b := it.At()
count += b.Count
zeroBucket := false
// interpolateLinearly is used for custom buckets to be
@ -438,14 +478,28 @@ func HistogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6
}
rank += b.Count
}
if !lowerSet || lowerRank > h.Count {
lowerRank = h.Count
if math.IsNaN(h.Sum) {
// There might be NaN observations, so we need to adjust
// the count to only include non `NaN` observations.
for it.Next() {
b := it.At()
count += b.Count
}
if count < h.Count {
annos.Add(annotations.NewNativeHistogramFractionNaNsInfo(metricName, pos))
}
} else {
count = h.Count
}
if !lowerSet || lowerRank > count {
lowerRank = count
}
if !upperSet || upperRank > h.Count {
upperRank = h.Count
if !upperSet || upperRank > count {
upperRank = count
}
return (upperRank - lowerRank) / h.Count
return (upperRank - lowerRank) / h.Count, annos
}
// BucketFraction is a version of HistogramFraction for classic histograms.

@ -154,7 +154,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
return nil, fmt.Errorf("invalid metric name validation scheme: %w", err)
}
var escapingScheme model.EscapingScheme
escapingScheme, err = model.ToEscapingScheme(cfg.MetricNameEscapingScheme)
escapingScheme, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, validationScheme)
if err != nil {
return nil, fmt.Errorf("invalid metric name escaping scheme, %w", err)
}

@ -77,7 +77,7 @@ func TranslatorMetricFromOtelMetric(metric pmetric.Metric) otlptranslator.Metric
case pmetric.MetricTypeGauge:
m.Type = otlptranslator.MetricTypeGauge
case pmetric.MetricTypeSum:
if metric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityCumulative {
if metric.Sum().IsMonotonic() {
m.Type = otlptranslator.MetricTypeMonotonicCounter
} else {
m.Type = otlptranslator.MetricTypeNonMonotonicCounter

@ -23,7 +23,6 @@ import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/prometheus/otlptranslator"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pcommon"
@ -36,67 +35,105 @@ import (
)
func TestFromMetrics(t *testing.T) {
for _, keepIdentifyingResourceAttributes := range []bool{false, true} {
t.Run(fmt.Sprintf("successful/keepIdentifyingAttributes=%v", keepIdentifyingResourceAttributes), func(t *testing.T) {
converter := NewPrometheusConverter()
payload := createExportRequest(5, 128, 128, 2, 0)
var expMetadata []prompb.MetricMetadata
resourceMetricsSlice := payload.Metrics().ResourceMetrics()
for i := 0; i < resourceMetricsSlice.Len(); i++ {
scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics()
for j := 0; j < scopeMetricsSlice.Len(); j++ {
metricSlice := scopeMetricsSlice.At(j).Metrics()
for k := 0; k < metricSlice.Len(); k++ {
metric := metricSlice.At(k)
namer := otlptranslator.MetricNamer{}
promName := namer.Build(TranslatorMetricFromOtelMetric(metric))
expMetadata = append(expMetadata, prompb.MetricMetadata{
Type: otelMetricTypeToPromMetricType(metric),
MetricFamilyName: promName,
Help: metric.Description(),
Unit: metric.Unit(),
})
t.Run("Successful", func(t *testing.T) {
for _, tc := range []struct {
name string
settings Settings
temporality pmetric.AggregationTemporality
}{
{
name: "Default with cumulative temporality",
settings: Settings{},
temporality: pmetric.AggregationTemporalityCumulative,
},
{
name: "Default with delta temporality",
settings: Settings{
AllowDeltaTemporality: true,
},
temporality: pmetric.AggregationTemporalityDelta,
},
{
name: "Keep identifying attributes",
settings: Settings{
KeepIdentifyingResourceAttributes: true,
},
temporality: pmetric.AggregationTemporalityCumulative,
},
{
name: "Add metric suffixes with cumulative temporality",
settings: Settings{
AddMetricSuffixes: true,
},
temporality: pmetric.AggregationTemporalityCumulative,
},
{
name: "Add metric suffixes with delta temporality",
settings: Settings{
AddMetricSuffixes: true,
AllowDeltaTemporality: true,
},
temporality: pmetric.AggregationTemporalityDelta,
},
} {
t.Run(tc.name, func(t *testing.T) {
converter := NewPrometheusConverter()
payload, wantPromMetrics := createExportRequest(5, 128, 128, 2, 0, tc.settings, tc.temporality)
var expMetadata []prompb.MetricMetadata
seenFamilyNames := map[string]struct{}{}
for _, wantMetric := range wantPromMetrics {
if _, exists := seenFamilyNames[wantMetric.familyName]; exists {
continue
}
if wantMetric.familyName == "target_info" {
continue
}
}
}
annots, err := converter.FromMetrics(
context.Background(),
payload.Metrics(),
Settings{KeepIdentifyingResourceAttributes: keepIdentifyingResourceAttributes},
)
require.NoError(t, err)
require.Empty(t, annots)
if diff := cmp.Diff(expMetadata, converter.Metadata()); diff != "" {
t.Errorf("mismatch (-want +got):\n%s", diff)
}
seenFamilyNames[wantMetric.familyName] = struct{}{}
expMetadata = append(expMetadata, prompb.MetricMetadata{
Type: wantMetric.metricType,
MetricFamilyName: wantMetric.familyName,
Help: wantMetric.description,
Unit: wantMetric.unit,
})
}
ts := converter.TimeSeries()
require.Len(t, ts, 1408+1) // +1 for the target_info.
tgtInfoCount := 0
for _, s := range ts {
b := labels.NewScratchBuilder(2)
lbls := s.ToLabels(&b, nil)
if lbls.Get(labels.MetricName) == "target_info" {
tgtInfoCount++
require.Equal(t, "test-namespace/test-service", lbls.Get("job"))
require.Equal(t, "id1234", lbls.Get("instance"))
if keepIdentifyingResourceAttributes {
require.Equal(t, "test-service", lbls.Get("service_name"))
require.Equal(t, "test-namespace", lbls.Get("service_namespace"))
require.Equal(t, "id1234", lbls.Get("service_instance_id"))
} else {
require.False(t, lbls.Has("service_name"))
require.False(t, lbls.Has("service_namespace"))
require.False(t, lbls.Has("service_instance_id"))
annots, err := converter.FromMetrics(
context.Background(),
payload.Metrics(),
tc.settings,
)
require.NoError(t, err)
require.Empty(t, annots)
testutil.RequireEqual(t, expMetadata, converter.Metadata())
ts := converter.TimeSeries()
require.Len(t, ts, 1536+1) // +1 for the target_info.
tgtInfoCount := 0
for _, s := range ts {
b := labels.NewScratchBuilder(2)
lbls := s.ToLabels(&b, nil)
if lbls.Get(labels.MetricName) == "target_info" {
tgtInfoCount++
require.Equal(t, "test-namespace/test-service", lbls.Get("job"))
require.Equal(t, "id1234", lbls.Get("instance"))
if tc.settings.KeepIdentifyingResourceAttributes {
require.Equal(t, "test-service", lbls.Get("service_name"))
require.Equal(t, "test-namespace", lbls.Get("service_namespace"))
require.Equal(t, "id1234", lbls.Get("service_instance_id"))
} else {
require.False(t, lbls.Has("service_name"))
require.False(t, lbls.Has("service_namespace"))
require.False(t, lbls.Has("service_instance_id"))
}
}
}
}
require.Equal(t, 1, tgtInfoCount)
})
}
require.Equal(t, 1, tgtInfoCount)
})
}
})
for _, convertHistogramsToNHCB := range []bool{false, true} {
t.Run(fmt.Sprintf("successful/convertHistogramsToNHCB=%v", convertHistogramsToNHCB), func(t *testing.T) {
@ -144,25 +181,27 @@ func TestFromMetrics(t *testing.T) {
}
t.Run("context cancellation", func(t *testing.T) {
settings := Settings{}
converter := NewPrometheusConverter()
ctx, cancel := context.WithCancel(context.Background())
// Verify that converter.FromMetrics respects cancellation.
cancel()
payload := createExportRequest(5, 128, 128, 2, 0)
payload, _ := createExportRequest(5, 128, 128, 2, 0, settings, pmetric.AggregationTemporalityCumulative)
annots, err := converter.FromMetrics(ctx, payload.Metrics(), Settings{})
annots, err := converter.FromMetrics(ctx, payload.Metrics(), settings)
require.ErrorIs(t, err, context.Canceled)
require.Empty(t, annots)
})
t.Run("context timeout", func(t *testing.T) {
settings := Settings{}
converter := NewPrometheusConverter()
// Verify that converter.FromMetrics respects timeout.
ctx, cancel := context.WithTimeout(context.Background(), 0)
t.Cleanup(cancel)
payload := createExportRequest(5, 128, 128, 2, 0)
payload, _ := createExportRequest(5, 128, 128, 2, 0, settings, pmetric.AggregationTemporalityCumulative)
annots, err := converter.FromMetrics(ctx, payload.Metrics(), Settings{})
annots, err := converter.FromMetrics(ctx, payload.Metrics(), settings)
require.ErrorIs(t, err, context.DeadlineExceeded)
require.Empty(t, annots)
})
@ -693,6 +732,139 @@ func sortTimeSeries(series []prompb.TimeSeries) []prompb.TimeSeries {
return series
}
// TestTranslatorMetricFromOtelMetric verifies that TranslatorMetricFromOtelMetric
// maps each OTel metric data type (gauge, monotonic and non-monotonic sum,
// histogram, exponential histogram, summary, and a metric with no data type)
// to the corresponding otlptranslator.Metric, carrying name and unit through
// unchanged.
func TestTranslatorMetricFromOtelMetric(t *testing.T) {
	cases := []struct {
		name  string
		input pmetric.Metric
		want  otlptranslator.Metric
	}{
		{
			name:  "gauge metric",
			input: createOTelGaugeForTranslator("test_gauge", "bytes", "Test gauge metric"),
			want: otlptranslator.Metric{
				Name: "test_gauge",
				Unit: "bytes",
				Type: otlptranslator.MetricTypeGauge,
			},
		},
		{
			name:  "monotonic sum metric",
			input: createOTelSumForTranslator("test_sum", "count", "Test sum metric", true),
			want: otlptranslator.Metric{
				Name: "test_sum",
				Unit: "count",
				Type: otlptranslator.MetricTypeMonotonicCounter,
			},
		},
		{
			name:  "non-monotonic sum metric",
			input: createOTelSumForTranslator("test_sum", "count", "Test sum metric", false),
			want: otlptranslator.Metric{
				Name: "test_sum",
				Unit: "count",
				Type: otlptranslator.MetricTypeNonMonotonicCounter,
			},
		},
		{
			name:  "histogram metric",
			input: createOTelHistogramForTranslator("test_histogram", "seconds", "Test histogram metric"),
			want: otlptranslator.Metric{
				Name: "test_histogram",
				Unit: "seconds",
				Type: otlptranslator.MetricTypeHistogram,
			},
		},
		{
			name:  "exponential histogram metric",
			input: createOTelExponentialHistogramForTranslator("test_exp_histogram", "milliseconds", "Test exponential histogram metric"),
			want: otlptranslator.Metric{
				Name: "test_exp_histogram",
				Unit: "milliseconds",
				Type: otlptranslator.MetricTypeExponentialHistogram,
			},
		},
		{
			name:  "summary metric",
			input: createOTelSummaryForTranslator("test_summary", "duration", "Test summary metric"),
			want: otlptranslator.Metric{
				Name: "test_summary",
				Unit: "duration",
				Type: otlptranslator.MetricTypeSummary,
			},
		},
		{
			name:  "empty metric name and unit",
			input: createOTelGaugeForTranslator("", "", ""),
			want: otlptranslator.Metric{
				Name: "",
				Unit: "",
				Type: otlptranslator.MetricTypeGauge,
			},
		},
		{
			name:  "empty metric type defaults to unknown",
			input: createOTelEmptyMetricForTranslator("test_empty"),
			want: otlptranslator.Metric{
				Name: "test_empty",
				Unit: "",
				Type: otlptranslator.MetricTypeUnknown,
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			require.Equal(t, c.want, TranslatorMetricFromOtelMetric(c.input))
		})
	}
}
// createOTelMetricForTranslator returns a pmetric.Metric with the given name,
// unit, and description set, but with no data type attached yet.
func createOTelMetricForTranslator(name, unit, description string) pmetric.Metric {
	metric := pmetric.NewMetric()
	metric.SetName(name)
	metric.SetUnit(unit)
	metric.SetDescription(description)
	return metric
}
// createOTelGaugeForTranslator builds a gauge-typed OTel metric for
// translator tests.
func createOTelGaugeForTranslator(name, unit, description string) pmetric.Metric {
	metric := createOTelMetricForTranslator(name, unit, description)
	metric.SetEmptyGauge()
	return metric
}
// createOTelSumForTranslator builds a sum-typed OTel metric with the
// requested monotonicity for translator tests.
func createOTelSumForTranslator(name, unit, description string, isMonotonic bool) pmetric.Metric {
	metric := createOTelMetricForTranslator(name, unit, description)
	metric.SetEmptySum().SetIsMonotonic(isMonotonic)
	return metric
}
// createOTelHistogramForTranslator builds a histogram-typed OTel metric for
// translator tests.
func createOTelHistogramForTranslator(name, unit, description string) pmetric.Metric {
	metric := createOTelMetricForTranslator(name, unit, description)
	metric.SetEmptyHistogram()
	return metric
}
// createOTelExponentialHistogramForTranslator builds an exponential-histogram
// typed OTel metric for translator tests.
func createOTelExponentialHistogramForTranslator(name, unit, description string) pmetric.Metric {
	metric := createOTelMetricForTranslator(name, unit, description)
	metric.SetEmptyExponentialHistogram()
	return metric
}
// createOTelSummaryForTranslator builds a summary-typed OTel metric for
// translator tests.
func createOTelSummaryForTranslator(name, unit, description string) pmetric.Metric {
	metric := createOTelMetricForTranslator(name, unit, description)
	metric.SetEmptySummary()
	return metric
}
// createOTelEmptyMetricForTranslator builds a named OTel metric with no data
// type set, so its translated type resolves to unknown.
func createOTelEmptyMetricForTranslator(name string) pmetric.Metric {
	metric := pmetric.NewMetric()
	metric.SetName(name)
	return metric
}
func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
for _, resourceAttributeCount := range []int{0, 5, 50} {
b.Run(fmt.Sprintf("resource attribute count: %v", resourceAttributeCount), func(b *testing.B) {
@ -711,12 +883,21 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
b.Run(fmt.Sprintf("labels per metric: %v", labelsPerMetric), func(b *testing.B) {
for _, exemplarsPerSeries := range []int{0, 5, 10} {
b.Run(fmt.Sprintf("exemplars per series: %v", exemplarsPerSeries), func(b *testing.B) {
payload := createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCount, labelsPerMetric, exemplarsPerSeries)
settings := Settings{}
payload, _ := createExportRequest(
resourceAttributeCount,
histogramCount,
nonHistogramCount,
labelsPerMetric,
exemplarsPerSeries,
settings,
pmetric.AggregationTemporalityCumulative,
)
b.ResetTimer()
for range b.N {
converter := NewPrometheusConverter()
annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{})
annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
require.NoError(b, err)
require.Empty(b, annots)
require.NotNil(b, converter.TimeSeries())
@ -734,7 +915,15 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
}
}
func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCount, labelsPerMetric, exemplarsPerSeries int) pmetricotlp.ExportRequest {
// wantPrometheusMetric describes one Prometheus metric that the converter is
// expected to produce from the OTLP payload built by createExportRequest.
type wantPrometheusMetric struct {
	// name is the full series name, including any generated suffixes
	// (e.g. _bucket/_count/_sum for histograms, _total for counters).
	name string
	// familyName is the expected metric family name
	// (prompb.MetricMetadata.MetricFamilyName).
	familyName string
	// metricType is the expected metadata type (counter, gauge, ...).
	metricType prompb.MetricMetadata_MetricType
	// description is the expected help text (prompb.MetricMetadata.Help).
	description string
	// unit is the expected unit string (prompb.MetricMetadata.Unit).
	unit string
}
func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCount, labelsPerMetric, exemplarsPerSeries int, settings Settings, temporality pmetric.AggregationTemporality) (pmetricotlp.ExportRequest, []wantPrometheusMetric) {
request := pmetricotlp.NewExportRequest()
rm := request.Metrics().ResourceMetrics().AppendEmpty()
@ -752,13 +941,18 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou
metrics := rm.ScopeMetrics().AppendEmpty().Metrics()
ts := pcommon.NewTimestampFromTime(time.Now())
var suffix string
if settings.AddMetricSuffixes {
suffix = "_unit"
}
var wantPromMetrics []wantPrometheusMetric
for i := 1; i <= histogramCount; i++ {
m := metrics.AppendEmpty()
m.SetEmptyHistogram()
m.SetName(fmt.Sprintf("histogram-%v", i))
m.SetDescription("histogram")
m.SetUnit("unit")
m.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.Histogram().SetAggregationTemporality(temporality)
h := m.Histogram().DataPoints().AppendEmpty()
h.SetTimestamp(ts)
@ -770,20 +964,96 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou
generateAttributes(h.Attributes(), "series", labelsPerMetric)
generateExemplars(h.Exemplars(), exemplarsPerSeries, ts)
metricType := prompb.MetricMetadata_HISTOGRAM
if temporality != pmetric.AggregationTemporalityCumulative {
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
// We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now.
metricType = prompb.MetricMetadata_UNKNOWN
}
wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{
name: fmt.Sprintf("histogram_%d%s_bucket", i, suffix),
familyName: fmt.Sprintf("histogram_%d%s", i, suffix),
metricType: metricType,
unit: "unit",
description: "histogram",
})
wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{
name: fmt.Sprintf("histogram_%d%s_count", i, suffix),
familyName: fmt.Sprintf("histogram_%d%s", i, suffix),
metricType: metricType,
unit: "unit",
description: "histogram",
})
wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{
name: fmt.Sprintf("histogram_%d%s_sum", i, suffix),
familyName: fmt.Sprintf("histogram_%d%s", i, suffix),
metricType: metricType,
unit: "unit",
description: "histogram",
})
}
for i := 1; i <= nonHistogramCount; i++ {
m := metrics.AppendEmpty()
m.SetEmptySum()
m.SetName(fmt.Sprintf("non.monotonic.sum-%v", i))
m.SetDescription("sum")
m.SetUnit("unit")
m.Sum().SetAggregationTemporality(temporality)
point := m.Sum().DataPoints().AppendEmpty()
point.SetTimestamp(ts)
point.SetDoubleValue(1.23)
generateAttributes(point.Attributes(), "series", labelsPerMetric)
generateExemplars(point.Exemplars(), exemplarsPerSeries, ts)
metricType := prompb.MetricMetadata_GAUGE
if temporality != pmetric.AggregationTemporalityCumulative {
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
// We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now.
metricType = prompb.MetricMetadata_UNKNOWN
}
wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{
name: fmt.Sprintf("non_monotonic_sum_%d%s", i, suffix),
familyName: fmt.Sprintf("non_monotonic_sum_%d%s", i, suffix),
metricType: metricType,
unit: "unit",
description: "sum",
})
}
for i := 1; i <= nonHistogramCount; i++ {
m := metrics.AppendEmpty()
m.SetEmptySum()
m.SetName(fmt.Sprintf("sum-%v", i))
m.SetName(fmt.Sprintf("monotonic.sum-%v", i))
m.SetDescription("sum")
m.SetUnit("unit")
m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.Sum().SetAggregationTemporality(temporality)
m.Sum().SetIsMonotonic(true)
point := m.Sum().DataPoints().AppendEmpty()
point.SetTimestamp(ts)
point.SetDoubleValue(1.23)
generateAttributes(point.Attributes(), "series", labelsPerMetric)
generateExemplars(point.Exemplars(), exemplarsPerSeries, ts)
var counterSuffix string
if settings.AddMetricSuffixes {
counterSuffix = suffix + "_total"
}
metricType := prompb.MetricMetadata_COUNTER
if temporality != pmetric.AggregationTemporalityCumulative {
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
// We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now.
metricType = prompb.MetricMetadata_UNKNOWN
}
wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{
name: fmt.Sprintf("monotonic_sum_%d%s", i, counterSuffix),
familyName: fmt.Sprintf("monotonic_sum_%d%s", i, counterSuffix),
metricType: metricType,
unit: "unit",
description: "sum",
})
}
for i := 1; i <= nonHistogramCount; i++ {
@ -797,9 +1067,21 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou
point.SetDoubleValue(1.23)
generateAttributes(point.Attributes(), "series", labelsPerMetric)
generateExemplars(point.Exemplars(), exemplarsPerSeries, ts)
wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{
name: fmt.Sprintf("gauge_%d%s", i, suffix),
familyName: fmt.Sprintf("gauge_%d%s", i, suffix),
metricType: prompb.MetricMetadata_GAUGE,
unit: "unit",
description: "gauge",
})
}
return request
wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{
name: "target_info",
familyName: "target_info",
})
return request, wantPromMetrics
}
func generateAttributes(m pcommon.Map, prefix string, count int) {

@ -152,6 +152,9 @@ var (
IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo)
HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo)
HistogramIgnoredInMixedRangeInfo = fmt.Errorf("%w: ignored histograms in a range containing both floats and histograms for metric name", PromQLInfo)
NativeHistogramQuantileNaNResultInfo = fmt.Errorf("%w: input to histogram_quantile has NaN observations, result is NaN for metric name", PromQLInfo)
NativeHistogramQuantileNaNSkewInfo = fmt.Errorf("%w: input to histogram_quantile has NaN observations, result is skewed higher for metric name", PromQLInfo)
NativeHistogramFractionNaNsInfo = fmt.Errorf("%w: input to histogram_fraction has NaN observations, which are excluded from all fractions for metric name", PromQLInfo)
)
type annoErr struct {
@ -324,3 +327,24 @@ func NewIncompatibleBucketLayoutInBinOpWarning(operator string, pos posrange.Pos
Err: fmt.Errorf("%w %s", IncompatibleBucketLayoutInBinOpWarning, operator),
}
}
// NewNativeHistogramQuantileNaNResultInfo is used when the input to
// histogram_quantile has NaN observations and the result is NaN for the
// given metric.
func NewNativeHistogramQuantileNaNResultInfo(metricName string, pos posrange.PositionRange) error {
	err := fmt.Errorf("%w %q", NativeHistogramQuantileNaNResultInfo, metricName)
	return annoErr{PositionRange: pos, Err: err}
}
// NewNativeHistogramQuantileNaNSkewInfo is used when the input to
// histogram_quantile has NaN observations and the result is skewed higher
// for the given metric.
func NewNativeHistogramQuantileNaNSkewInfo(metricName string, pos posrange.PositionRange) error {
	err := fmt.Errorf("%w %q", NativeHistogramQuantileNaNSkewInfo, metricName)
	return annoErr{PositionRange: pos, Err: err}
}
// NewNativeHistogramFractionNaNsInfo is used when the input to
// histogram_fraction has NaN observations, which are excluded from all
// fractions for the given metric.
func NewNativeHistogramFractionNaNsInfo(metricName string, pos posrange.PositionRange) error {
	err := fmt.Errorf("%w %q", NativeHistogramFractionNaNsInfo, metricName)
	return annoErr{PositionRange: pos, Err: err}
}

Loading…
Cancel
Save