feature: type-and-unit-labels (PROM-39 implementation) (#16228)

* feature: type-and-unit-labels (extended MetricIdentity)

Experimental implementation of https://github.com/prometheus/proposals/pull/39

Previous (unmerged) experiments:
* https://github.com/prometheus/prometheus/compare/main...dashpole:prometheus:type_and_unit_labels
* https://github.com/prometheus/prometheus/pull/16025

Signed-off-by: bwplotka <bwplotka@gmail.com>

feature: type-and-unit-labels (extended MetricIdentity)

Experimental implementation of https://github.com/prometheus/proposals/pull/39

Previous (unmerged) experiments:
* https://github.com/prometheus/prometheus/compare/main...dashpole:prometheus:type_and_unit_labels
* https://github.com/prometheus/prometheus/pull/16025

Signed-off-by: bwplotka <bwplotka@gmail.com>

* Fix compilation errors

Signed-off-by: Arthur Silva Sens <arthursens2005@gmail.com>

Lint

Signed-off-by: Arthur Silva Sens <arthursens2005@gmail.com>

Revert change made to protobuf 'Accept' header

Signed-off-by: Arthur Silva Sens <arthursens2005@gmail.com>

Fix compilation errors for 'dedupelabels' tag

Signed-off-by: Arthur Silva Sens <arthursens2005@gmail.com>

* Refactored into schema.Metadata

Signed-off-by: bwplotka <bwplotka@gmail.com>

* textparse: Added tests for PromParse

Signed-off-by: bwplotka <bwplotka@gmail.com>

* add OM tests.

Signed-off-by: bwplotka <bwplotka@gmail.com>

* add proto tests

Signed-off-by: bwplotka <bwplotka@gmail.com>

* Addressed comments.

Signed-off-by: bwplotka <bwplotka@gmail.com>

* add schema label tests.

Signed-off-by: bwplotka <bwplotka@gmail.com>

* addressed comments.

Signed-off-by: bwplotka <bwplotka@gmail.com>

* fix tests.

Signed-off-by: bwplotka <bwplotka@gmail.com>

* add promql tests.

Signed-off-by: bwplotka <bwplotka@gmail.com>

* lint

Signed-off-by: bwplotka <bwplotka@gmail.com>

* Addressed comments.

Signed-off-by: bwplotka <bwplotka@gmail.com>

---------

Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Arthur Silva Sens <arthursens2005@gmail.com>
Co-authored-by: Arthur Silva Sens <arthursens2005@gmail.com>
pull/15365/head^2
Bartlomiej Plotka 1 month ago committed by GitHub
parent 5a98246f50
commit 8e6b008608
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 3
      cmd/prometheus/main.go
  2. 25
      docs/feature_flags.md
  3. 25
      model/labels/labels.go
  4. 12
      model/labels/labels_common.go
  5. 17
      model/labels/labels_dedupelabels.go
  6. 17
      model/labels/labels_stringlabels.go
  7. 16
      model/labels/labels_test.go
  8. 8
      model/textparse/benchmark_test.go
  9. 11
      model/textparse/interface.go
  10. 2
      model/textparse/interface_test.go
  11. 4
      model/textparse/nhcbparse_test.go
  12. 73
      model/textparse/openmetricsparse.go
  13. 877
      model/textparse/openmetricsparse_test.go
  14. 32
      model/textparse/promparse.go
  15. 472
      model/textparse/promparse_test.go
  16. 36
      model/textparse/protobufparse.go
  17. 684
      model/textparse/protobufparse_test.go
  18. 10
      prompb/io/prometheus/client/decoder.go
  19. 26
      promql/engine.go
  20. 17
      promql/functions.go
  21. 2
      promql/fuzz.go
  22. 280
      promql/promqltest/testdata/type_and_unit.test
  23. 157
      schema/labels.go
  24. 153
      schema/labels_test.go
  25. 3
      scrape/manager.go
  26. 6
      scrape/scrape.go
  27. 5
      scrape/scrape_test.go
  28. 6
      web/federate.go
  29. 2
      web/federate_test.go

@ -290,6 +290,9 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
// See proposal: https://github.com/prometheus/proposals/pull/48
c.web.NativeOTLPDeltaIngestion = true
logger.Info("Enabling native ingestion of delta OTLP metrics, storing the raw sample values without conversion. WARNING: Delta support is in an early stage of development. The ingestion and querying process is likely to change over time.")
case "type-and-unit-labels":
c.scrape.EnableTypeAndUnitLabels = true
logger.Info("Experimental type and unit labels enabled")
default:
logger.Warn("Unknown option for --enable-feature", "option", o)
}

@ -247,3 +247,28 @@ These may not work well if the `<range>` is not a multiple of the collection int
* It is difficult to figure out whether a metric has delta or cumulative temporality, since there's no indication of temporality in metric names or labels. For now, if you are ingesting a mix of delta and cumulative metrics we advise you to explicitly add your own labels to distinguish them. In the future, we plan to introduce type labels to consistently distinguish metric types and potentially make PromQL functions type-aware (e.g. providing warnings when cumulative-only functions are used with delta metrics).
* If there are multiple samples being ingested at the same timestamp, only one of the points is kept - the samples are **not** summed together (this is how Prometheus works in general - duplicate timestamp samples are rejected). Any aggregation will have to be done before sending samples to Prometheus.
## Type and Unit Labels
`--enable-feature=type-and-unit-labels`
When enabled, Prometheus will start injecting additional, reserved `__type__`
and `__unit__` labels as designed in the [PROM-39 proposal](https://github.com/prometheus/proposals/pull/39).
Those labels are sourced from the metadata structures of the existing scrape and ingestion formats
like OpenMetrics Text, Prometheus Text, Prometheus Proto, Remote Write 2 and OTLP. Any user-provided
`__type__` and `__unit__` labels will be overridden.
The PromQL layer will handle those labels the same way `__name__` is handled, e.g. dropped
on certain operations like `-` or `+` and affected by the `promql-delayed-name-removal` feature.
This feature makes important metadata information accessible directly with samples and in the PromQL layer.
It's especially useful for users who:
* Want to be able to select metrics based on type or unit.
* Want to handle cases of series with the same metric name and different type and units.
e.g. native histogram migrations or OpenTelemetry metrics from OTLP endpoint, without translation.
In the future, more [work is planned](https://github.com/prometheus/prometheus/issues/16610) that will depend on this, e.g. a rich PromQL UX that warns
when the wrong types are used with certain functions, automatic renames, delta types and more.

@ -336,16 +336,29 @@ func (ls Labels) Validate(f func(l Label) error) error {
return nil
}
// DropMetricName returns Labels with "__name__" removed.
// DropMetricName returns Labels with the "__name__" removed.
// Deprecated: Use DropReserved instead.
func (ls Labels) DropMetricName() Labels {
return ls.DropReserved(func(n string) bool { return n == MetricName })
}
// DropReserved returns Labels without the chosen (via shouldDropFn) reserved (starting with underscore) labels.
func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels {
rm := 0
for i, l := range ls {
if l.Name == MetricName {
if l.Name[0] > '_' { // Stop looking if we've gone past special labels.
break
}
if shouldDropFn(l.Name) {
i := i - rm // Offsetting after removals.
if i == 0 { // Make common case fast with no allocations.
return ls[1:]
ls = ls[1:]
} else {
// Avoid modifying original Labels - use [:i:i] so that left slice would not
// have any spare capacity and append would have to allocate a new slice for the result.
ls = append(ls[:i:i], ls[i+1:]...)
}
// Avoid modifying original Labels - use [:i:i] so that left slice would not
// have any spare capacity and append would have to allocate a new slice for the result.
return append(ls[:i:i], ls[i+1:]...)
rm++
}
}
return ls

@ -24,10 +24,12 @@ import (
)
const (
MetricName = "__name__"
AlertName = "alertname"
BucketLabel = "le"
InstanceName = "instance"
// MetricName is a special label name that represent a metric name.
// Deprecated: Use schema.Metadata structure and its methods.
MetricName = "__name__"
AlertName = "alertname"
BucketLabel = "le"
labelSep = '\xfe' // Used at beginning of `Bytes` return.
sep = '\xff' // Used between labels in `Bytes` and `Hash`.
@ -35,7 +37,7 @@ const (
var seps = []byte{sep} // Used with Hash, which has no WriteByte method.
// Label is a key/value pair of strings.
// Label is a key/value pair of strings.
type Label struct {
Name, Value string
}

@ -554,20 +554,27 @@ func (ls Labels) ReleaseStrings(release func(string)) {
// TODO: remove these calls as there is nothing to do.
}
// DropMetricName returns Labels with "__name__" removed.
// DropMetricName returns Labels with the "__name__" removed.
// Deprecated: Use DropReserved instead.
func (ls Labels) DropMetricName() Labels {
return ls.DropReserved(func(n string) bool { return n == MetricName })
}
// DropReserved returns Labels without the chosen (via shouldDropFn) reserved (starting with underscore) labels.
func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels {
for i := 0; i < len(ls.data); {
lName, i2 := decodeString(ls.syms, ls.data, i)
_, i2 = decodeVarint(ls.data, i2)
if lName == MetricName {
if lName[0] > '_' { // Stop looking if we've gone past special labels.
break
}
if shouldDropFn(lName) {
if i == 0 { // Make common case fast with no allocations.
ls.data = ls.data[i2:]
} else {
ls.data = ls.data[:i] + ls.data[i2:]
}
break
} else if lName[0] > MetricName[0] { // Stop looking if we've gone past.
break
continue
}
i = i2
}

@ -413,21 +413,28 @@ func (ls Labels) Validate(f func(l Label) error) error {
return nil
}
// DropMetricName returns Labels with "__name__" removed.
// DropMetricName returns Labels with the "__name__" removed.
// Deprecated: Use DropReserved instead.
func (ls Labels) DropMetricName() Labels {
return ls.DropReserved(func(n string) bool { return n == MetricName })
}
// DropReserved returns Labels without the chosen (via shouldDropFn) reserved (starting with underscore) labels.
func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels {
for i := 0; i < len(ls.data); {
lName, i2 := decodeString(ls.data, i)
size, i2 := decodeSize(ls.data, i2)
i2 += size
if lName == MetricName {
if lName[0] > '_' { // Stop looking if we've gone past special labels.
break
}
if shouldDropFn(lName) {
if i == 0 { // Make common case fast with no allocations.
ls.data = ls.data[i2:]
} else {
ls.data = ls.data[:i] + ls.data[i2:]
}
break
} else if lName[0] > MetricName[0] { // Stop looking if we've gone past.
break
continue
}
i = i2
}

@ -523,11 +523,25 @@ func TestLabels_DropMetricName(t *testing.T) {
require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "aaa", "111").DropMetricName()))
original := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222")
check := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222")
check := original.Copy()
require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), check.DropMetricName()))
require.True(t, Equal(original, check))
}
func TestLabels_DropReserved(t *testing.T) {
shouldDropFn := func(n string) bool {
return n == MetricName || n == "__something__"
}
require.True(t, Equal(FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").DropReserved(shouldDropFn)))
require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "aaa", "111").DropReserved(shouldDropFn)))
require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "__something__", string(model.MetricTypeCounter), "aaa", "111").DropReserved(shouldDropFn)))
original := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222")
check := original.Copy()
require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), check.DropReserved(shouldDropFn)))
require.True(t, Equal(original, check))
}
func ScratchBuilderForBenchmark() ScratchBuilder {
// (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.)
b := NewScratchBuilder(256)

@ -144,10 +144,12 @@ func benchParse(b *testing.B, data []byte, parser string) {
var newParserFn newParser
switch parser {
case "promtext":
newParserFn = NewPromParser
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
return NewPromParser(b, st, false)
}
case "promproto":
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
return NewProtobufParser(b, true, st)
return NewProtobufParser(b, true, false, st)
}
case "omtext":
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
@ -273,7 +275,7 @@ func BenchmarkCreatedTimestampPromProto(b *testing.B) {
data := createTestProtoBuf(b).Bytes()
st := labels.NewSymbolTable()
p := NewProtobufParser(data, true, st)
p := NewProtobufParser(data, true, false, st)
found := false
Inner:

@ -51,11 +51,13 @@ type Parser interface {
// Type returns the metric name and type in the current entry.
// Must only be called after Next returned a type entry.
// The returned byte slices become invalid after the next call to Next.
// TODO(bwplotka): Once type-and-unit-labels stabilizes we could remove this method.
Type() ([]byte, model.MetricType)
// Unit returns the metric name and unit in the current entry.
// Must only be called after Next returned a unit entry.
// The returned byte slices become invalid after the next call to Next.
// TODO(bwplotka): Once type-and-unit-labels stabilizes we could remove this method.
Unit() ([]byte, []byte)
// Comment returns the text of the current comment.
@ -128,19 +130,20 @@ func extractMediaType(contentType, fallbackType string) (string, error) {
// An error may also be returned if fallbackType had to be used or there was some
// other error parsing the supplied Content-Type.
// If the returned parser is nil then the scrape must fail.
func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) {
func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries, enableTypeAndUnitLabels bool, st *labels.SymbolTable) (Parser, error) {
mediaType, err := extractMediaType(contentType, fallbackType)
// err may be nil or something we want to warn about.
switch mediaType {
case "application/openmetrics-text":
return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) {
o.SkipCTSeries = skipOMCTSeries
o.skipCTSeries = skipOMCTSeries
o.enableTypeAndUnitLabels = enableTypeAndUnitLabels
}), err
case "application/vnd.google.protobuf":
return NewProtobufParser(b, parseClassicHistograms, st), err
return NewProtobufParser(b, parseClassicHistograms, enableTypeAndUnitLabels, st), err
case "text/plain":
return NewPromParser(b, st), err
return NewPromParser(b, st, enableTypeAndUnitLabels), err
default:
return nil, err
}

@ -168,7 +168,7 @@ func TestNewParser(t *testing.T) {
fallbackProtoMediaType := tt.fallbackScrapeProtocol.HeaderMediaType()
p, err := New([]byte{}, tt.contentType, fallbackProtoMediaType, false, false, labels.NewSymbolTable())
p, err := New([]byte{}, tt.contentType, fallbackProtoMediaType, false, false, false, labels.NewSymbolTable())
tt.validateParser(t, p)
if tt.err == "" {
require.NoError(t, err)

@ -598,7 +598,7 @@ func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) {
func() (string, parserFactory, []int, parserOptions) {
factory := func(keepClassic bool) Parser {
inputBuf := createTestProtoBufHistogram(t)
return NewProtobufParser(inputBuf.Bytes(), keepClassic, labels.NewSymbolTable())
return NewProtobufParser(inputBuf.Bytes(), keepClassic, false, labels.NewSymbolTable())
}
return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true}
},
@ -612,7 +612,7 @@ func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) {
func() (string, parserFactory, []int, parserOptions) {
factory := func(_ bool) Parser {
input := createTestPromHistogram()
return NewPromParser([]byte(input), labels.NewSymbolTable())
return NewPromParser([]byte(input), labels.NewSymbolTable(), false)
}
return "Prometheus", factory, []int{1}, parserOptions{}
},

@ -33,6 +33,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/schema"
)
type openMetricsLexer struct {
@ -81,10 +82,12 @@ type OpenMetricsParser struct {
mfNameLen int // length of metric family name to get from series.
text []byte
mtype model.MetricType
val float64
ts int64
hasTS bool
start int
unit string
val float64
ts int64
hasTS bool
start int
// offsets is a list of offsets into series that describe the positions
// of the metric name and label names and values for this series.
// p.offsets[0] is the start character of the metric name.
@ -106,12 +109,14 @@ type OpenMetricsParser struct {
ignoreExemplar bool
// visitedMFName is the metric family name of the last visited metric when peeking ahead
// for _created series during the execution of the CreatedTimestamp method.
visitedMFName []byte
skipCTSeries bool
visitedMFName []byte
skipCTSeries bool
enableTypeAndUnitLabels bool
}
type openMetricsParserOptions struct {
SkipCTSeries bool
skipCTSeries bool
enableTypeAndUnitLabels bool
}
type OpenMetricsOption func(*openMetricsParserOptions)
@ -125,7 +130,15 @@ type OpenMetricsOption func(*openMetricsParserOptions)
// best-effort compatibility.
func WithOMParserCTSeriesSkipped() OpenMetricsOption {
return func(o *openMetricsParserOptions) {
o.SkipCTSeries = true
o.skipCTSeries = true
}
}
// WithOMParserTypeAndUnitLabels enables type-and-unit-labels mode
// in which parser injects __type__ and __unit__ into labels.
func WithOMParserTypeAndUnitLabels() OpenMetricsOption {
return func(o *openMetricsParserOptions) {
o.enableTypeAndUnitLabels = true
}
}
@ -138,9 +151,10 @@ func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsO
}
parser := &OpenMetricsParser{
l: &openMetricsLexer{b: b},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
skipCTSeries: options.SkipCTSeries,
l: &openMetricsLexer{b: b},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
skipCTSeries: options.skipCTSeries,
enableTypeAndUnitLabels: options.enableTypeAndUnitLabels,
}
return parser
@ -187,7 +201,7 @@ func (p *OpenMetricsParser) Type() ([]byte, model.MetricType) {
// Must only be called after Next returned a unit entry.
// The returned byte slices become invalid after the next call to Next.
func (p *OpenMetricsParser) Unit() ([]byte, []byte) {
return p.l.b[p.offsets[0]:p.offsets[1]], p.text
return p.l.b[p.offsets[0]:p.offsets[1]], []byte(p.unit)
}
// Comment returns the text of the current comment.
@ -203,16 +217,28 @@ func (p *OpenMetricsParser) Labels(l *labels.Labels) {
p.builder.Reset()
metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start])
p.builder.Add(labels.MetricName, metricName)
m := schema.Metadata{
Name: metricName,
Type: p.mtype,
Unit: p.unit,
}
if p.enableTypeAndUnitLabels {
m.AddToLabels(&p.builder)
} else {
p.builder.Add(labels.MetricName, metricName)
}
for i := 2; i < len(p.offsets); i += 4 {
a := p.offsets[i] - p.start
b := p.offsets[i+1] - p.start
label := unreplace(s[a:b])
if p.enableTypeAndUnitLabels && !m.IsEmptyFor(label) {
// Dropping user provided metadata labels, if found in the OM metadata.
continue
}
c := p.offsets[i+2] - p.start
d := p.offsets[i+3] - p.start
value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d]))
p.builder.Add(label, value)
}
@ -283,7 +309,7 @@ func (p *OpenMetricsParser) CreatedTimestamp() int64 {
return p.ct
}
// Create a new lexer to reset the parser once this function is done executing.
// Create a new lexer and other core state details to reset the parser once this function is done executing.
resetLexer := &openMetricsLexer{
b: p.l.b,
i: p.l.i,
@ -291,15 +317,16 @@ func (p *OpenMetricsParser) CreatedTimestamp() int64 {
err: p.l.err,
state: p.l.state,
}
resetStart := p.start
resetMType := p.mtype
p.skipCTSeries = false
p.ignoreExemplar = true
savedStart := p.start
defer func() {
p.ignoreExemplar = false
p.start = savedStart
p.l = resetLexer
p.start = resetStart
p.mtype = resetMType
p.ignoreExemplar = false
}()
for {
@ -493,11 +520,11 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
case tType:
return EntryType, nil
case tUnit:
p.unit = string(p.text)
m := yoloString(p.l.b[p.offsets[0]:p.offsets[1]])
u := yoloString(p.text)
if len(u) > 0 {
if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' {
return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", u, m)
if len(p.unit) > 0 {
if !strings.HasSuffix(m, p.unit) || len(m) < len(p.unit)+1 || p.l.b[p.offsets[1]-len(p.unit)-1] != '_' {
return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", p.unit, m)
}
}
return EntryUnit, nil

@ -119,356 +119,539 @@ foobar{quantile="0.99"} 150.1`
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
input += "\n# EOF\n"
exp := []parsedEntry{
{
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
}, {
m: "go_gc_duration_seconds",
typ: model.MetricTypeSummary,
}, {
m: "go_gc_duration_seconds",
unit: "seconds",
}, {
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
}, {
m: `go_gc_duration_seconds{quantile="0.25"}`,
v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
}, {
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
}, {
m: "nohelp1",
help: "",
}, {
m: "help2",
help: "escape \\ \n \\ \" \\x chars",
}, {
m: "nounit",
unit: "",
}, {
m: `go_gc_duration_seconds{quantile="1.0",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
m: `go_gc_duration_seconds_count`,
v: 99,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
}, {
m: `some:aggregate:rate5m{a_b="c"}`,
v: 1,
lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
}, {
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
}, {
m: "go_goroutines",
typ: model.MetricTypeGauge,
}, {
m: `go_goroutines`,
v: 33,
t: int64p(123123),
lset: labels.FromStrings("__name__", "go_goroutines"),
}, {
m: "hh",
typ: model.MetricTypeHistogram,
}, {
m: `hh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"),
}, {
m: "gh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `gh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"),
}, {
m: "hhh",
typ: model.MetricTypeHistogram,
}, {
m: `hhh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4},
},
}, {
m: `hhh_count`,
v: 1,
lset: labels.FromStrings("__name__", "hhh_count"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4},
},
}, {
m: "ggh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `ggh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
},
}, {
m: `ggh_count`,
v: 1,
lset: labels.FromStrings("__name__", "ggh_count"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
},
}, {
m: "smr_seconds",
typ: model.MetricTypeSummary,
}, {
m: `smr_seconds_count`,
v: 2,
lset: labels.FromStrings("__name__", "smr_seconds_count"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321},
},
}, {
m: `smr_seconds_sum`,
v: 42,
lset: labels.FromStrings("__name__", "smr_seconds_sum"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321},
},
}, {
m: "ii",
typ: model.MetricTypeInfo,
}, {
m: `ii{foo="bar"}`,
v: 1,
lset: labels.FromStrings("__name__", "ii", "foo", "bar"),
}, {
m: "ss",
typ: model.MetricTypeStateset,
}, {
m: `ss{ss="foo"}`,
v: 1,
lset: labels.FromStrings("__name__", "ss", "ss", "foo"),
}, {
m: `ss{ss="bar"}`,
v: 0,
lset: labels.FromStrings("__name__", "ss", "ss", "bar"),
}, {
m: `ss{A="a"}`,
v: 0,
lset: labels.FromStrings("A", "a", "__name__", "ss"),
}, {
m: "un",
typ: model.MetricTypeUnknown,
}, {
m: "_metric_starting_with_underscore",
v: 1,
lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
}, {
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
}, {
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: "foo",
help: "Counter with and without labels to certify CT is parsed for both cases",
}, {
m: "foo",
typ: model.MetricTypeCounter,
}, {
m: "foo_total",
v: 17,
lset: labels.FromStrings("__name__", "foo_total"),
t: int64p(1520879607789),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
},
ct: 1520872607123,
}, {
m: `foo_total{a="b"}`,
v: 17.0,
lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
t: int64p(1520879607789),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
},
ct: 1520872607123,
}, {
m: `foo_total{le="c"}`,
v: 21.0,
lset: labels.FromStrings("__name__", "foo_total", "le", "c"),
ct: 1520872621123,
}, {
m: `foo_total{le="1"}`,
v: 10.0,
lset: labels.FromStrings("__name__", "foo_total", "le", "1"),
}, {
m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
}, {
m: "bar",
typ: model.MetricTypeSummary,
}, {
m: "bar_count",
v: 17.0,
lset: labels.FromStrings("__name__", "bar_count"),
ct: 1520872608124,
}, {
m: "bar_sum",
v: 324789.3,
lset: labels.FromStrings("__name__", "bar_sum"),
ct: 1520872608124,
}, {
m: `bar{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
ct: 1520872608124,
}, {
m: `bar{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
ct: 1520872608124,
}, {
m: "baz",
help: "Histogram with the same objective as above's summary",
}, {
m: "baz",
typ: model.MetricTypeHistogram,
}, {
m: `baz_bucket{le="0.0"}`,
v: 0,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
ct: 1520872609125,
}, {
m: `baz_bucket{le="+Inf"}`,
v: 17,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
ct: 1520872609125,
}, {
m: `baz_count`,
v: 17,
lset: labels.FromStrings("__name__", "baz_count"),
ct: 1520872609125,
}, {
m: `baz_sum`,
v: 324789.3,
lset: labels.FromStrings("__name__", "baz_sum"),
ct: 1520872609125,
}, {
m: "fizz_created",
help: "Gauge which shouldn't be parsed as CT",
}, {
m: "fizz_created",
typ: model.MetricTypeGauge,
}, {
m: `fizz_created`,
v: 17,
lset: labels.FromStrings("__name__", "fizz_created"),
}, {
m: "something",
help: "Histogram with _created between buckets and summary",
}, {
m: "something",
typ: model.MetricTypeHistogram,
}, {
m: `something_count`,
v: 18,
lset: labels.FromStrings("__name__", "something_count"),
ct: 1520430001000,
}, {
m: `something_sum`,
v: 324789.4,
lset: labels.FromStrings("__name__", "something_sum"),
ct: 1520430001000,
}, {
m: `something_bucket{le="0.0"}`,
v: 1,
lset: labels.FromStrings("__name__", "something_bucket", "le", "0.0"),
ct: 1520430001000,
}, {
m: `something_bucket{le="1"}`,
v: 2,
lset: labels.FromStrings("__name__", "something_bucket", "le", "1.0"),
ct: 1520430001000,
}, {
m: `something_bucket{le="+Inf"}`,
v: 18,
lset: labels.FromStrings("__name__", "something_bucket", "le", "+Inf"),
ct: 1520430001000,
}, {
m: "yum",
help: "Summary with _created between sum and quantiles",
}, {
m: "yum",
typ: model.MetricTypeSummary,
}, {
m: `yum_count`,
v: 20,
lset: labels.FromStrings("__name__", "yum_count"),
ct: 1520430003000,
}, {
m: `yum_sum`,
v: 324789.5,
lset: labels.FromStrings("__name__", "yum_sum"),
ct: 1520430003000,
}, {
m: `yum{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"),
ct: 1520430003000,
}, {
m: `yum{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"),
ct: 1520430003000,
}, {
m: "foobar",
help: "Summary with _created as the first line",
}, {
m: "foobar",
typ: model.MetricTypeSummary,
}, {
m: `foobar_count`,
v: 21,
lset: labels.FromStrings("__name__", "foobar_count"),
ct: 1520430004000,
}, {
m: `foobar_sum`,
v: 324789.6,
lset: labels.FromStrings("__name__", "foobar_sum"),
ct: 1520430004000,
}, {
m: `foobar{quantile="0.95"}`,
v: 123.8,
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"),
ct: 1520430004000,
}, {
m: `foobar{quantile="0.99"}`,
v: 150.1,
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"),
ct: 1520430004000,
}, {
m: "metric",
help: "foo\x00bar",
}, {
m: "null_byte_metric{a=\"abc\x00\"}",
v: 1,
lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"),
},
for _, typeAndUnitEnabled := range []bool{false, true} {
t.Run(fmt.Sprintf("type-and-unit=%v", typeAndUnitEnabled), func(t *testing.T) {
exp := []parsedEntry{
{
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
}, {
m: "go_gc_duration_seconds",
typ: model.MetricTypeSummary,
}, {
m: "go_gc_duration_seconds",
unit: "seconds",
}, {
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.0"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
),
}, {
m: `go_gc_duration_seconds{quantile="0.25"}`,
v: 7.424100000000001e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.25"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
),
}, {
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.5", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
),
}, {
m: "nohelp1",
help: "",
}, {
m: "help2",
help: "escape \\ \n \\ \" \\x chars",
}, {
m: "nounit",
unit: "",
}, {
m: `go_gc_duration_seconds{quantile="1.0",a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "1.0", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
),
}, {
m: `go_gc_duration_seconds_count`,
v: 99,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
),
}, {
m: `some:aggregate:rate5m{a_b="c"}`,
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), model.MetricTypeSummary),
}, {
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
}, {
m: "go_goroutines",
typ: model.MetricTypeGauge,
}, {
m: `go_goroutines`,
v: 33,
t: int64p(123123),
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_goroutines", "__type__", string(model.MetricTypeGauge)),
labels.FromStrings("__name__", "go_goroutines"),
),
}, {
m: "hh",
typ: model.MetricTypeHistogram,
}, {
m: `hh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "hh_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"),
),
}, {
m: "gh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `gh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "gh_bucket", "__type__", string(model.MetricTypeGaugeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"),
),
}, {
m: "hhh",
typ: model.MetricTypeHistogram,
}, {
m: `hhh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "hhh_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4},
},
}, {
m: `hhh_count`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "hhh_count", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "hhh_count"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4},
},
}, {
m: "ggh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `ggh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ggh_bucket", "__type__", string(model.MetricTypeGaugeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
},
}, {
m: `ggh_count`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ggh_count", "__type__", string(model.MetricTypeGaugeHistogram)),
labels.FromStrings("__name__", "ggh_count"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
},
}, {
m: "smr_seconds",
typ: model.MetricTypeSummary,
}, {
m: `smr_seconds_count`,
v: 2,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "smr_seconds_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "smr_seconds_count"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321},
},
}, {
m: `smr_seconds_sum`,
v: 42,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "smr_seconds_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "smr_seconds_sum"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321},
},
}, {
m: "ii",
typ: model.MetricTypeInfo,
}, {
m: `ii{foo="bar"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ii", "__type__", string(model.MetricTypeInfo), "foo", "bar"),
labels.FromStrings("__name__", "ii", "foo", "bar"),
),
}, {
m: "ss",
typ: model.MetricTypeStateset,
}, {
m: `ss{ss="foo"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "ss", "foo"),
labels.FromStrings("__name__", "ss", "ss", "foo"),
),
}, {
m: `ss{ss="bar"}`,
v: 0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "ss", "bar"),
labels.FromStrings("__name__", "ss", "ss", "bar"),
),
}, {
m: `ss{A="a"}`,
v: 0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "A", "a"),
labels.FromStrings("__name__", "ss", "A", "a"),
),
}, {
m: "un",
typ: model.MetricTypeUnknown,
}, {
m: "_metric_starting_with_underscore",
v: 1,
lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
}, {
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
}, {
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: "foo",
help: "Counter with and without labels to certify CT is parsed for both cases",
}, {
m: "foo",
typ: model.MetricTypeCounter,
}, {
m: "foo_total",
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter)),
labels.FromStrings("__name__", "foo_total"),
),
t: int64p(1520879607789),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
},
ct: 1520872607123,
}, {
m: `foo_total{a="b"}`,
v: 17.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "a", "b"),
labels.FromStrings("__name__", "foo_total", "a", "b"),
),
t: int64p(1520879607789),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
},
ct: 1520872607123,
}, {
m: `foo_total{le="c"}`,
v: 21.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "le", "c"),
labels.FromStrings("__name__", "foo_total", "le", "c"),
),
ct: 1520872621123,
}, {
m: `foo_total{le="1"}`,
v: 10.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "le", "1"),
labels.FromStrings("__name__", "foo_total", "le", "1"),
),
}, {
m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
}, {
m: "bar",
typ: model.MetricTypeSummary,
}, {
m: "bar_count",
v: 17.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "bar_count"),
),
ct: 1520872608124,
}, {
m: "bar_sum",
v: 324789.3,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "bar_sum"),
),
ct: 1520872608124,
}, {
m: `bar{quantile="0.95"}`,
v: 123.7,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"),
labels.FromStrings("__name__", "bar", "quantile", "0.95"),
),
ct: 1520872608124,
}, {
m: `bar{quantile="0.99"}`,
v: 150.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"),
labels.FromStrings("__name__", "bar", "quantile", "0.99"),
),
ct: 1520872608124,
}, {
m: "baz",
help: "Histogram with the same objective as above's summary",
}, {
m: "baz",
typ: model.MetricTypeHistogram,
}, {
m: `baz_bucket{le="0.0"}`,
v: 0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_bucket", "__type__", string(model.MetricTypeHistogram), "le", "0.0"),
labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
),
ct: 1520872609125,
}, {
m: `baz_bucket{le="+Inf"}`,
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
),
ct: 1520872609125,
}, {
m: `baz_count`,
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_count", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "baz_count"),
),
ct: 1520872609125,
}, {
m: `baz_sum`,
v: 324789.3,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_sum", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "baz_sum"),
),
ct: 1520872609125,
}, {
m: "fizz_created",
help: "Gauge which shouldn't be parsed as CT",
}, {
m: "fizz_created",
typ: model.MetricTypeGauge,
}, {
m: `fizz_created`,
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "fizz_created", "__type__", string(model.MetricTypeGauge)),
labels.FromStrings("__name__", "fizz_created"),
),
}, {
m: "something",
help: "Histogram with _created between buckets and summary",
}, {
m: "something",
typ: model.MetricTypeHistogram,
}, {
m: `something_count`,
v: 18,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_count", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "something_count"),
),
ct: 1520430001000,
}, {
m: `something_sum`,
v: 324789.4,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_sum", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "something_sum"),
),
ct: 1520430001000,
}, {
m: `something_bucket{le="0.0"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "0.0"),
labels.FromStrings("__name__", "something_bucket", "le", "0.0"),
),
ct: 1520430001000,
}, {
m: `something_bucket{le="1"}`,
v: 2,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "1.0"),
labels.FromStrings("__name__", "something_bucket", "le", "1.0"),
),
ct: 1520430001000,
}, {
m: `something_bucket{le="+Inf"}`,
v: 18,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "something_bucket", "le", "+Inf"),
),
ct: 1520430001000,
}, {
m: "yum",
help: "Summary with _created between sum and quantiles",
}, {
m: "yum",
typ: model.MetricTypeSummary,
}, {
m: `yum_count`,
v: 20,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "yum_count"),
),
ct: 1520430003000,
}, {
m: `yum_sum`,
v: 324789.5,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "yum_sum"),
),
ct: 1520430003000,
}, {
m: `yum{quantile="0.95"}`,
v: 123.7,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"),
labels.FromStrings("__name__", "yum", "quantile", "0.95"),
),
ct: 1520430003000,
}, {
m: `yum{quantile="0.99"}`,
v: 150.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"),
labels.FromStrings("__name__", "yum", "quantile", "0.99"),
),
ct: 1520430003000,
}, {
m: "foobar",
help: "Summary with _created as the first line",
}, {
m: "foobar",
typ: model.MetricTypeSummary,
}, {
m: `foobar_count`,
v: 21,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "foobar_count"),
),
ct: 1520430004000,
}, {
m: `foobar_sum`,
v: 324789.6,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "foobar_sum"),
),
ct: 1520430004000,
}, {
m: `foobar{quantile="0.95"}`,
v: 123.8,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"),
labels.FromStrings("__name__", "foobar", "quantile", "0.95"),
),
ct: 1520430004000,
}, {
m: `foobar{quantile="0.99"}`,
v: 150.1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"),
labels.FromStrings("__name__", "foobar", "quantile", "0.99"),
),
ct: 1520430004000,
}, {
m: "metric",
help: "foo\x00bar",
}, {
m: "null_byte_metric{a=\"abc\x00\"}",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), model.MetricTypeSummary),
},
}
opts := []OpenMetricsOption{WithOMParserCTSeriesSkipped()}
if typeAndUnitEnabled {
opts = append(opts, WithOMParserTypeAndUnitLabels())
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), opts...)
got := testParse(t, p)
requireEntries(t, exp, got)
})
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
got := testParse(t, p)
requireEntries(t, exp, got)
}
func TestUTF8OpenMetricsParse(t *testing.T) {
func TestOpenMetricsParse_UTF8(t *testing.T) {
input := `# HELP "go.gc_duration_seconds" A summary of the GC invocation durations.
# TYPE "go.gc_duration_seconds" summary
# UNIT "go.gc_duration_seconds" seconds

@ -32,6 +32,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/schema"
)
type promlexer struct {
@ -160,16 +161,19 @@ type PromParser struct {
// of the metric name and label names and values for this series.
// p.offsets[0] is the start character of the metric name.
// p.offsets[1] is the end of the metric name.
// Subsequently, p.offsets is a pair of pair of offsets for the positions
// Subsequently, p.offsets is a pair of offsets for the positions
// of the label name and value start and end characters.
offsets []int
enableTypeAndUnitLabels bool
}
// NewPromParser returns a new parser of the byte slice.
func NewPromParser(b []byte, st *labels.SymbolTable) Parser {
func NewPromParser(b []byte, st *labels.SymbolTable, enableTypeAndUnitLabels bool) Parser {
return &PromParser{
l: &promlexer{b: append(b, '\n')},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
l: &promlexer{b: append(b, '\n')},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
enableTypeAndUnitLabels: enableTypeAndUnitLabels,
}
}
@ -226,19 +230,33 @@ func (p *PromParser) Comment() []byte {
// Labels writes the labels of the current sample into the passed labels.
func (p *PromParser) Labels(l *labels.Labels) {
s := yoloString(p.series)
p.builder.Reset()
metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start])
p.builder.Add(labels.MetricName, metricName)
m := schema.Metadata{
Name: metricName,
// NOTE(bwplotka): There is a known case where the type is wrong on a broken exposition
// (see the TestPromParse windspeed metric). Fixing it would require extra
// allocs and benchmarks. Since it was always broken, don't fix for now.
Type: p.mtype,
}
if p.enableTypeAndUnitLabels {
m.AddToLabels(&p.builder)
} else {
p.builder.Add(labels.MetricName, metricName)
}
for i := 2; i < len(p.offsets); i += 4 {
a := p.offsets[i] - p.start
b := p.offsets[i+1] - p.start
label := unreplace(s[a:b])
if p.enableTypeAndUnitLabels && !m.IsEmptyFor(label) {
// Dropping user provided metadata labels, if found in the OM metadata.
continue
}
c := p.offsets[i+2] - p.start
d := p.offsets[i+3] - p.start
value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d]))
p.builder.Add(label, value)
}

@ -14,6 +14,7 @@
package textparse
import (
"fmt"
"io"
"testing"
@ -23,6 +24,45 @@ import (
"github.com/prometheus/prometheus/model/labels"
)
// typeAndUnitLabels selects the expected label set for a test case depending
// on whether the type-and-unit-labels feature is enabled. It exists purely
// for the readability of the expectation tables.
func typeAndUnitLabels(typeAndUnitEnabled bool, enabled, disabled labels.Labels) labels.Labels {
	if !typeAndUnitEnabled {
		return disabled
	}
	return enabled
}
// todoDetectFamilySwitch exists because there's a known TODO that requires a dedicated PR and benchmarks for PROM-39.
// The OM and Prom text formats do NOT require TYPE, HELP or UNIT lines. This means that metric families can switch without
// those metadata entries, e.g.:
// ```
// TYPE go_goroutines gauge
// go_goroutines 33 # previous metric
// different_metric_total 12 # <--- different family!
// ```
// The expected type and unit for "different_metric_total" are obviously unknown, but it's surprisingly expensive and complex
// to reliably write a parser for those cases. Two main issues:
// a. TYPE and UNIT are associated with a "metric family", which is different from the resulting metric name (e.g. histograms).
// b. You have to alloc additional entries to pair TYPE and UNIT with the metric families they refer to (nit)
//
// This problem is elevated for the PROM-39 feature.
//
// Current metadata handling is semi broken here for this as the (a) is expensive and currently not fully accurate
// see: https://github.com/prometheus/prometheus/blob/dbf5d01a62249eddcd202303069f6cf7dd3c4a73/scrape/scrape.go#L1916
//
// To iterate, we keep it "knowingly" broken behind the feature flag.
// TODO(bwplotka): Remove this once we fix the problematic case e.g.
// - introduce more accurate isSeriesPartOfFamily shared helper or even parser method that tells when new metric family starts
func todoDetectFamilySwitch(typeAndUnitEnabled bool, expected labels.Labels, brokenTypeInherited model.MetricType) labels.Labels {
	// Nothing to emulate unless the feature is on and a stale type is inherited.
	if !typeAndUnitEnabled || brokenTypeInherited == model.MetricTypeUnknown {
		return expected
	}
	// Hack for now: inject the (knowingly wrong) inherited type label.
	lb := labels.NewBuilder(expected)
	lb.Set("__type__", string(brokenTypeInherited))
	return lb.Labels()
}
func TestPromParse(t *testing.T) {
input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
@ -55,153 +95,299 @@ some:aggregate:rate5m{a_b="c"} 1
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 33 123123
# TYPE some_counter_total counter
# HELP some_counter_total Help after type.
some_counter_total 12
# HELP nohelp3
_metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1
testmetric{le="10"} 1`
testmetric{le="10"} 1
# HELP type_and_unit_test1 Type specified in metadata overrides.
# TYPE type_and_unit_test1 gauge
type_and_unit_test1{__type__="counter"} 123
# HELP type_and_unit_test2 Type specified in label.
type_and_unit_test2{__type__="counter"} 123`
input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
exp := []parsedEntry{
{
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
}, {
m: "go_gc_duration_seconds",
typ: model.MetricTypeSummary,
}, {
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
}, {
m: `go_gc_duration_seconds{quantile="0.25",}`,
v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
}, {
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
}, {
m: `go_gc_duration_seconds{quantile="0.8", a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.8", "a", "b"),
}, {
m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"),
}, {
m: "prometheus_http_request_duration_seconds",
help: "Histogram of latencies for HTTP requests.",
}, {
m: "prometheus_http_request_duration_seconds",
typ: model.MetricTypeHistogram,
}, {
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="1"}`,
v: 423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "1.0"),
}, {
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="2"}`,
v: 1423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "2.0"),
}, {
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"}`,
v: 1423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "+Inf"),
}, {
m: `prometheus_http_request_duration_seconds_sum{handler="/"}`,
v: 2000,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "handler", "/"),
}, {
m: `prometheus_http_request_duration_seconds_count{handler="/"}`,
v: 1423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "handler", "/"),
}, {
comment: "# Hrandom comment starting with prefix of HELP",
}, {
comment: "#",
}, {
m: `wind_speed{A="2",c="3"}`,
v: 12345,
lset: labels.FromStrings("A", "2", "__name__", "wind_speed", "c", "3"),
}, {
comment: "# comment with escaped \\n newline",
}, {
comment: "# comment with escaped \\ escape character",
}, {
m: "nohelp1",
help: "",
}, {
m: "nohelp2",
help: "",
}, {
m: `go_gc_duration_seconds{ quantile="1.0", a="b" }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
m: `go_gc_duration_seconds { quantile="1.0", a="b" }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
m: `go_gc_duration_seconds { quantile= "1.0", a= "b", }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
m: `go_gc_duration_seconds { quantile = "1.0", a = "b" }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
// NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed.
m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"),
}, {
m: `go_gc_duration_seconds_count`,
v: 99,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
}, {
m: `some:aggregate:rate5m{a_b="c"}`,
v: 1,
lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
}, {
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
}, {
m: "go_goroutines",
typ: model.MetricTypeGauge,
}, {
m: `go_goroutines`,
v: 33,
t: int64p(123123),
lset: labels.FromStrings("__name__", "go_goroutines"),
}, {
m: "_metric_starting_with_underscore",
v: 1,
lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
}, {
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
}, {
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: `testmetric{le="10"}`,
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "le", "10"),
}, {
m: "metric",
help: "foo\x00bar",
}, {
m: "null_byte_metric{a=\"abc\x00\"}",
v: 1,
lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"),
},
}
for _, typeAndUnitEnabled := range []bool{false, true} {
t.Run(fmt.Sprintf("type-and-unit=%v", typeAndUnitEnabled), func(t *testing.T) {
exp := []parsedEntry{
{
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
},
{
m: "go_gc_duration_seconds",
typ: model.MetricTypeSummary,
},
{
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.0"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
),
},
{
m: `go_gc_duration_seconds{quantile="0.25",}`,
v: 7.424100000000001e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.25"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
),
},
{
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.5", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
),
},
{
m: `go_gc_duration_seconds{quantile="0.8", a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.8", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.8", "a", "b"),
),
},
{
m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.9", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"),
),
},
{
m: "prometheus_http_request_duration_seconds",
help: "Histogram of latencies for HTTP requests.",
},
{
m: "prometheus_http_request_duration_seconds",
typ: model.MetricTypeHistogram,
},
{
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="1"}`,
v: 423,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "__type__", string(model.MetricTypeHistogram), "handler", "/", "le", "1.0"),
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "1.0"),
),
},
{
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="2"}`,
v: 1423,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "__type__", string(model.MetricTypeHistogram), "handler", "/", "le", "2.0"),
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "2.0"),
),
},
{
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"}`,
v: 1423,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "__type__", string(model.MetricTypeHistogram), "handler", "/", "le", "+Inf"),
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "+Inf"),
),
},
{
m: `prometheus_http_request_duration_seconds_sum{handler="/"}`,
v: 2000,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "__type__", string(model.MetricTypeHistogram), "handler", "/"),
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "handler", "/"),
),
},
{
m: `prometheus_http_request_duration_seconds_count{handler="/"}`,
v: 1423,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "__type__", string(model.MetricTypeHistogram), "handler", "/"),
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "handler", "/"),
),
},
{
comment: "# Hrandom comment starting with prefix of HELP",
},
{
comment: "#",
},
{
m: `wind_speed{A="2",c="3"}`,
v: 12345,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
// NOTE(bwplotka): This is knowingly broken, inheriting old type when TYPE was not specified on a new metric.
// This was broken forever on a case for a broken exposition. Don't fix for now (expensive).
labels.FromStrings("A", "2", "__name__", "wind_speed", "__type__", string(model.MetricTypeHistogram), "c", "3"),
labels.FromStrings("A", "2", "__name__", "wind_speed", "c", "3"),
),
},
{
comment: "# comment with escaped \\n newline",
},
{
comment: "# comment with escaped \\ escape character",
},
{
m: "nohelp1",
help: "",
},
{
m: "nohelp2",
help: "",
},
{
m: `go_gc_duration_seconds{ quantile="1.0", a="b" }`,
v: 8.3835e-05,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram),
},
{
m: `go_gc_duration_seconds { quantile="1.0", a="b" }`,
v: 8.3835e-05,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram),
},
{
m: `go_gc_duration_seconds { quantile= "1.0", a= "b", }`,
v: 8.3835e-05,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram),
},
{
m: `go_gc_duration_seconds { quantile = "1.0", a = "b" }`,
v: 8.3835e-05,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram),
},
{
// NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed.
m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`,
v: 8.3835e-05,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"), model.MetricTypeHistogram),
},
{
m: `go_gc_duration_seconds_count`,
v: 99,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds_count"), model.MetricTypeHistogram),
},
{
m: `some:aggregate:rate5m{a_b="c"}`,
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), model.MetricTypeHistogram),
},
{
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
},
{
m: "go_goroutines",
typ: model.MetricTypeGauge,
},
{
m: `go_goroutines`,
v: 33,
t: int64p(123123),
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_goroutines", "__type__", string(model.MetricTypeGauge)),
labels.FromStrings("__name__", "go_goroutines"),
),
},
{
m: "some_counter_total",
typ: model.MetricTypeCounter,
},
{
m: "some_counter_total",
help: "Help after type.",
},
{
m: `some_counter_total`,
v: 12,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "some_counter_total", "__type__", string(model.MetricTypeCounter)),
labels.FromStrings("__name__", "some_counter_total"),
),
},
{
m: "nohelp3",
help: "",
},
{
m: "_metric_starting_with_underscore",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "_metric_starting_with_underscore"), model.MetricTypeCounter),
},
{
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"), model.MetricTypeCounter),
},
{
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "testmetric", "label", `"bar"`), model.MetricTypeCounter),
},
{
m: `testmetric{le="10"}`,
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "testmetric", "le", "10"), model.MetricTypeCounter),
},
{
m: "type_and_unit_test1",
help: "Type specified in metadata overrides.",
},
{
m: "type_and_unit_test1",
typ: model.MetricTypeGauge,
},
{
m: "type_and_unit_test1{__type__=\"counter\"}",
v: 123,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "type_and_unit_test1", "__type__", string(model.MetricTypeGauge)),
labels.FromStrings("__name__", "type_and_unit_test1", "__type__", string(model.MetricTypeCounter)),
),
},
{
m: "type_and_unit_test2",
help: "Type specified in label.",
},
{
m: "type_and_unit_test2{__type__=\"counter\"}",
v: 123,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "type_and_unit_test2", "__type__", string(model.MetricTypeCounter)), model.MetricTypeGauge),
},
{
m: "metric",
help: "foo\x00bar",
},
{
m: "null_byte_metric{a=\"abc\x00\"}",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), model.MetricTypeGauge),
},
}
p := NewPromParser([]byte(input), labels.NewSymbolTable())
got := testParse(t, p)
requireEntries(t, exp, got)
p := NewPromParser([]byte(input), labels.NewSymbolTable(), typeAndUnitEnabled)
got := testParse(t, p)
requireEntries(t, exp, got)
})
}
}
func TestUTF8PromParse(t *testing.T) {
@ -274,7 +460,7 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"),
},
}
p := NewPromParser([]byte(input), labels.NewSymbolTable())
p := NewPromParser([]byte(input), labels.NewSymbolTable(), false)
got := testParse(t, p)
requireEntries(t, exp, got)
}
@ -355,7 +541,7 @@ func TestPromParseErrors(t *testing.T) {
}
for i, c := range cases {
p := NewPromParser([]byte(c.input), labels.NewSymbolTable())
p := NewPromParser([]byte(c.input), labels.NewSymbolTable(), false)
var err error
for err == nil {
_, err = p.Next()
@ -408,7 +594,7 @@ func TestPromNullByteHandling(t *testing.T) {
}
for i, c := range cases {
p := NewPromParser([]byte(c.input), labels.NewSymbolTable())
p := NewPromParser([]byte(c.input), labels.NewSymbolTable(), false)
var err error
for err == nil {
_, err = p.Next()

@ -31,6 +31,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
"github.com/prometheus/prometheus/schema"
)
// floatFormatBufPool is exclusively used in formatOpenMetricsFloat.
@ -72,23 +73,25 @@ type ProtobufParser struct {
exemplarReturned bool
// state is marked by the entry we are processing. EntryInvalid implies
// that we have to decode the next MetricFamily.
// that we have to decode the next MetricDescriptor.
state Entry
// Whether to also parse a classic histogram that is also present as a
// native histogram.
parseClassicHistograms bool
parseClassicHistograms bool
enableTypeAndUnitLabels bool
}
// NewProtobufParser returns a parser for the payload in the byte slice.
func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolTable) Parser {
func NewProtobufParser(b []byte, parseClassicHistograms, enableTypeAndUnitLabels bool, st *labels.SymbolTable) Parser {
return &ProtobufParser{
dec: dto.NewMetricStreamingDecoder(b),
entryBytes: &bytes.Buffer{},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16), // TODO(bwplotka): Try base builder.
state: EntryInvalid,
parseClassicHistograms: parseClassicHistograms,
state: EntryInvalid,
parseClassicHistograms: parseClassicHistograms,
enableTypeAndUnitLabels: enableTypeAndUnitLabels,
}
}
@ -551,10 +554,27 @@ func (p *ProtobufParser) Next() (Entry, error) {
// * p.fieldsDone depending on p.fieldPos.
func (p *ProtobufParser) onSeriesOrHistogramUpdate() error {
p.builder.Reset()
p.builder.Add(labels.MetricName, p.getMagicName())
if err := p.dec.Label(&p.builder); err != nil {
return err
if p.enableTypeAndUnitLabels {
_, typ := p.Type()
m := schema.Metadata{
Name: p.getMagicName(),
Type: typ,
Unit: p.dec.GetUnit(),
}
m.AddToLabels(&p.builder)
if err := p.dec.Label(schema.IgnoreOverriddenMetadataLabelsScratchBuilder{
Overwrite: m,
ScratchBuilder: &p.builder,
}); err != nil {
return err
}
} else {
p.builder.Add(labels.MetricName, p.getMagicName())
if err := p.dec.Label(&p.builder); err != nil {
return err
}
}
if needed, name, value := p.getMagicLabel(); needed {

@ -832,8 +832,8 @@ func TestProtobufParse(t *testing.T) {
expected []parsedEntry
}{
{
name: "ignore classic buckets of native histograms",
parser: NewProtobufParser(inputBuf.Bytes(), false, labels.NewSymbolTable()),
name: "parseClassicHistograms=false/enableTypeAndUnitLabels=false",
parser: NewProtobufParser(inputBuf.Bytes(), false, false, labels.NewSymbolTable()),
expected: []parsedEntry{
{
m: "go_build_info",
@ -844,7 +844,7 @@ func TestProtobufParse(t *testing.T) {
typ: model.MetricTypeGauge,
},
{
m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)",
m: "go_build_info\xffchecksum\xff\xffpath\xffgithub.com/prometheus/client_golang\xffversion\xff(devel)",
v: 1,
lset: labels.FromStrings(
"__name__", "go_build_info",
@ -1467,8 +1467,8 @@ func TestProtobufParse(t *testing.T) {
},
},
{
name: "parse classic and native buckets",
parser: NewProtobufParser(inputBuf.Bytes(), true, labels.NewSymbolTable()),
name: "parseClassicHistograms=false/enableTypeAndUnitLabels=true",
parser: NewProtobufParser(inputBuf.Bytes(), false, true, labels.NewSymbolTable()),
expected: []parsedEntry{
{
m: "go_build_info",
@ -1479,7 +1479,679 @@ func TestProtobufParse(t *testing.T) {
typ: model.MetricTypeGauge,
},
{
m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)",
m: "go_build_info\xff__type__\xffgauge\xffchecksum\xff\xffpath\xffgithub.com/prometheus/client_golang\xffversion\xff(devel)",
v: 1,
lset: labels.FromStrings(
"__name__", "go_build_info",
"__type__", string(model.MetricTypeGauge),
"checksum", "",
"path", "github.com/prometheus/client_golang",
"version", "(devel)",
),
},
{
m: "go_memstats_alloc_bytes_total",
help: "Total number of bytes allocated, even if freed.",
},
{
m: "go_memstats_alloc_bytes_total",
unit: "bytes",
},
{
m: "go_memstats_alloc_bytes_total",
typ: model.MetricTypeCounter,
},
{
m: "go_memstats_alloc_bytes_total\xff__type__\xffcounter\xff__unit__\xffbytes",
v: 1.546544e+06,
lset: labels.FromStrings(
"__name__", "go_memstats_alloc_bytes_total",
"__type__", string(model.MetricTypeCounter),
"__unit__", "bytes",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233},
},
},
{
m: "something_untyped",
help: "Just to test the untyped type.",
},
{
m: "something_untyped",
typ: model.MetricTypeUnknown,
},
{
m: "something_untyped",
t: int64p(1234567),
v: 42,
lset: labels.FromStrings(
"__name__", "something_untyped",
),
},
{
m: "test_histogram",
help: "Test histogram with many buckets removed to keep it manageable in size.",
},
{
m: "test_histogram",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram\xff__type__\xffhistogram",
t: int64p(1234568),
shs: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram",
"__type__", string(model.MetricTypeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{
m: "test_gauge_histogram",
help: "Like test_histogram but as gauge histogram.",
},
{
m: "test_gauge_histogram",
typ: model.MetricTypeGaugeHistogram,
},
{
m: "test_gauge_histogram\xff__type__\xffgaugehistogram",
t: int64p(1234568),
shs: &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_gauge_histogram",
"__type__", string(model.MetricTypeGaugeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{
m: "test_float_histogram",
help: "Test float histogram with many buckets removed to keep it manageable in size.",
},
{
m: "test_float_histogram",
typ: model.MetricTypeHistogram,
},
{
m: "test_float_histogram\xff__type__\xffhistogram",
t: int64p(1234568),
fhs: &histogram.FloatHistogram{
Count: 175.0,
ZeroCount: 2.0,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0},
NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0},
},
lset: labels.FromStrings(
"__name__", "test_float_histogram",
"__type__", string(model.MetricTypeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{
m: "test_gauge_float_histogram",
help: "Like test_float_histogram but as gauge histogram.",
},
{
m: "test_gauge_float_histogram",
typ: model.MetricTypeGaugeHistogram,
},
{
m: "test_gauge_float_histogram\xff__type__\xffgaugehistogram",
t: int64p(1234568),
fhs: &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 175.0,
ZeroCount: 2.0,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0},
NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0},
},
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram",
"__type__", string(model.MetricTypeGaugeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{
m: "test_histogram2",
help: "Similar histogram as before but now without sparse buckets.",
},
{
m: "test_histogram2",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram2_count\xff__type__\xffhistogram",
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram2_count",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_histogram2_sum\xff__type__\xffhistogram",
v: 0.000828,
lset: labels.FromStrings(
"__name__", "test_histogram2_sum",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff-0.00048",
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "-0.00048",
),
},
{
m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff-0.00038",
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "-0.00038",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146},
},
},
{
m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff1.0",
v: 16,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "1.0",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false},
},
},
{
m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff+Inf",
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "+Inf",
),
},
{
m: "test_histogram3",
help: "Similar histogram as before but now with integer buckets.",
},
{
m: "test_histogram3",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram3_count\xff__type__\xffhistogram",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_count",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_histogram3_sum\xff__type__\xffhistogram",
v: 50,
lset: labels.FromStrings(
"__name__", "test_histogram3_sum",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff-20.0",
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "-20.0",
),
},
{
m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff20.0",
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "20.0",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146},
},
},
{
m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff30.0",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "30.0",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false},
},
},
{
m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff+Inf",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "+Inf",
),
},
{
m: "test_histogram_family",
help: "Test histogram metric family with two very simple histograms.",
},
{
m: "test_histogram_family",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram_family\xff__type__\xffhistogram\xfffoo\xffbar",
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
Count: 5,
Sum: 12.1,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: 8, Length: 2},
},
NegativeSpans: []histogram.Span{},
PositiveBuckets: []int64{2, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram_family",
"__type__", string(model.MetricTypeHistogram),
"foo", "bar",
),
},
{
m: "test_histogram_family\xff__type__\xffhistogram\xfffoo\xffbaz",
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
Count: 6,
Sum: 13.1,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: 8, Length: 2},
},
NegativeSpans: []histogram.Span{},
PositiveBuckets: []int64{1, 4},
},
lset: labels.FromStrings(
"__name__", "test_histogram_family",
"__type__", string(model.MetricTypeHistogram),
"foo", "baz",
),
},
{
m: "test_float_histogram_with_zerothreshold_zero",
help: "Test float histogram with a zero threshold of zero.",
},
{
m: "test_float_histogram_with_zerothreshold_zero",
typ: model.MetricTypeHistogram,
},
{
m: "test_float_histogram_with_zerothreshold_zero\xff__type__\xffhistogram",
fhs: &histogram.FloatHistogram{
Count: 5.0,
Sum: 12.1,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: 8, Length: 2},
},
PositiveBuckets: []float64{2.0, 3.0},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "test_float_histogram_with_zerothreshold_zero",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "rpc_durations_seconds",
help: "RPC latency distributions.",
},
{
m: "rpc_durations_seconds",
typ: model.MetricTypeSummary,
},
{
m: "rpc_durations_seconds_count\xff__type__\xffsummary\xffservice\xffexponential",
v: 262,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds_count",
"__type__", string(model.MetricTypeSummary),
"service", "exponential",
),
},
{
m: "rpc_durations_seconds_sum\xff__type__\xffsummary\xffservice\xffexponential",
v: 0.00025551262820703587,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds_sum",
"__type__", string(model.MetricTypeSummary),
"service", "exponential",
),
},
{
m: "rpc_durations_seconds\xff__type__\xffsummary\xffquantile\xff0.5\xffservice\xffexponential",
v: 6.442786329648548e-07,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds",
"__type__", string(model.MetricTypeSummary),
"quantile", "0.5",
"service", "exponential",
),
},
{
m: "rpc_durations_seconds\xff__type__\xffsummary\xffquantile\xff0.9\xffservice\xffexponential",
v: 1.9435742936658396e-06,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds",
"__type__", string(model.MetricTypeSummary),
"quantile", "0.9",
"service", "exponential",
),
},
{
m: "rpc_durations_seconds\xff__type__\xffsummary\xffquantile\xff0.99\xffservice\xffexponential",
v: 4.0471608667037015e-06,
lset: labels.FromStrings(
"__type__", string(model.MetricTypeSummary),
"__name__", "rpc_durations_seconds",
"quantile", "0.99",
"service", "exponential",
),
},
{
m: "without_quantiles",
help: "A summary without quantiles.",
},
{
m: "without_quantiles",
typ: model.MetricTypeSummary,
},
{
m: "without_quantiles_count\xff__type__\xffsummary",
v: 42,
lset: labels.FromStrings(
"__name__", "without_quantiles_count",
"__type__", string(model.MetricTypeSummary),
),
},
{
m: "without_quantiles_sum\xff__type__\xffsummary",
v: 1.234,
lset: labels.FromStrings(
"__name__", "without_quantiles_sum",
"__type__", string(model.MetricTypeSummary),
),
},
{
m: "empty_histogram",
help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.",
},
{
m: "empty_histogram",
typ: model.MetricTypeHistogram,
},
{
m: "empty_histogram\xff__type__\xffhistogram",
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
PositiveSpans: []histogram.Span{},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "empty_histogram",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_counter_with_createdtimestamp",
help: "A counter with a created timestamp.",
},
{
m: "test_counter_with_createdtimestamp",
typ: model.MetricTypeCounter,
},
{
m: "test_counter_with_createdtimestamp\xff__type__\xffcounter",
v: 42,
ct: 1625851153146,
lset: labels.FromStrings(
"__name__", "test_counter_with_createdtimestamp",
"__type__", string(model.MetricTypeCounter),
),
},
{
m: "test_summary_with_createdtimestamp",
help: "A summary with a created timestamp.",
},
{
m: "test_summary_with_createdtimestamp",
typ: model.MetricTypeSummary,
},
{
m: "test_summary_with_createdtimestamp_count\xff__type__\xffsummary",
v: 42,
ct: 1625851153146,
lset: labels.FromStrings(
"__name__", "test_summary_with_createdtimestamp_count",
"__type__", string(model.MetricTypeSummary),
),
},
{
m: "test_summary_with_createdtimestamp_sum\xff__type__\xffsummary",
v: 1.234,
ct: 1625851153146,
lset: labels.FromStrings(
"__name__", "test_summary_with_createdtimestamp_sum",
"__type__", string(model.MetricTypeSummary),
),
},
{
m: "test_histogram_with_createdtimestamp",
help: "A histogram with a created timestamp.",
},
{
m: "test_histogram_with_createdtimestamp",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram_with_createdtimestamp\xff__type__\xffhistogram",
ct: 1625851153146,
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
PositiveSpans: []histogram.Span{},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "test_histogram_with_createdtimestamp",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_gaugehistogram_with_createdtimestamp",
help: "A gauge histogram with a created timestamp.",
},
{
m: "test_gaugehistogram_with_createdtimestamp",
typ: model.MetricTypeGaugeHistogram,
},
{
m: "test_gaugehistogram_with_createdtimestamp\xff__type__\xffgaugehistogram",
ct: 1625851153146,
shs: &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
PositiveSpans: []histogram.Span{},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "test_gaugehistogram_with_createdtimestamp",
"__type__", string(model.MetricTypeGaugeHistogram),
),
},
{
m: "test_histogram_with_native_histogram_exemplars",
help: "A histogram with native histogram exemplars.",
},
{
m: "test_histogram_with_native_histogram_exemplars",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram_with_native_histogram_exemplars\xff__type__\xffhistogram",
t: int64p(1234568),
shs: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars",
"__type__", string(model.MetricTypeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156},
},
},
{
m: "test_histogram_with_native_histogram_exemplars2",
help: "Another histogram with native histogram exemplars.",
},
{
m: "test_histogram_with_native_histogram_exemplars2",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram_with_native_histogram_exemplars2\xff__type__\xffhistogram",
t: int64p(1234568),
shs: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars2",
"__type__", string(model.MetricTypeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
},
},
{
name: "parseClassicHistograms=true/enableTypeAndUnitLabels=false",
parser: NewProtobufParser(inputBuf.Bytes(), true, false, labels.NewSymbolTable()),
expected: []parsedEntry{
{
m: "go_build_info",
help: "Build information about the main Go module.",
},
{
m: "go_build_info",
typ: model.MetricTypeGauge,
},
{
m: "go_build_info\xffchecksum\xff\xffpath\xffgithub.com/prometheus/client_golang\xffversion\xff(devel)",
v: 1,
lset: labels.FromStrings(
"__name__", "go_build_info",

@ -23,8 +23,6 @@ import (
proto "github.com/gogo/protobuf/proto"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
)
type MetricStreamingDecoder struct {
@ -153,12 +151,16 @@ func (m *MetricStreamingDecoder) GetLabel() {
panic("don't use GetLabel, use Label instead")
}
// scratchBuilder is the minimal label-appending interface needed by Label
// and parseLabel. It is satisfied by *labels.ScratchBuilder and by wrappers
// around it (e.g. schema.IgnoreOverriddenMetadataLabelsScratchBuilder),
// letting callers intercept or filter labels as they are added.
type scratchBuilder interface {
	Add(name, value string)
}
// Label parses labels into labels scratch builder. Metric name is missing
// given the protobuf metric model and has to be deduced from the metric family name.
// TODO: The method name intentionally hide MetricStreamingDecoder.Metric.Label
// field to avoid direct use (it's not parsed). In future generator will generate
// structs tailored for streaming decoding.
func (m *MetricStreamingDecoder) Label(b *labels.ScratchBuilder) error {
func (m *MetricStreamingDecoder) Label(b scratchBuilder) error {
for _, l := range m.labels {
if err := parseLabel(m.mData[l.start:l.end], b); err != nil {
return err
@ -169,7 +171,7 @@ func (m *MetricStreamingDecoder) Label(b *labels.ScratchBuilder) error {
// parseLabel is essentially LabelPair.Unmarshal but directly adding into scratch builder
// and reusing strings.
func parseLabel(dAtA []byte, b *labels.ScratchBuilder) error {
func parseLabel(dAtA []byte, b scratchBuilder) error {
var name, value string
l := len(dAtA)
iNdEx := 0

@ -44,6 +44,7 @@ import (
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/annotations"
@ -1821,7 +1822,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
it.Reset(chkIter)
metric := selVS.Series[i].Labels()
if !ev.enableDelayedNameRemoval && dropName {
metric = metric.DropMetricName()
metric = metric.DropReserved(schema.IsMetadataLabel)
}
ss := Series{
Metric: metric,
@ -1960,7 +1961,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
if e.Op == parser.SUB {
for i := range mat {
if !ev.enableDelayedNameRemoval {
mat[i].Metric = mat[i].Metric.DropMetricName()
mat[i].Metric = mat[i].Metric.DropReserved(schema.IsMetadataLabel)
}
mat[i].DropName = true
for j := range mat[i].Floats {
@ -2709,7 +2710,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
}
metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh)
if !ev.enableDelayedNameRemoval && returnBool {
metric = metric.DropMetricName()
metric = metric.DropReserved(schema.IsMetadataLabel)
}
insertedSigs, exists := matchedSigs[sig]
if matching.Card == parser.CardOneToOne {
@ -2776,8 +2777,9 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
}
str := string(enh.lblResultBuf)
if shouldDropMetricName(op) {
enh.lb.Del(labels.MetricName)
if changesMetricSchema(op) {
// Setting an empty Metadata deletes those labels if they exist.
schema.Metadata{}.SetToLabels(enh.lb)
}
if matching.Card == parser.CardOneToOne {
@ -2836,9 +2838,9 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
if keep {
lhsSample.F = float
lhsSample.H = histogram
if shouldDropMetricName(op) || returnBool {
if changesMetricSchema(op) || returnBool {
if !ev.enableDelayedNameRemoval {
lhsSample.Metric = lhsSample.Metric.DropMetricName()
lhsSample.Metric = lhsSample.Metric.DropReserved(schema.IsMetadataLabel)
}
lhsSample.DropName = true
}
@ -3544,7 +3546,7 @@ func (ev *evaluator) cleanupMetricLabels(v parser.Value) {
mat := v.(Matrix)
for i := range mat {
if mat[i].DropName {
mat[i].Metric = mat[i].Metric.DropMetricName()
mat[i].Metric = mat[i].Metric.DropReserved(schema.IsMetadataLabel)
}
}
if mat.ContainsSameLabelset() {
@ -3554,7 +3556,7 @@ func (ev *evaluator) cleanupMetricLabels(v parser.Value) {
vec := v.(Vector)
for i := range vec {
if vec[i].DropName {
vec[i].Metric = vec[i].Metric.DropMetricName()
vec[i].Metric = vec[i].Metric.DropReserved(schema.IsMetadataLabel)
}
}
if vec.ContainsSameLabelset() {
@ -3656,9 +3658,9 @@ func btos(b bool) float64 {
return 0
}
// shouldDropMetricName returns whether the metric name should be dropped in the
// result of the op operation.
func shouldDropMetricName(op parser.ItemType) bool {
// changesMetricSchema returns true if the op operation changes the semantic
// meaning or schema of the metric.
func changesMetricSchema(op parser.ItemType) bool {
switch op {
case parser.ADD, parser.SUB, parser.DIV, parser.MUL, parser.POW, parser.MOD, parser.ATAN2:
return true

@ -31,6 +31,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/util/annotations"
)
@ -577,7 +578,7 @@ func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, ann
continue
}
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,
@ -978,7 +979,7 @@ func simpleFloatFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) f
for _, el := range vals[0].(Vector) {
if el.H == nil { // Process only float samples.
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,
@ -1128,7 +1129,7 @@ func funcTimestamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelpe
vec := vals[0].(Vector)
for _, el := range vec {
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,
@ -1347,7 +1348,7 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
continue
}
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
@ -1362,7 +1363,7 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
continue
}
if !enh.enableDelayedNameRemoval {
mb.metric = mb.metric.DropMetricName()
mb.metric = mb.metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
@ -1393,7 +1394,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
continue
}
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
@ -1411,7 +1412,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
}
if !enh.enableDelayedNameRemoval {
mb.metric = mb.metric.DropMetricName()
mb.metric = mb.metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
@ -1629,7 +1630,7 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo
}
t := time.Unix(int64(el.F), 0).UTC()
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,

@ -61,7 +61,7 @@ const (
var symbolTable = labels.NewSymbolTable()
func fuzzParseMetricWithContentType(in []byte, contentType string) int {
p, warning := textparse.New(in, contentType, "", false, false, symbolTable)
p, warning := textparse.New(in, contentType, "", false, false, false, symbolTable)
if p == nil || warning != nil {
// An invalid content type is being passed, which should not happen
// in this context.

@ -0,0 +1,280 @@
# Test PROM-39 type and unit labels with operators.
# A. Healthy case
# NOTE: __unit__="request" is not a best practice unit, but keeping that to test the unit handling.
load 5m
http_requests_total{__type__="counter", __unit__="request", job="api-server", instance="0", group="production"} 0+10x10
http_requests_total{__type__="counter", __unit__="request", job="api-server", instance="1", group="production"} 0+20x10
http_requests_total{__type__="counter", __unit__="request", job="api-server", instance="0", group="canary"} 0+30x10
http_requests_total{__type__="counter", __unit__="request", job="api-server", instance="1", group="canary"} 0+40x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="0", group="production"} 0+50x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="1", group="production"} 0+60x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="0", group="canary"} 0+70x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="1", group="canary"} 0+80x10
eval instant at 50m SUM(http_requests_total) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests_total{__type__="counter", __unit__="request"}) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM({__type__="counter"}) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM({__unit__="request"}) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM({__type__="counter", __unit__="request"}) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests_total) BY (job) - COUNT(http_requests_total) BY (job)
{job="api-server"} 996
{job="app-server"} 2596
eval instant at 50m -http_requests_total{job="api-server",instance="0",group="production"}
{job="api-server",instance="0",group="production"} -100
eval instant at 50m +http_requests_total{job="api-server",instance="0",group="production"}
http_requests_total{__type__="counter", __unit__="request", job="api-server",instance="0",group="production"} 100
eval instant at 50m -10^3 * - SUM(http_requests_total) BY (job) ^ -1
{job="api-server"} 1
{job="app-server"} 0.38461538461538464
eval instant at 50m SUM(http_requests_total) BY (job) / 0
{job="api-server"} +Inf
{job="app-server"} +Inf
eval instant at 50m http_requests_total{group="canary", instance="0", job="api-server"} / 0
{group="canary", instance="0", job="api-server"} +Inf
eval instant at 50m 0 * http_requests_total{group="canary", instance="0", job="api-server"} % 0
{group="canary", instance="0", job="api-server"} NaN
eval instant at 50m http_requests_total{job="api-server", group="canary"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="api-server"} 400
eval instant at 50m rate(http_requests_total[25m]) * 25 * 60
{group="canary", instance="0", job="api-server"} 150
{group="canary", instance="0", job="app-server"} 350
{group="canary", instance="1", job="api-server"} 200
{group="canary", instance="1", job="app-server"} 400
{group="production", instance="0", job="api-server"} 50
{group="production", instance="0", job="app-server"} 249.99999999999997
{group="production", instance="1", job="api-server"} 100
{group="production", instance="1", job="app-server"} 300
eval instant at 50m http_requests_total{group="canary"} and http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="0", job="app-server"} 700
eval instant at 50m (http_requests_total{group="canary"} + 1) and http_requests_total{instance="0"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
eval instant at 50m http_requests_total{group="canary"} or http_requests_total{group="production"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="0", job="app-server"} 700
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="api-server"} 400
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="app-server"} 800
http_requests_total{__type__="counter", __unit__="request", group="production", instance="0", job="api-server"} 100
http_requests_total{__type__="counter", __unit__="request", group="production", instance="0", job="app-server"} 500
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="api-server"} 200
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="app-server"} 600
# On overlap the rhs samples must be dropped.
eval instant at 50m (http_requests_total{group="canary"} + 1) or http_requests_total{instance="1"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
{group="canary", instance="1", job="api-server"} 401
{group="canary", instance="1", job="app-server"} 801
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="api-server"} 200
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="app-server"} 600
eval instant at 50m http_requests_total{group="canary"} unless http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="api-server"} 400
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} unless on(job) http_requests_total{instance="0"}
eval instant at 50m http_requests_total{group="canary"} unless on(job, instance) http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="api-server"} 400
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} / on(instance,job) http_requests_total{group="production"}
{instance="0", job="api-server"} 3
{instance="0", job="app-server"} 1.4
{instance="1", job="api-server"} 2
{instance="1", job="app-server"} 1.3333333333333333
eval instant at 50m http_requests_total{group="canary"} unless ignoring(group, instance) http_requests_total{instance="0"}
eval instant at 50m http_requests_total{group="canary"} unless ignoring(group) http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="api-server"} 400
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} / ignoring(group) http_requests_total{group="production"}
{instance="0", job="api-server"} 3
{instance="0", job="app-server"} 1.4
{instance="1", job="api-server"} 2
{instance="1", job="app-server"} 1.3333333333333333
# Comparisons.
eval instant at 50m SUM(http_requests_total) BY (job) > 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests_total) BY (job) == bool SUM(http_requests_total) BY (job)
{job="api-server"} 1
{job="app-server"} 1
eval instant at 50m SUM(http_requests_total) BY (job) != bool SUM(http_requests_total) BY (job)
{job="api-server"} 0
{job="app-server"} 0
eval instant at 50m http_requests_total{job="api-server", instance="0", group="production"} == bool 100
{job="api-server", instance="0", group="production"} 1
clear
# B. Inconsistent type and unit cases for unique series.
# NOTE: __unit__="request" is not a best practice unit, but keeping that to test the unit handling.
load 5m
http_requests_total{__type__="counter", __unit__="request", job="api-server", instance="0", group="production"} 0+10x10
http_requests_total{__type__="gauge", __unit__="request", job="api-server", instance="1", group="production"} 0+20x10
http_requests_total{__type__="gauge", __unit__="not-request", job="api-server", instance="0", group="canary"} 0+30x10
http_requests_total{__type__="counter", __unit__="not-request", job="api-server", instance="1", group="canary"} 0+40x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="0", group="production"} 0+50x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="1", group="production"} 0+60x10
http_requests_total{__type__="counter", __unit__="", job="app-server", instance="0", group="canary"} 0+70x10
http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10
eval instant at 50m SUM(http_requests_total) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests_total{__type__="counter", __unit__="request"}) BY (job)
{job="api-server"} 100
{job="app-server"} 1100
eval instant at 50m SUM({__type__="counter"}) BY (job)
{job="api-server"} 500
{job="app-server"} 1800
eval instant at 50m SUM({__unit__="request"}) BY (job)
{job="api-server"} 300
{job="app-server"} 1100
eval instant at 50m SUM({__type__="counter", __unit__="request"}) BY (job)
{job="api-server"} 100
{job="app-server"} 1100
eval instant at 50m SUM(http_requests_total) BY (job) - COUNT(http_requests_total) BY (job)
{job="api-server"} 996
{job="app-server"} 2596
eval instant at 50m -http_requests_total{job="api-server",instance="0",group="production"}
{job="api-server",instance="0",group="production"} -100
eval instant at 50m +http_requests_total{job="api-server",instance="0",group="production"}
http_requests_total{__type__="counter", __unit__="request", job="api-server",instance="0",group="production"} 100
eval instant at 50m -10^3 * - SUM(http_requests_total) BY (job) ^ -1
{job="api-server"} 1
{job="app-server"} 0.38461538461538464
eval instant at 50m SUM(http_requests_total) BY (job) / 0
{job="api-server"} +Inf
{job="app-server"} +Inf
eval instant at 50m http_requests_total{group="canary", instance="0", job="api-server"} / 0
{group="canary", instance="0", job="api-server"} +Inf
eval instant at 50m 0 * http_requests_total{group="canary", instance="0", job="api-server"} % 0
{group="canary", instance="0", job="api-server"} NaN
eval instant at 50m http_requests_total{job="api-server", group="canary"}
http_requests_total{__type__="gauge", __unit__="not-request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
eval instant at 50m http_requests_total{__type__="counter", job="api-server", group="canary"}
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
eval instant at 50m rate(http_requests_total[25m]) * 25 * 60
{group="canary", instance="0", job="api-server"} 150
{group="canary", instance="0", job="app-server"} 350
{group="canary", instance="1", job="api-server"} 200
{group="canary", instance="1", job="app-server"} 400
{group="production", instance="0", job="api-server"} 50
{group="production", instance="0", job="app-server"} 249.99999999999997
{group="production", instance="1", job="api-server"} 100
{group="production", instance="1", job="app-server"} 300
eval instant at 50m http_requests_total{group="canary"} and http_requests_total{instance="0"}
http_requests_total{__type__="gauge", __unit__="not-request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="", group="canary", instance="0", job="app-server"} 700
eval instant at 50m (http_requests_total{group="canary"} + 1) and http_requests_total{instance="0"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
eval instant at 50m http_requests_total{group="canary"} or http_requests_total{group="production"}
http_requests_total{__type__="gauge", __unit__="not-request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="", group="canary", instance="0", job="app-server"} 700
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
http_requests_total{group="canary", instance="1", job="app-server"} 800
http_requests_total{__type__="counter", __unit__="request", group="production", instance="0", job="api-server"} 100
http_requests_total{__type__="counter", __unit__="request", group="production", instance="0", job="app-server"} 500
http_requests_total{__type__="gauge", __unit__="request", group="production", instance="1", job="api-server"} 200
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="app-server"} 600
# On overlap the rhs samples must be dropped.
eval instant at 50m (http_requests_total{group="canary"} + 1) or http_requests_total{instance="1"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
{group="canary", instance="1", job="api-server"} 401
{group="canary", instance="1", job="app-server"} 801
http_requests_total{__type__="gauge", __unit__="request", group="production", instance="1", job="api-server"} 200
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="app-server"} 600
eval instant at 50m http_requests_total{group="canary"} unless http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
http_requests_total{group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} unless on(job) http_requests_total{instance="0"}
eval instant at 50m http_requests_total{group="canary"} unless on(job, instance) http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
http_requests_total{group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} / on(instance,job) http_requests_total{group="production"}
{instance="0", job="api-server"} 3
{instance="0", job="app-server"} 1.4
{instance="1", job="api-server"} 2
{instance="1", job="app-server"} 1.3333333333333333
eval instant at 50m http_requests_total{group="canary"} unless ignoring(group) http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
http_requests_total{group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} / ignoring(group) http_requests_total{group="production"}
# Comparisons.
eval instant at 50m SUM(http_requests_total) BY (job) > 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests_total) BY (job) == bool SUM(http_requests_total) BY (job)
{job="api-server"} 1
{job="app-server"} 1
eval instant at 50m SUM(http_requests_total) BY (job) != bool SUM(http_requests_total) BY (job)
{job="api-server"} 0
{job="app-server"} 0
eval instant at 50m http_requests_total{job="api-server", instance="0", group="production"} == bool 100
{job="api-server", instance="0", group="production"} 1

@ -0,0 +1,157 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
)
const (
	// Special label names and selectors for schema.Metadata fields.
	// They are currently private to ensure __name__, __type__ and __unit__ are used
	// together and remain extensible in Prometheus. See NewMetadataFromLabels and Metadata
	// methods for the interactions with the labels package structs.
	metricName = "__name__"
	metricType = "__type__"
	metricUnit = "__unit__"
)

// IsMetadataLabel reports whether the given label name is one of the special
// schema Metadata labels (__name__, __type__ or __unit__).
func IsMetadataLabel(name string) bool {
	switch name {
	case metricName, metricType, metricUnit:
		return true
	default:
		return false
	}
}
// Metadata represents the core metric schema/metadata elements that:
// * are describing and identifying the metric schema/shape (e.g. name, type and unit).
// * are contributing to the general metric/series identity.
// * with the type-and-unit feature, are stored as Prometheus labels.
//
// Historically, similar information was encoded in the labels.MetricName (suffixes)
// and in the separate metadata.Metadata structures. However, with the
// type-and-unit-label feature (PROM-39), this information can now be stored directly
// in the special schema metadata labels, which offers better reliability (e.g. atomicity),
// compatibility and, in many cases, efficiency.
//
// NOTE: Metadata in the current form is generally similar (yet different) to:
//   - The MetricFamily definition in OpenMetrics (https://prometheus.io/docs/specs/om/open_metrics_spec/#metricfamily).
//     However, there is a small and important distinction around the metric name semantics
//     for the "classic" representation of complex metrics like histograms. The
//     Metadata.Name follows the __name__ semantics. See Name for details.
//   - Original metadata.Metadata entries. However, not all fields in that metadata
//     are "identifiable", notably the help field, plus metadata does not contain Name.
type Metadata struct {
	// Name represents the final metric name for a Prometheus series.
	// NOTE(bwplotka): Prometheus scrape formats (e.g. OpenMetrics) define
	// the "metric family name". The Metadata.Name (so __name__ label) is not
	// always the same as the MetricFamily.Name e.g.:
	// * OpenMetrics metric family name on scrape: "acme_http_router_request_seconds"
	// * Resulting Prometheus metric name: "acme_http_router_request_seconds_sum"
	//
	// Empty string means a nameless metric (e.g. result of a PromQL function).
	Name string

	// Type represents the metric type. Empty value ("") is semantically
	// equivalent to model.MetricTypeUnknown (see IsTypeEmpty).
	Type model.MetricType

	// Unit represents the metric unit. Empty string means a unitless metric (e.g.
	// result of a PromQL function).
	//
	// NOTE: Currently the unit value is not strictly defined other than OpenMetrics
	// recommendations: https://prometheus.io/docs/specs/om/open_metrics_spec/#units-and-base-units
	// TODO(bwplotka): Consider a stricter validation and rules e.g. lowercase only or UCUM standard.
	// Read more in https://github.com/prometheus/proposals/blob/main/proposals/2024-09-25_metadata-labels.md#more-strict-unit-and-type-value-definition
	Unit string
}
// NewMetadataFromLabels returns the schema metadata extracted from the given
// label set. A missing or empty __type__ label is normalized to
// model.MetricTypeUnknown; __name__ and __unit__ are taken verbatim.
func NewMetadataFromLabels(ls labels.Labels) Metadata {
	m := Metadata{
		Name: ls.Get(metricName),
		Type: model.MetricTypeUnknown,
		Unit: ls.Get(metricUnit),
	}
	if t := ls.Get(metricType); t != "" {
		m.Type = model.MetricType(t)
	}
	return m
}
// IsTypeEmpty returns true if the metric type is empty (not set).
// Both "" and model.MetricTypeUnknown count as empty.
func (m Metadata) IsTypeEmpty() bool {
	switch m.Type {
	case "", model.MetricTypeUnknown:
		return true
	}
	return false
}
// IsEmptyFor returns true if the Metadata field represented by the given
// labelName is empty (not set). If labelName is not representing any Metadata
// field, IsEmptyFor returns true.
func (m Metadata) IsEmptyFor(labelName string) bool {
	if labelName == metricName {
		return m.Name == ""
	}
	if labelName == metricType {
		return m.IsTypeEmpty()
	}
	if labelName == metricUnit {
		return m.Unit == ""
	}
	// Not a metadata label at all, so there is nothing to be non-empty.
	return true
}
// AddToLabels adds the metric schema metadata as labels into the given
// labels.ScratchBuilder. Empty Metadata fields are ignored (not added);
// an unknown type counts as empty (see IsTypeEmpty).
func (m Metadata) AddToLabels(b *labels.ScratchBuilder) {
	// Helper that skips empty values, matching the "ignore empty" contract.
	add := func(name, value string) {
		if value != "" {
			b.Add(name, value)
		}
	}

	add(metricName, m.Name)
	var typ string
	if !m.IsTypeEmpty() {
		typ = string(m.Type)
	}
	add(metricType, typ)
	add(metricUnit, m.Unit)
}
// SetToLabels injects the metric schema metadata as labels into the
// labels.Builder. It follows the labels.Builder.Set semantics, so empty
// Metadata fields will remove the corresponding existing labels if they were
// previously set.
func (m Metadata) SetToLabels(b *labels.Builder) {
	// Unknown equals empty semantically, so setting "" removes the label on
	// unknown too, as per the method contract above.
	typ := string(m.Type)
	if m.Type == model.MetricTypeUnknown {
		typ = ""
	}

	b.Set(metricName, m.Name)
	b.Set(metricType, typ)
	b.Set(metricUnit, m.Unit)
}
// IgnoreOverriddenMetadataLabelsScratchBuilder is a wrapper over a labels scratch builder
// that ignores label additions that would collide with non-empty Overwrite Metadata fields.
// This lets callers inject high-priority metadata first and then add incoming labels,
// keeping the incoming values only for metadata fields the Overwrite does not set.
type IgnoreOverriddenMetadataLabelsScratchBuilder struct {
	*labels.ScratchBuilder

	// Overwrite holds the Metadata whose non-empty fields take precedence
	// over name/value pairs passed to Add.
	Overwrite Metadata
}
// Add records a name/value pair, unless it would collide with a non-empty
// Overwrite Metadata field. Note if you Add the same name twice you will get
// a duplicate label, which is invalid.
func (b IgnoreOverriddenMetadataLabelsScratchBuilder) Add(name, value string) {
	// Only pass the pair through when the Overwrite does not already provide
	// a value for this (metadata) label name.
	if b.Overwrite.IsEmptyFor(name) {
		b.ScratchBuilder.Add(name, value)
	}
}

@ -0,0 +1,153 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"fmt"
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/util/testutil"
)
// TestMetadata exercises the round-trip between Metadata and labels:
// labels -> NewMetadataFromLabels -> Metadata, the IsEmptyFor/IsTypeEmpty
// accessors, and Metadata -> labels via both AddToLabels (ScratchBuilder)
// and SetToLabels (Builder), across combinations of empty/non-empty fields.
func TestMetadata(t *testing.T) {
	testMeta := Metadata{
		Name: "metric_total",
		Type: model.MetricTypeCounter,
		Unit: "seconds",
	}
	for _, tcase := range []struct {
		emptyName, emptyType, emptyUnit bool
	}{
		{},
		{emptyName: true},
		{emptyType: true},
		{emptyUnit: true},
		{emptyName: true, emptyType: true, emptyUnit: true},
	} {
		var (
			expectedMeta   Metadata
			expectedLabels labels.Labels
		)
		{
			// Setup expectations: build the label set and the Metadata that
			// should correspond to it, adding only the non-empty fields.
			// "foo" is an unrelated label that must survive all conversions.
			lb := labels.NewScratchBuilder(0)
			lb.Add("foo", "bar")
			if !tcase.emptyName {
				lb.Add(metricName, testMeta.Name)
				expectedMeta.Name = testMeta.Name
			}
			if !tcase.emptyType {
				lb.Add(metricType, string(testMeta.Type))
				expectedMeta.Type = testMeta.Type
			} else {
				// NewMetadataFromLabels normalizes a missing type to unknown.
				expectedMeta.Type = model.MetricTypeUnknown
			}
			if !tcase.emptyUnit {
				lb.Add(metricUnit, testMeta.Unit)
				expectedMeta.Unit = testMeta.Unit
			}
			lb.Sort()
			expectedLabels = lb.Labels()
		}
		t.Run(fmt.Sprintf("meta=%#v", expectedMeta), func(t *testing.T) {
			{
				// From labels to Metadata.
				got := NewMetadataFromLabels(expectedLabels)
				require.Equal(t, expectedMeta, got)
			}
			{
				// Empty methods must agree with how the case was constructed.
				require.Equal(t, tcase.emptyName, expectedMeta.IsEmptyFor(metricName))
				require.Equal(t, tcase.emptyType, expectedMeta.IsEmptyFor(metricType))
				require.Equal(t, tcase.emptyType, expectedMeta.IsTypeEmpty())
				require.Equal(t, tcase.emptyUnit, expectedMeta.IsEmptyFor(metricUnit))
			}
			{
				// From Metadata to labels for various builders; both paths
				// must reproduce the exact expected label set.
				slb := labels.NewScratchBuilder(0)
				slb.Add("foo", "bar")
				expectedMeta.AddToLabels(&slb)
				slb.Sort()
				testutil.RequireEqual(t, expectedLabels, slb.Labels())

				lb := labels.NewBuilder(labels.FromStrings("foo", "bar"))
				expectedMeta.SetToLabels(lb)
				testutil.RequireEqual(t, expectedLabels, lb.Labels())
			}
		})
	}
}
// TestIgnoreOverriddenMetadataLabelsScratchBuilder checks the precedence rules
// of the wrapping builder against a fixed incoming label set.
func TestIgnoreOverriddenMetadataLabelsScratchBuilder(t *testing.T) {
	// PROM-39 specifies that metadata labels should be sourced primarily from the metadata structures.
	// However, the original labels should be preserved IF the metadata structure does not set or support certain information.
	// Test those cases with common label interactions.
	incomingLabels := labels.FromStrings(metricName, "different_name", metricType, string(model.MetricTypeSummary), metricUnit, "MB", "foo", "bar")

	cases := []struct {
		highPrioMeta   Metadata
		expectedLabels labels.Labels
	}{
		{
			expectedLabels: incomingLabels,
		},
		{
			highPrioMeta: Metadata{
				Name: "metric_total",
				Type: model.MetricTypeCounter,
				Unit: "seconds",
			},
			expectedLabels: labels.FromStrings(metricName, "metric_total", metricType, string(model.MetricTypeCounter), metricUnit, "seconds", "foo", "bar"),
		},
		{
			highPrioMeta: Metadata{
				Name: "metric_total",
				Type: model.MetricTypeCounter,
			},
			expectedLabels: labels.FromStrings(metricName, "metric_total", metricType, string(model.MetricTypeCounter), metricUnit, "MB", "foo", "bar"),
		},
		{
			highPrioMeta: Metadata{
				Type: model.MetricTypeCounter,
				Unit: "seconds",
			},
			expectedLabels: labels.FromStrings(metricName, "different_name", metricType, string(model.MetricTypeCounter), metricUnit, "seconds", "foo", "bar"),
		},
		{
			highPrioMeta: Metadata{
				Name: "metric_total",
				Type: model.MetricTypeUnknown,
				Unit: "seconds",
			},
			expectedLabels: labels.FromStrings(metricName, "metric_total", metricType, string(model.MetricTypeSummary), metricUnit, "seconds", "foo", "bar"),
		},
	}
	for _, tc := range cases {
		t.Run(fmt.Sprintf("meta=%#v", tc.highPrioMeta), func(t *testing.T) {
			// High-priority metadata goes in first, then the incoming labels
			// are filtered through the wrapper.
			builder := labels.NewScratchBuilder(0)
			tc.highPrioMeta.AddToLabels(&builder)
			sink := &IgnoreOverriddenMetadataLabelsScratchBuilder{ScratchBuilder: &builder, Overwrite: tc.highPrioMeta}
			incomingLabels.Range(func(l labels.Label) {
				sink.Add(l.Name, l.Value)
			})
			builder.Sort()
			require.Equal(t, tc.expectedLabels, builder.Labels())
		})
	}
}

@ -87,6 +87,9 @@ type Options struct {
// Option to enable the ingestion of native histograms.
EnableNativeHistogramsIngestion bool
// EnableTypeAndUnitLabels
EnableTypeAndUnitLabels bool
// Optional HTTP client options to use when scraping.
HTTPClientOptions []config_util.HTTPClientOption

@ -209,6 +209,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
opts.convertClassicHistToNHCB,
options.EnableNativeHistogramsIngestion,
options.EnableCreatedTimestampZeroIngestion,
options.EnableTypeAndUnitLabels,
options.ExtraMetrics,
options.AppendMetadata,
opts.target,
@ -932,6 +933,7 @@ type scrapeLoop struct {
// Feature flagged options.
enableNativeHistogramIngestion bool
enableCTZeroIngestion bool
enableTypeAndUnitLabels bool
appender func(ctx context.Context) storage.Appender
symbolTable *labels.SymbolTable
@ -1239,6 +1241,7 @@ func newScrapeLoop(ctx context.Context,
convertClassicHistToNHCB bool,
enableNativeHistogramIngestion bool,
enableCTZeroIngestion bool,
enableTypeAndUnitLabels bool,
reportExtraMetrics bool,
appendMetadataToWAL bool,
target *Target,
@ -1296,6 +1299,7 @@ func newScrapeLoop(ctx context.Context,
convertClassicHistToNHCB: convertClassicHistToNHCB,
enableNativeHistogramIngestion: enableNativeHistogramIngestion,
enableCTZeroIngestion: enableCTZeroIngestion,
enableTypeAndUnitLabels: enableTypeAndUnitLabels,
reportExtraMetrics: reportExtraMetrics,
appendMetadataToWAL: appendMetadataToWAL,
metrics: metrics,
@ -1622,7 +1626,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
return
}
p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.symbolTable)
p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.enableTypeAndUnitLabels, sl.symbolTable)
if p == nil {
sl.l.Error(
"Failed to determine correct type of scrape target.",

@ -982,6 +982,7 @@ func newBasicScrapeLoopWithFallback(t testing.TB, ctx context.Context, scraper s
false,
false,
false,
false,
true,
nil,
false,
@ -1130,6 +1131,7 @@ func TestScrapeLoopRun(t *testing.T) {
false,
false,
false,
false,
nil,
false,
scrapeMetrics,
@ -1278,6 +1280,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
false,
false,
false,
false,
nil,
false,
scrapeMetrics,
@ -2005,7 +2008,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
fakeRef := storage.SeriesRef(1)
expValue := float64(1)
metric := []byte(`metric{n="1"} 1`)
p, warning := textparse.New(metric, "text/plain", "", false, false, labels.NewSymbolTable())
p, warning := textparse.New(metric, "text/plain", "", false, false, false, labels.NewSymbolTable())
require.NotNil(t, p)
require.NoError(t, warning)

@ -208,7 +208,7 @@ Loop:
}
if l.Name == labels.MetricName {
nameSeen = true
if l.Value == lastMetricName && // We already have the name in the current MetricFamily, and we ignore nameless metrics.
if l.Value == lastMetricName && // We already have the name in the current MetricDescriptor, and we ignore nameless metrics.
lastWasHistogram == isHistogram && // The sample type matches (float vs histogram).
// If it was a histogram, the histogram type (counter vs gauge) also matches.
(!isHistogram || lastHistogramWasGauge == (s.H.CounterResetHint == histogram.GaugeType)) {
@ -220,7 +220,7 @@ Loop:
// an invalid exposition. But since the consumer of this is Prometheus, and Prometheus can
// parse it fine, we allow it and bend the rules to make federation possible in those cases.
// Need to start a new MetricFamily. Ship off the old one (if any) before
// Need to start a new MetricDescriptor. Ship off the old one (if any) before
// creating the new one.
if protMetricFam != nil {
if err := enc.Encode(protMetricFam); err != nil {
@ -309,7 +309,7 @@ Loop:
lastWasHistogram = isHistogram
protMetricFam.Metric = append(protMetricFam.Metric, protMetric)
}
// Still have to ship off the last MetricFamily, if any.
// Still have to ship off the last MetricDescriptor, if any.
if protMetricFam != nil {
if err := enc.Encode(protMetricFam); err != nil {
federationErrors.Inc()

@ -392,7 +392,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
require.Equal(t, http.StatusOK, res.Code)
body, err := io.ReadAll(res.Body)
require.NoError(t, err)
p := textparse.NewProtobufParser(body, false, labels.NewSymbolTable())
p := textparse.NewProtobufParser(body, false, false, labels.NewSymbolTable())
var actVec promql.Vector
metricFamilies := 0
l := labels.Labels{}

Loading…
Cancel
Save