pull/3631/merge
Kevin Burke 2 weeks ago committed by GitHub
commit d8815a0cf4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 126
      collector/collector_bench_test.go
  2. 1
      collector/filesystem_common.go
  3. 49
      collector/filesystem_linux.go
  4. 83
      collector/textfile.go
  5. 79
      collector/textfile_test.go

@ -0,0 +1,126 @@
//go:build linux
// Copyright 2026 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"fmt"
"io"
"log/slog"
"testing"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/procfs"
)
// BenchmarkParseFilesystemLabels measures the cost of converting a large
// mountinfo table into filesystemLabels values, checking each iteration's
// result for sanity so the work cannot be optimized away.
func BenchmarkParseFilesystemLabels(b *testing.B) {
	mounts := benchmarkMountInfo(256)

	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		got, err := parseFilesystemLabels(mounts)
		if err != nil {
			b.Fatal(err)
		}
		if want := len(mounts); len(got) != want {
			b.Fatalf("got %d filesystems, want %d", len(got), want)
		}
	}
}
// BenchmarkTextfileConvertMetricFamily measures conversion of a wide metric
// family into prometheus.Metric values; the buffered channel is drained after
// every iteration so sends never block.
func BenchmarkTextfileConvertMetricFamily(b *testing.B) {
	discardLogger := slog.New(slog.NewTextHandler(io.Discard, nil))
	family := benchmarkMetricFamily(256, 8)
	sink := make(chan prometheus.Metric, len(family.Metric)+1)

	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		convertMetricFamily(family, sink, discardLogger)
		drainMetrics(sink)
	}
}
// benchmarkMountInfo synthesizes count mountinfo entries that mimic a host
// with many container overlay mounts spread across a handful of virtio block
// devices.
func benchmarkMountInfo(count int) []*procfs.MountInfo {
	entries := make([]*procfs.MountInfo, count)
	for i := range entries {
		entries[i] = &procfs.MountInfo{
			MajorMinorVer: fmt.Sprintf("%d:%d", i/16+8, i%16),
			Source:        fmt.Sprintf("/dev/vd%c", 'a'+rune(i%26)),
			MountPoint:    fmt.Sprintf("/var/lib/containers/storage/overlay/%d", i),
			FSType:        "ext4",
			Options: map[string]string{
				"rw":       "",
				"relatime": "",
				"discard":  "",
			},
			SuperOptions: map[string]string{
				"rw":     "",
				"errors": "remount-ro",
			},
		}
	}
	return entries
}
// benchmarkMetricFamily builds a gauge family with metricCount series, each
// carrying a sparse subset of labelCount label names, so converting it has to
// normalize the label union across series.
func benchmarkMetricFamily(metricCount, labelCount int) *dto.MetricFamily {
	series := make([]*dto.Metric, 0, metricCount)
	for i := 0; i < metricCount; i++ {
		pairs := make([]*dto.LabelPair, 0, labelCount)
		for j := 0; j < labelCount; j++ {
			// Leave some labels out on each metric so the benchmark exercises
			// label union normalization as well as desc reuse.
			if (i+j)%3 == 0 {
				continue
			}
			labelName := fmt.Sprintf("label_%d", j)
			labelValue := fmt.Sprintf("value_%d_%d", i, j)
			pairs = append(pairs, &dto.LabelPair{
				Name:  stringPtr(labelName),
				Value: stringPtr(labelValue),
			})
		}
		gaugeValue := float64(i)
		series = append(series, &dto.Metric{
			Label: pairs,
			Gauge: &dto.Gauge{Value: &gaugeValue},
		})
	}
	gauge := dto.MetricType_GAUGE
	return &dto.MetricFamily{
		Name:   stringPtr("node_benchmark_textfile_metric"),
		Help:   stringPtr("Benchmark metric family for textfile collector."),
		Type:   &gauge,
		Metric: series,
	}
}
// drainMetrics empties ch without blocking so its buffer can be reused by the
// next benchmark iteration (there are no concurrent senders here).
func drainMetrics(ch chan prometheus.Metric) {
	for {
		select {
		case <-ch:
		default:
			return
		}
	}
}
// stringPtr returns a pointer to a fresh copy of s, as required by the
// pointer-typed fields on dto structs.
func stringPtr(s string) *string {
	v := s
	return &v
}

@ -81,6 +81,7 @@ type filesystemCollector struct {
// filesystemLabels identifies a mounted filesystem and carries the label
// values exported for its metrics.
type filesystemLabels struct {
	device, mountPoint, fsType, mountOptions, superOptions, deviceError, major, minor string
	// readOnly reports whether the filesystem is mounted read-only;
	// precomputed when mountinfo is parsed instead of re-splitting the
	// option strings on every scrape.
	readOnly bool
}
type filesystemStats struct {

@ -16,12 +16,10 @@
package collector
import (
"bytes"
"errors"
"fmt"
"log/slog"
"os"
"slices"
"strconv"
"strings"
"sync"
@ -109,7 +107,7 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
func (c *filesystemCollector) processStat(labels filesystemLabels) filesystemStats {
var ro float64
if isFilesystemReadOnly(labels) {
if labels.readOnly {
ro = 1
}
@ -212,38 +210,41 @@ func parseFilesystemLabels(mountInfo []*procfs.MountInfo) ([]filesystemLabels, e
mount.MountPoint = strings.ReplaceAll(mount.MountPoint, "\\011", "\t")
filesystems = append(filesystems, filesystemLabels{
device: mount.Source,
mountPoint: rootfsStripPrefix(mount.MountPoint),
fsType: mount.FSType,
mountOptions: mountOptionsString(mount.Options),
superOptions: mountOptionsString(mount.SuperOptions),
major: strconv.Itoa(major),
minor: strconv.Itoa(minor),
deviceError: "",
device: mount.Source,
mountPoint: rootfsStripPrefix(mount.MountPoint),
fsType: mount.FSType,
readOnly: hasMountOption(mount.Options, "ro") || hasMountOption(mount.SuperOptions, "ro"),
major: strconv.Itoa(major),
minor: strconv.Itoa(minor),
deviceError: "",
})
}
return filesystems, nil
}
// hasMountOption reports whether key appears in the parsed mount options.
// Flag-style options (e.g. "ro") have empty values, so only presence matters.
func hasMountOption(options map[string]string, key string) bool {
	if _, found := options[key]; found {
		return true
	}
	return false
}
// see https://github.com/prometheus/node_exporter/issues/3157#issuecomment-2422761187
// if either mount or super options contain "ro" the filesystem is read-only
func isFilesystemReadOnly(labels filesystemLabels) bool {
if slices.Contains(strings.Split(labels.mountOptions, ","), "ro") || slices.Contains(strings.Split(labels.superOptions, ","), "ro") {
if labels.readOnly {
return true
}
return false
}
func mountOptionsString(m map[string]string) string {
b := new(bytes.Buffer)
for key, value := range m {
if value == "" {
fmt.Fprintf(b, "%s", key)
} else {
fmt.Fprintf(b, "%s=%s", key, value)
if strings.Contains(labels.mountOptions, "ro") || strings.Contains(labels.superOptions, "ro") {
// Test-only fallback for labels synthesized without the parsed readOnly bit.
for _, options := range []string{labels.mountOptions, labels.superOptions} {
for _, option := range strings.Split(options, ",") {
if option == "ro" {
return true
}
}
}
return true
}
return b.String()
return false
}

@ -20,7 +20,6 @@ import (
"log/slog"
"os"
"path/filepath"
"slices"
"sort"
"strings"
"time"
@ -40,6 +39,11 @@ var (
[]string{"file"},
nil,
)
textFileScrapeErrorDesc = prometheus.NewDesc(
"node_textfile_scrape_error",
"1 if there was an error opening or reading a file, 0 otherwise",
nil, nil,
)
)
type textFileCollector struct {
@ -64,37 +68,23 @@ func NewTextFileCollector(logger *slog.Logger) (Collector, error) {
}
func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric, logger *slog.Logger) {
labelNames, labelIndexes := metricFamilyLabelNames(metricFamily)
desc := prometheus.NewDesc(
*metricFamily.Name,
metricFamily.GetHelp(),
labelNames, nil,
)
var valType prometheus.ValueType
var val float64
allLabelNames := map[string]struct{}{}
for _, metric := range metricFamily.Metric {
labels := metric.GetLabel()
for _, label := range labels {
if _, ok := allLabelNames[label.GetName()]; !ok {
allLabelNames[label.GetName()] = struct{}{}
}
}
}
for _, metric := range metricFamily.Metric {
if metric.TimestampMs != nil {
logger.Warn("Ignoring unsupported custom timestamp on textfile collector metric", "metric", metric)
}
labels := metric.GetLabel()
var names []string
var values []string
for _, label := range labels {
names = append(names, label.GetName())
values = append(values, label.GetValue())
}
for k := range allLabelNames {
if !slices.Contains(names, k) {
names = append(names, k)
values = append(values, "")
}
values := make([]string, len(labelNames))
for _, label := range metric.GetLabel() {
values[labelIndexes[label.GetName()]] = label.GetValue()
}
metricType := metricFamily.GetType()
@ -117,11 +107,7 @@ func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Me
quantiles[q.GetQuantile()] = q.GetValue()
}
ch <- prometheus.MustNewConstSummary(
prometheus.NewDesc(
*metricFamily.Name,
metricFamily.GetHelp(),
names, nil,
),
desc,
metric.Summary.GetSampleCount(),
metric.Summary.GetSampleSum(),
quantiles, values...,
@ -132,11 +118,7 @@ func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Me
buckets[b.GetUpperBound()] = b.GetCumulativeCount()
}
ch <- prometheus.MustNewConstHistogram(
prometheus.NewDesc(
*metricFamily.Name,
metricFamily.GetHelp(),
names, nil,
),
desc,
metric.Histogram.GetSampleCount(),
metric.Histogram.GetSampleSum(),
buckets, values...,
@ -146,17 +128,29 @@ func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Me
}
if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED {
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
*metricFamily.Name,
metricFamily.GetHelp(),
names, nil,
),
desc,
valType, val, values...,
)
}
}
}
// metricFamilyLabelNames returns the union of label names used by any metric
// in the family, in first-seen order, together with a map from each name to
// its position in the returned slice.
func metricFamilyLabelNames(metricFamily *dto.MetricFamily) ([]string, map[string]int) {
	indexByName := make(map[string]int)
	ordered := make([]string, 0)
	for _, m := range metricFamily.Metric {
		for _, pair := range m.GetLabel() {
			name := pair.GetName()
			if _, seen := indexByName[name]; seen {
				continue
			}
			indexByName[name] = len(ordered)
			ordered = append(ordered, name)
		}
	}
	return ordered, indexByName
}
func (c *textFileCollector) exportMTimes(mtimes map[string]time.Time, ch chan<- prometheus.Metric) {
if len(mtimes) == 0 {
return
@ -188,7 +182,7 @@ func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error {
metricsNamesToFiles := map[string][]string{}
metricsNamesToHelpTexts := map[string][2]string{}
paths := []string{}
paths := make([]string, 0, len(c.paths))
for _, glob := range c.paths {
ps, err := filepath.Glob(glob)
if err != nil || len(ps) == 0 {
@ -272,14 +266,7 @@ func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error {
errVal = 1.0
}
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
"node_textfile_scrape_error",
"1 if there was an error opening or reading a file, 0 otherwise",
nil, nil,
),
prometheus.GaugeValue, errVal,
)
ch <- prometheus.MustNewConstMetric(textFileScrapeErrorDesc, prometheus.GaugeValue, errVal)
return nil
}

@ -27,6 +27,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/promslog/flag"
)
@ -156,3 +157,81 @@ func TestTextfileCollector(t *testing.T) {
}
}
}
// TestMetricFamilyLabelNames verifies that label names are collected in
// first-seen order across metrics with sparse label sets and that the index
// map agrees with the slice positions.
func TestMetricFamilyLabelNames(t *testing.T) {
	cases := []struct {
		name             string
		metricFamily     *dto.MetricFamily
		wantLabelNames   []string
		wantLabelIndexes map[string]int
	}{
		{
			name:             "empty metric family",
			metricFamily:     &dto.MetricFamily{},
			wantLabelIndexes: map[string]int{},
		},
		{
			name: "preserves first-seen order across sparse labels",
			metricFamily: &dto.MetricFamily{
				Metric: []*dto.Metric{
					{
						Label: []*dto.LabelPair{
							dtoLabelPair("code", "200"),
							dtoLabelPair("method", "get"),
						},
					},
					{
						Label: []*dto.LabelPair{
							dtoLabelPair("handler", "query"),
							dtoLabelPair("code", "500"),
						},
					},
					{
						Label: []*dto.LabelPair{
							dtoLabelPair("method", "post"),
							dtoLabelPair("tenant", "prod"),
						},
					},
				},
			},
			wantLabelNames: []string{"code", "method", "handler", "tenant"},
			wantLabelIndexes: map[string]int{
				"code":    0,
				"method":  1,
				"handler": 2,
				"tenant":  3,
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			names, indexes := metricFamilyLabelNames(tc.metricFamily)
			if len(names) != len(tc.wantLabelNames) {
				t.Fatalf("got %d label names, want %d", len(names), len(tc.wantLabelNames))
			}
			for i := range tc.wantLabelNames {
				if names[i] != tc.wantLabelNames[i] {
					t.Fatalf("labelNames[%d] = %q, want %q", i, names[i], tc.wantLabelNames[i])
				}
			}
			if len(indexes) != len(tc.wantLabelIndexes) {
				t.Fatalf("got %d label indexes, want %d", len(indexes), len(tc.wantLabelIndexes))
			}
			for name, wantIdx := range tc.wantLabelIndexes {
				if indexes[name] != wantIdx {
					t.Fatalf("labelIndexes[%q] = %d, want %d", name, indexes[name], wantIdx)
				}
			}
		})
	}
}
// dtoLabelPair builds a LabelPair from literal strings by taking the
// addresses of the per-call argument copies.
func dtoLabelPair(name, value string) *dto.LabelPair {
	pair := &dto.LabelPair{}
	pair.Name = &name
	pair.Value = &value
	return pair
}

Loading…
Cancel
Save